Ruby 3.5.0dev (2025-10-06 revision 704677257ecb01c7ee10aa0dfc55ca1d4fc4636d)
gc.c (704677257ecb01c7ee10aa0dfc55ca1d4fc4636d)
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/concurrent_set.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "variable.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"
#include "zjit.h"

#include "builtin.h"
#include "shape.h"

unsigned int
rb_gc_vm_lock(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave(&lev, file, line);
}

unsigned int
rb_gc_cr_lock(const char *file, int line)
{
    unsigned int lev;
    rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
}

unsigned int
rb_gc_vm_lock_no_barrier(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter_nb(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_nb(&lev, file, line);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}

#if USE_MODULAR_GC
void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}


void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    if (RB_LIKELY(ruby_single_main_ractor)) {
        GC_ASSERT(
            ccan_list_empty(&GET_VM()->ractor.set) ||
            (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
             ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
        );

        func(ruby_single_main_ractor->newobj_cache, data);
    }
    else {
        ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
            func(r->newobj_cache, data);
        }
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    rb_ractor_ignore_belonging(true);
    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
    rb_ractor_ignore_belonging(false);
#undef RESTORE_FINALIZER
}

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_ractor_p(obj)) return false;
        if (rb_obj_is_fstring_table(obj)) return false;
        if (rb_obj_is_symbol_table(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_obj_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    RBASIC_SET_SHAPE_ID(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    return (uint32_t)rb_shape_transition_heap(obj, heap_id);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */

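/*
 * A minimal usage sketch for unless_objspace (the caller below is
 * hypothetical): the trailing else-branch runs when no VM, and therefore no
 * objspace, is available, so callers typically bail out there:
 *
 *     void
 *     example_gc_entry_point(void)
 *     {
 *         unless_objspace(objspace) { return; }
 *         // ... safe to use objspace here ...
 *     }
 */
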
#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)

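/*
 * A usage sketch for the macros above (struct my_node and the update
 * function are hypothetical): compaction reference-updating code rewrites
 * every slot that may point at a moved object, either as a plain VALUE or
 * through a typed cast:
 *
 *     static void
 *     my_node_update_refs(void *objspace, struct my_node *node)
 *     {
 *         UPDATE_IF_MOVED(objspace, node->value);                        // VALUE slot
 *         TYPED_UPDATE_IF_MOVED(objspace, struct my_node *, node->next); // typed slot
 *     }
 */
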
#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
    // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it resulted in
    // no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif

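/*
 * Worked example of the probe above, using the glibc figures from the
 * comment (exact sizes are allocator- and version-dependent): malloc(64 - 0)
 * returns a chunk with 72 usable bytes (8 wasted), while malloc(64 - 8)
 * returns exactly 56 usable bytes (0 wasted), so the loop settles on
 * malloc_offset == 8. Under jemalloc, malloc(64) already wastes nothing and
 * malloc_offset stays 0.
 */
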
size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}

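/*
 * Worked example (assuming malloc_offset == 0): growing from 10 elements of
 * 8 bytes starts at 10 * 8 = 80 bytes, doubles to 160, rounds up to the next
 * power of two (256), and converts back to 256 / 8 = 32 elements. With the
 * glibc offset of 8 computed above, the request becomes 248 bytes
 * (31 elements), a size glibc can serve without padding waste.
 */
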
static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}

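/*
 * Usage sketch for the overflow-checked helpers (the variables are
 * hypothetical): `left` carries the overflow flag and `right` the value, so
 * a caller can compute `count * elem_size + header` defensively:
 *
 *     struct rbimpl_size_mul_overflow_tag t =
 *         size_mul_add_overflow(count, elem_size, header);
 *     if (t.left) rb_raise(rb_eArgError, "allocation size overflow");
 *     void *buf = xmalloc(t.right);
 */
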
PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
    void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
         * not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric, dash, and underscore are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
    (prefix_len >= (tail = rb_strlen_lit(lit)) && \
     memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_free);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    load_modular_gc_func(mark_weak);
    load_modular_gc_func(remove_weak);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(object_metadata);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}

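/*
 * Loading sketch: a modular GC is built as
 * <modular_gc_dir>/librubygc.<name>.<DLEXT> and must export every
 * rb_gc_impl_* symbol resolved above. Selecting it at runtime (the name
 * "mygc" is hypothetical) is just:
 *
 *     $ RUBY_GC_LIBRARY=mygc ruby script.rb
 *
 * When RUBY_GC_LIBRARY is unset, no dlopen happens and the statically
 * linked default GC implementation is used.
 */
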
// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;
    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(VALUE obj)
{
#if RUBY_DEBUG
    // IMEMOs and objects without a class (e.g. the managed ID table) are not traceable
    if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;

    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        const VALUE *iseq_encoded = ISEQ_BODY(cfp->iseq)->iseq_encoded;
        const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size;
        RUBY_ASSERT(cfp->pc >= iseq_encoded, "PC not set when allocating, breaking tracing");
        RUBY_ASSERT(cfp->pc <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);

    gc_validate_pc(obj);

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);

            /* We must disable GC here because the callback could call xmalloc
             * which could potentially trigger a GC. A lot of code is unsafe
             * against a GC triggering right after an object has been allocated,
             * because callers perform initialization for the object and assume
             * that the GC does not run before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);
    }

#if RGENGC_CHECK_MODE
# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
#  define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
# endif

    memset(
        (void *)(obj + sizeof(struct RBasic)),
        GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
        rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
    );
#endif

    return obj;
}

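/*
 * The hook above is what C extensions observe; a minimal sketch of
 * subscribing to it with the public ruby/debug.h tracepoint API
 * (illustrative; error handling omitted):
 *
 *     static void
 *     newobj_cb(VALUE tpval, void *data)
 *     {
 *         rb_trace_arg_t *targ = rb_tracearg_from_tracepoint(tpval);
 *         VALUE obj = rb_tracearg_object(targ);
 *         (void)obj; // count or sample freshly allocated objects
 *     }
 *
 *     // VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, newobj_cb, NULL);
 *     // rb_tracepoint_enable(tp);
 */
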
VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    if (klass) rb_data_object_check(klass);
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA, !dmark, sizeof(struct RTypedData));

    struct RData *data = (struct RData *)obj;
    data->dmark = dmark;
    data->dfree = dfree;
    data->data = datap;

    return obj;
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, wb_protected, size);

    struct RTypedData *data = (struct RTypedData *)obj;
    data->fields_obj = 0;
    *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
    data->data = datap;

    return obj;
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

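/*
 * A consumer-side sketch of the typed-data allocators above (struct
 * my_thing and its type are hypothetical):
 *
 *     struct my_thing { long n; };
 *
 *     static const rb_data_type_t my_thing_type = {
 *         .wrap_struct_name = "my_thing",
 *         .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
 *         .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED,
 *     };
 *
 *     // VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct my_thing), &my_thing_type);
 */
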
static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static enum rb_id_table_iterator_result
cvar_table_free_i(VALUE value, void *ctx)
{
    xfree((void *)value);
    return ID_TABLE_CONTINUE;
}

static void
io_fptr_finalize(void *fptr)
{
    rb_io_fptr_finalize((struct rb_io *)fptr);
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(objspace, obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

struct classext_foreach_args {
    VALUE klass;
    rb_objspace_t *objspace; // used for update_*
};

static void
classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
{
    struct rb_id_table *tbl;
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_id_table_free(RCLASSEXT_M_TBL(ext));

    if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) {
        rb_free_const_table(tbl);
    }
    if ((tbl = RCLASSEXT_CVC_TBL(ext)) != NULL) {
        rb_id_table_foreach_values(tbl, cvar_table_free_i, NULL);
        rb_id_table_free(tbl);
    }
    rb_class_classext_free_subclasses(ext, args->klass);
    if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
        RUBY_ASSERT(is_prime); // superclasses should only be used on prime
        xfree(RCLASSEXT_SUPERCLASSES(ext));
    }
    if (!is_prime) { // the prime classext will be freed with RClass
        xfree(ext);
    }
}

static void
classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
        /* Method table is not shared for origin iclasses of classes */
        rb_id_table_free(RCLASSEXT_M_TBL(ext));
    }
    if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) {
        rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext));
    }

    rb_class_classext_free_subclasses(ext, args->klass);

    if (!is_prime) { // the prime classext will be freed with RClass
        xfree(ext);
    }
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    struct classext_foreach_args args;

    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
                st_free_table(ROBJECT_FIELDS_HASH(obj));
            }
            else {
                xfree(ROBJECT(obj)->as.heap.fields);
                RB_DEBUG_COUNTER_INC(obj_obj_ptr);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        break;
      case T_MODULE:
      case T_CLASS:
#if USE_ZJIT
        rb_zjit_klass_free(obj);
#endif
        args.klass = obj;
        rb_class_classext_foreach(obj, classext_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }
        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        args.klass = obj;

        rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST_RAW(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (obj == rb_mRubyVMFrozenCore)
                return 1;

            if (!RBASIC_CLASS(obj)) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *      a = 102.7
 *      b = 95       # Won't be returned
 *      c = 12345678987654321
 *      count = ObjectSpace.each_object(Numeric) {|x| p x }
 *      puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *      12345678987654321
 *      102.7
 *      2.71828182845905
 *      3.14159265358979
 *      2.22044604925031e-16
 *      1.7976931348623157e+308
 *      2.2250738585072e-308
 *      Total count: 7
 *
 *  Due to a current known Ractor implementation issue, this method will not yield
 *  Ractor-unshareable objects in multi-Ractor mode (when
 *  <code>Ractor.new</code> has been called within the process at least once).
 *  See https://bugs.ruby-lang.org/issues/19387 for more information.
 *
 *      a = 12345678987654321 # shareable
 *      b = [].freeze # shareable
 *      c = {} # not shareable
 *      ObjectSpace.each_object {|x| x } # yields a, b, and c
 *      Ractor.new {} # enter multi-Ractor mode
 *      ObjectSpace.each_object {|x| x } # does not yield c
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
static VALUE id2ref_value = 0;
static st_table *id2ref_tbl = NULL;

#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
static size_t object_id_counter = 1;
#else
static unsigned long long object_id_counter = 1;
#endif

static inline VALUE
generate_next_object_id(void)
{
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
    // 64bit atomics are available
    return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
#else
    unsigned int lock_lev = RB_GC_VM_LOCK();
    VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
    RB_GC_VM_UNLOCK(lock_lev);
    return id;
#endif
}

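/*
 * Worked example: on a typical 64-bit build RUBY_IMMEDIATE_MASK is 0x07, so
 * OBJ_ID_INCREMENT is 8 and generated ids advance as 8, 16, 24, ... Being
 * multiples of 8 they are always even, which keeps them disjoint from
 * Fixnum object ids, which are always odd (2n + 1).
 */
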
void
rb_gc_obj_id_moved(VALUE obj)
{
    if (UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
    }
}

static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_TYPE_P(x, T_BIGNUM)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    return FIX2LONG(rb_hash((VALUE)n));
}

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};

static void gc_mark_tbl_no_pin(st_table *table);

static void
id2ref_tbl_mark(void *data)
{
    st_table *table = (st_table *)data;
    if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
        // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
        rb_mark_set(table);
    }
    // We purposely don't mark values, as they are weak references.
    // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
}

static size_t
id2ref_tbl_memsize(const void *data)
{
    return rb_st_memsize(data);
}

static void
id2ref_tbl_free(void *data)
{
    id2ref_tbl = NULL; // clear global ref
    st_table *table = (st_table *)data;
    st_free_table(table);
}

static const rb_data_type_t id2ref_tbl_type = {
    .wrap_struct_name = "VM/_id2ref_table",
    .function = {
        .dmark = id2ref_tbl_mark,
        .dfree = id2ref_tbl_free,
        .dsize = id2ref_tbl_memsize,
        // dcompact function not required because the table is reference updated
        // in rb_gc_vm_weak_table_foreach
    },
    .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
class_object_id(VALUE klass)
{
    VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
    if (!id) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        id = generate_next_object_id();
        VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
        if (existing_id) {
            id = existing_id;
        }
        else if (RB_UNLIKELY(id2ref_tbl)) {
            st_insert(id2ref_tbl, id, klass);
        }
        RB_GC_VM_UNLOCK(lock_lev);
    }
    return id;
}

static inline VALUE
object_id_get(VALUE obj, shape_id_t shape_id)
{
    VALUE id;
    if (rb_shape_too_complex_p(shape_id)) {
        id = rb_obj_field_get(obj, ROOT_TOO_COMPLEX_WITH_OBJ_ID);
    }
    else {
        id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
    }

#if RUBY_DEBUG
    if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
        rb_p(obj);
        rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
    }
#endif

    return id;
}

static VALUE
object_id0(VALUE obj)
{
    VALUE id = Qfalse;
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    if (rb_shape_has_object_id(shape_id)) {
        return object_id_get(obj, shape_id);
    }

    shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);

    id = generate_next_object_id();
    rb_obj_field_set(obj, object_id_shape_id, 0, id);

    RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
    RUBY_ASSERT(rb_shape_obj_has_id(obj));

    if (RB_UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
    }
    return id;
}

static VALUE
object_id(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        // With namespaces, classes and modules have different fields
        // in different namespaces, so we cannot store the object id
        // in fields.
        return class_object_id(obj);
      case T_IMEMO:
        RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
        break;
      default:
        break;
    }

    if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        VALUE id = object_id0(obj);
        RB_GC_VM_UNLOCK(lock_lev);
        return id;
    }

    return object_id0(obj);
}

static void
build_id2ref_i(VALUE obj, void *data)
{
    st_table *id2ref_tbl = (st_table *)data;

    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (RCLASS(obj)->object_id) {
            st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
        }
        break;
      case T_IMEMO:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (IMEMO_TYPE_P(obj, imemo_fields) && rb_shape_obj_has_id(obj)) {
            st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
        }
        break;
      case T_OBJECT:
        RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
        if (rb_shape_obj_has_id(obj)) {
            st_insert(id2ref_tbl, rb_obj_id(obj), obj);
        }
        break;
      default:
        // For generic_fields, the T_IMEMO/fields is responsible for populating the entry.
        break;
    }
}

2005static VALUE
2006object_id_to_ref(void *objspace_ptr, VALUE object_id)
2007{
2008 rb_objspace_t *objspace = objspace_ptr;
2009
2010 unsigned int lev = RB_GC_VM_LOCK();
2011
2012 if (!id2ref_tbl) {
2013 rb_gc_vm_barrier(); // stop other ractors
2014
2015 // GC must not trigger while we build the table, otherwise if we end
2016 // up freeing an object that had an ID, we might try to delete it from
2017 // the table even though it wasn't inserted yet.
2018 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
2019 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
2020
2021 // build_id2ref_i will almost certainly malloc, which could trigger GC and sweep
2022 // objects we just added to the table.
2023 // By calling rb_gc_disable() we also avoid having to handle potentially garbage objects.
2024 bool gc_disabled = RTEST(rb_gc_disable());
2025 {
2026 id2ref_tbl = tmp_id2ref_tbl;
2027 id2ref_value = tmp_id2ref_value;
2028
2029 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
2030 }
2031 if (!gc_disabled) rb_gc_enable();
2032 }
2033
2034 VALUE obj;
2035 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2036
2037 RB_GC_VM_UNLOCK(lev);
2038
2039 if (found) {
2040 return obj;
2041 }
2042
2043 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2044 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2045 }
2046 else {
2047 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2048 }
2049}
2050
2051static inline void
2052obj_free_object_id(VALUE obj)
2053{
2054 VALUE obj_id = 0;
2055 if (RB_UNLIKELY(id2ref_tbl)) {
2056 switch (BUILTIN_TYPE(obj)) {
2057 case T_CLASS:
2058 case T_MODULE:
2059 obj_id = RCLASS(obj)->object_id;
2060 break;
2061 case T_IMEMO:
2062 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2063 return;
2064 }
2065 // fallthrough
2066 case T_OBJECT:
2067 {
2068 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2069 if (rb_shape_has_object_id(shape_id)) {
2070 obj_id = object_id_get(obj, shape_id);
2071 }
2072 break;
2073 }
2074 default:
2075 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2076 return;
2077 }
2078
2079 if (RB_UNLIKELY(obj_id)) {
2080 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2081
2082 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2083 // If the object is a T_IMEMO/fields, then it's possible the actual object
2084 // has been garbage collected already.
2085 if (!RB_TYPE_P(obj, T_IMEMO)) {
2086 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2087 }
2088 }
2089 }
2090 }
2091}
2092
2093void
2094rb_gc_obj_free_vm_weak_references(VALUE obj)
2095{
2096 obj_free_object_id(obj);
2097
2098 if (rb_obj_exivar_p(obj)) {
2099 rb_free_generic_ivar(obj);
2100 }
2101
2102 switch (BUILTIN_TYPE(obj)) {
2103 case T_STRING:
2104 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2105 rb_gc_free_fstring(obj);
2106 }
2107 break;
2108 case T_SYMBOL:
2109 rb_gc_free_dsymbol(obj);
2110 break;
2111 case T_IMEMO:
2112 switch (imemo_type(obj)) {
2113 case imemo_callcache: {
2114 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
2115
2116 if (vm_cc_refinement_p(cc)) {
2117 rb_vm_delete_cc_refinement(cc);
2118 }
2119
2120 break;
2121 }
2122 case imemo_callinfo:
2123 rb_vm_ci_free((const struct rb_callinfo *)obj);
2124 break;
2125 case imemo_ment:
2126 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2127 break;
2128 default:
2129 break;
2130 }
2131 break;
2132 default:
2133 break;
2134 }
2135}
2136
2137/*
2138 * call-seq:
2139 * ObjectSpace._id2ref(object_id) -> an_object
2140 *
2141 * Converts an object id to a reference to the object. May not be
2142 * called on an object id passed as a parameter to a finalizer.
2143 *
2144 * s = "I am a string" #=> "I am a string"
2145 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2146 * r == s #=> true
2147 *
2148 * In multi-ractor mode, if the object is not shareable, it raises
2149 * RangeError.
2150 *
2151 * This method is deprecated and should no longer be used.
2152 */
2153
2154static VALUE
2155id2ref(VALUE objid)
2156{
2157#if SIZEOF_LONG == SIZEOF_VOIDP
2158#define NUM2PTR(x) NUM2ULONG(x)
2159#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2160#define NUM2PTR(x) NUM2ULL(x)
2161#endif
2162 objid = rb_to_int(objid);
2163 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2164 VALUE ptr = NUM2PTR(objid);
2165 if (SPECIAL_CONST_P(ptr)) {
2166 if (ptr == Qtrue) return Qtrue;
2167 if (ptr == Qfalse) return Qfalse;
2168 if (NIL_P(ptr)) return Qnil;
2169 if (FIXNUM_P(ptr)) return ptr;
2170 if (FLONUM_P(ptr)) return ptr;
2171
2172 if (SYMBOL_P(ptr)) {
2173 // Check that the symbol is valid
2174 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2175 return ptr;
2176 }
2177 else {
2178 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2179 }
2180 }
2181
2182 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2183 }
2184 }
2185
2186 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2187 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2188 return obj;
2189 }
2190 else {
2191 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2192 }
2193}
2194
2195/* :nodoc: */
2196static VALUE
2197os_id2ref(VALUE os, VALUE objid)
2198{
2199 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2200 return id2ref(objid);
2201}
2202
2203static VALUE
2204rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2205{
2206 if (SPECIAL_CONST_P(obj)) {
2207#if SIZEOF_LONG == SIZEOF_VOIDP
2208 return LONG2NUM((SIGNED_VALUE)obj);
2209#else
2210 return LL2NUM((SIGNED_VALUE)obj);
2211#endif
2212 }
2213
2214 return get_heap_object_id(obj);
2215}
2216
2217static VALUE
2218nonspecial_obj_id(VALUE obj)
2219{
2220#if SIZEOF_LONG == SIZEOF_VOIDP
2221 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2222#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2223 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2224#else
2225# error not supported
2226#endif
2227}
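/* Both branches yield the same numeric value: tagging with FIXNUM_FLAG
 * represents the integer n as (n << 1) | 1, so an object at (even) address
 * A gets memory id A / 2, matching the explicit division in the LL2NUM
 * branch. For example, an object at address 0x7f10 has memory id 0x3f88. */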
2228
2229VALUE
2230rb_memory_id(VALUE obj)
2231{
2232 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2233}
2234
2235/*
2236 * Document-method: __id__
2237 * Document-method: object_id
2238 *
2239 * call-seq:
2240 * obj.__id__ -> integer
2241 * obj.object_id -> integer
2242 *
2243 * Returns an integer identifier for +obj+.
2244 *
2245 * The same number will be returned on all calls to +object_id+ for a given
2246 * object, and no two active objects will share an id.
2247 *
2248 * Note that some objects of builtin classes are reused for optimization.
2249 * This is the case for immediate values and frozen string literals.
2250 *
2251 * BasicObject implements +__id__+, Kernel implements +object_id+.
2252 *
2253 * Immediate values are not passed by reference but are passed by value:
2254 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2255 *
2256 * Object.new.object_id == Object.new.object_id # => false
2257 * (21 * 2).object_id == (21 * 2).object_id # => true
2258 * "hello".object_id == "hello".object_id # => false
2259 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2260 */
2261
2262VALUE
2263rb_obj_id(VALUE obj)
2264{
2265 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2266 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2267 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2268 * any immediates. */
2269 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2270}
2271
2272bool
2273rb_obj_id_p(VALUE obj)
2274{
2275 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2276}
2277
2278/*
2279 * GC implementations should call this function before the GC phase that updates references
2280 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2281 * "W^X" policy and protect the code memory from being modified during execution. This function
2282 * makes the code memory writeable.
2283 */
2284void
2285rb_gc_before_updating_jit_code(void)
2286{
2287#if USE_YJIT
2288 rb_yjit_mark_all_writeable();
2289#endif
2290}
2291
2292/*
2293 * GC implementations should call this function before the GC phase that updates references
2294 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2295 * executable again.
2296 */
2297void
2298rb_gc_after_updating_jit_code(void)
2299{
2300#if USE_YJIT
2301 rb_yjit_mark_all_executable();
2302#endif
2303}
2304
2305static void
2306classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2307{
2308 size_t *size = (size_t *)arg;
2309 size_t s = 0;
2310
2311 if (RCLASSEXT_M_TBL(ext)) {
2312 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2313 }
2314 if (RCLASSEXT_CVC_TBL(ext)) {
2315 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2316 }
2317 if (RCLASSEXT_CONST_TBL(ext)) {
2318 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2319 }
2320 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2321 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2322 }
2323 if (!prime) {
2324 s += sizeof(rb_classext_t);
2325 }
2326 *size += s;
2327}
2328
2329static void
2330classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2331{
2332 size_t *size = (size_t *)arg;
2333 size_t array_size;
2334 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2335 RUBY_ASSERT(prime);
2336 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2337 *size += array_size * sizeof(VALUE);
2338 }
2339}
2340
2341size_t
2342rb_obj_memsize_of(VALUE obj)
2343{
2344 size_t size = 0;
2345
2346 if (SPECIAL_CONST_P(obj)) {
2347 return 0;
2348 }
2349
2350 switch (BUILTIN_TYPE(obj)) {
2351 case T_OBJECT:
2352 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2353 if (rb_shape_obj_too_complex_p(obj)) {
2354 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2355 }
2356 else {
2357 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2358 }
2359 }
2360 break;
2361 case T_MODULE:
2362 case T_CLASS:
2363 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2364 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2365 break;
2366 case T_ICLASS:
2367 if (RICLASS_OWNS_M_TBL_P(obj)) {
2368 if (RCLASS_M_TBL(obj)) {
2369 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2370 }
2371 }
2372 break;
2373 case T_STRING:
2374 size += rb_str_memsize(obj);
2375 break;
2376 case T_ARRAY:
2377 size += rb_ary_memsize(obj);
2378 break;
2379 case T_HASH:
2380 if (RHASH_ST_TABLE_P(obj)) {
2381 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2382 /* st_table is in the slot */
2383 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2384 }
2385 break;
2386 case T_REGEXP:
2387 if (RREGEXP_PTR(obj)) {
2388 size += onig_memsize(RREGEXP_PTR(obj));
2389 }
2390 break;
2391 case T_DATA:
2392 size += rb_objspace_data_type_memsize(obj);
2393 break;
2394 case T_MATCH:
2395 {
2396 rb_matchext_t *rm = RMATCH_EXT(obj);
2397 size += onig_region_memsize(&rm->regs);
2398 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2399 }
2400 break;
2401 case T_FILE:
2402 if (RFILE(obj)->fptr) {
2403 size += rb_io_memsize(RFILE(obj)->fptr);
2404 }
2405 break;
2406 case T_RATIONAL:
2407 case T_COMPLEX:
2408 break;
2409 case T_IMEMO:
2410 size += rb_imemo_memsize(obj);
2411 break;
2412
2413 case T_FLOAT:
2414 case T_SYMBOL:
2415 break;
2416
2417 case T_BIGNUM:
2418 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2419 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2420 }
2421 break;
2422
2423 case T_NODE:
2424 UNEXPECTED_NODE(obj_memsize_of);
2425 break;
2426
2427 case T_STRUCT:
2428 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2429 RSTRUCT(obj)->as.heap.ptr) {
2430 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2431 }
2432 break;
2433
2434 case T_ZOMBIE:
2435 case T_MOVED:
2436 break;
2437
2438 default:
2439 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2440 BUILTIN_TYPE(obj), (void*)obj);
2441 }
2442
2443 return size + rb_gc_obj_slot_size(obj);
2444}
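/* This backs ObjectSpace.memsize_of in the objspace extension. A hedged
 * usage sketch from C (memsize_of_m is a hypothetical method function):
 *
 *     static VALUE
 *     memsize_of_m(VALUE self, VALUE obj)
 *     {
 *         return SIZET2NUM(rb_obj_memsize_of(obj));
 *     }
 *
 * The result is the slot size plus externally malloc'ed parts, not a
 * deep/recursive size.
 */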
2445
2446static int
2447set_zero(st_data_t key, st_data_t val, st_data_t arg)
2448{
2449 VALUE k = (VALUE)key;
2450 VALUE hash = (VALUE)arg;
2451 rb_hash_aset(hash, k, INT2FIX(0));
2452 return ST_CONTINUE;
2453}
2454
2455struct count_objects_data {
2456 size_t counts[T_MASK+1];
2457 size_t freed;
2458 size_t total;
2459};
2460
2461static void
2462count_objects_i(VALUE obj, void *d)
2463{
2464 struct count_objects_data *data = (struct count_objects_data *)d;
2465
2466 if (RBASIC(obj)->flags) {
2467 data->counts[BUILTIN_TYPE(obj)]++;
2468 }
2469 else {
2470 data->freed++;
2471 }
2472
2473 data->total++;
2474}
2475
2476/*
2477 * call-seq:
2478 * ObjectSpace.count_objects([result_hash]) -> hash
2479 *
2480 * Counts all objects grouped by type.
2481 *
2482 * It returns a hash, such as:
2483 * {
2484 * :TOTAL=>10000,
2485 * :FREE=>3011,
2486 * :T_OBJECT=>6,
2487 * :T_CLASS=>404,
2488 * # ...
2489 * }
2490 *
2491 * The contents of the returned hash are implementation specific.
2492 * They may change in the future.
2493 *
2494 * The keys starting with +:T_+ mean live objects.
2495 * For example, +:T_ARRAY+ is the number of arrays.
2496 * +:FREE+ means object slots which are not currently used.
2497 * +:TOTAL+ means the sum of the above.
2498 *
2499 * If the optional argument +result_hash+ is given,
2500 * it is overwritten and returned. This is intended to avoid probe effect.
2501 *
2502 * h = {}
2503 * ObjectSpace.count_objects(h)
2504 * puts h
2505 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2506 *
2507 * This method is only expected to work on C Ruby.
2508 *
2509 */
2510
2511static VALUE
2512count_objects(int argc, VALUE *argv, VALUE os)
2513{
2514 struct count_objects_data data = { 0 };
2515 VALUE hash = Qnil;
2516 VALUE types[T_MASK + 1];
2517
2518 if (rb_check_arity(argc, 0, 1) == 1) {
2519 hash = argv[0];
2520 if (!RB_TYPE_P(hash, T_HASH))
2521 rb_raise(rb_eTypeError, "non-hash given");
2522 }
2523
2524 for (size_t i = 0; i <= T_MASK; i++) {
2525 // type_sym can allocate an object,
2526 // so we need to create all key symbols in advance
2527 // so as not to disturb the result
2528 types[i] = type_sym(i);
2529 }
2530
2531 // Same as type_sym, we need to create all key symbols in advance
2532 VALUE total = ID2SYM(rb_intern("TOTAL"));
2533 VALUE free = ID2SYM(rb_intern("FREE"));
2534
2535 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2536
2537 if (NIL_P(hash)) {
2538 hash = rb_hash_new();
2539 }
2540 else if (!RHASH_EMPTY_P(hash)) {
2541 rb_hash_stlike_foreach(hash, set_zero, hash);
2542 }
2543 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2544 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2545
2546 for (size_t i = 0; i <= T_MASK; i++) {
2547 if (data.counts[i]) {
2548 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2549 }
2550 }
2551
2552 return hash;
2553}
2554
2555#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2556
2557#define STACK_START (ec->machine.stack_start)
2558#define STACK_END (ec->machine.stack_end)
2559#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2560
2561#if STACK_GROW_DIRECTION < 0
2562# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2563#elif STACK_GROW_DIRECTION > 0
2564# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2565#else
2566# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2567 : (size_t)(STACK_END - STACK_START + 1))
2568#endif
2569#if !STACK_GROW_DIRECTION
2570int ruby_stack_grow_direction;
2571int
2572ruby_get_stack_grow_direction(volatile VALUE *addr)
2573{
2574 VALUE *end;
2575 SET_MACHINE_STACK_END(&end);
2576
2577 if (end > addr) return ruby_stack_grow_direction = 1;
2578 return ruby_stack_grow_direction = -1;
2579}
2580#endif
2581
2582size_t
2583ruby_stack_length(VALUE **p)
2584{
2585 rb_execution_context_t *ec = GET_EC();
2586 SET_STACK_END;
2587 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2588 return STACK_LENGTH;
2589}
2590
2591#define PREVENT_STACK_OVERFLOW 1
2592#ifndef PREVENT_STACK_OVERFLOW
2593#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2594# define PREVENT_STACK_OVERFLOW 1
2595#else
2596# define PREVENT_STACK_OVERFLOW 0
2597#endif
2598#endif
2599#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2600static int
2601stack_check(rb_execution_context_t *ec, int water_mark)
2602{
2603 SET_STACK_END;
2604
2605 size_t length = STACK_LENGTH;
2606 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2607
2608 return length > maximum_length;
2609}
2610#else
2611#define stack_check(ec, water_mark) FALSE
2612#endif
2613
2614#define STACKFRAME_FOR_CALL_CFUNC 2048
2615
2616int
2617rb_ec_stack_check(rb_execution_context_t *ec)
2618{
2619 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2620}
2621
2622int
2623ruby_stack_check(void)
2624{
2625 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2626}
2627
2628/* ==================== Marking ==================== */
2629
2630#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2631 if (!RB_SPECIAL_CONST_P(obj)) { \
2632 rb_vm_t *vm = GET_VM(); \
2633 void *objspace = vm->gc.objspace; \
2634 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2635 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2636 (func)(objspace, (obj_or_ptr)); \
2637 } \
2638 else if (check_obj ? \
2639 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2640 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2641 true) { \
2642 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2643 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2644 vm->gc.mark_func_data = NULL; \
2645 mark_func_data->mark_func((obj), mark_func_data->data); \
2646 vm->gc.mark_func_data = mark_func_data; \
2647 } \
2648 } \
2649} while (0)
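/* When vm->gc.mark_func_data is set, "marking" degrades into a traversal
 * callback instead of an actual mark; this is how reachability-style APIs
 * are built on top of the marker. A sketch (my_reach_cb and ctx are
 * hypothetical):
 *
 *     static void my_reach_cb(VALUE child, void *data) { ... }
 *
 *     struct gc_mark_func_data_struct mfd = {
 *         .mark_func = my_reach_cb,
 *         .data = &ctx,
 *     };
 *     GET_VM()->gc.mark_func_data = &mfd;
 *     rb_gc_mark_children(rb_gc_get_objspace(), obj); // invokes my_reach_cb per child
 *     GET_VM()->gc.mark_func_data = NULL;
 */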
2650
2651static inline void
2652gc_mark_internal(VALUE obj)
2653{
2654 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2655}
2656
2657void
2658rb_gc_mark_movable(VALUE obj)
2659{
2660 gc_mark_internal(obj);
2661}
2662
2663void
2664rb_gc_mark_and_move(VALUE *ptr)
2665{
2666 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2667}
2668
2669static inline void
2670gc_mark_and_pin_internal(VALUE obj)
2671{
2672 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2673}
2674
2675void
2676rb_gc_mark(VALUE obj)
2677{
2678 gc_mark_and_pin_internal(obj);
2679}
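/* Typical use from a TypedData dmark function (sketch; struct my_data is
 * hypothetical): rb_gc_mark_movable() permits compaction and must be paired
 * with rb_gc_location() in dcompact, while rb_gc_mark() pins the object.
 *
 *     static void
 *     my_mark(void *ptr)
 *     {
 *         struct my_data *d = ptr;
 *         rb_gc_mark_movable(d->name);  // may move; fix up in dcompact
 *         rb_gc_mark(d->handle);        // its address escapes to C, so pin it
 *     }
 */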
2680
2681static inline void
2682gc_mark_maybe_internal(VALUE obj)
2683{
2684 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2685}
2686
2687void
2688rb_gc_mark_maybe(VALUE obj)
2689{
2690 gc_mark_maybe_internal(obj);
2691}
2692
2693void
2694rb_gc_mark_weak(VALUE *ptr)
2695{
2696 if (RB_SPECIAL_CONST_P(*ptr)) return;
2697
2698 rb_vm_t *vm = GET_VM();
2699 void *objspace = vm->gc.objspace;
2700 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2701 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2702
2703 rb_gc_impl_mark_weak(objspace, ptr);
2704 }
2705 else {
2706 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2707 }
2708}
2709
2710void
2711rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2712{
2713 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2714}
2715
2716ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2717static void
2718each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2719{
2720 VALUE v;
2721 while (n--) {
2722 v = *x;
2723 cb(v, data);
2724 x++;
2725 }
2726}
2727
2728static void
2729each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2730{
2731 if (end <= start) return;
2732 each_location(start, end - start, cb, data);
2733}
2734
2735static void
2736gc_mark_maybe_each_location(VALUE obj, void *data)
2737{
2738 gc_mark_maybe_internal(obj);
2739}
2740
2741void
2742rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2743{
2744 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2745}
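/* The [start, end) range is end-exclusive, and marking is conservative
 * (gc_mark_maybe). A sketch for a C-level buffer of VALUEs (buf is
 * hypothetical):
 *
 *     VALUE buf[2] = { str, ary };
 *     rb_gc_mark_locations(buf, buf + 2); // marks buf[0] and buf[1]
 */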
2746
2747void
2748rb_gc_mark_values(long n, const VALUE *values)
2749{
2750 for (long i = 0; i < n; i++) {
2751 gc_mark_internal(values[i]);
2752 }
2753}
2754
2755void
2756rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2757{
2758 for (long i = 0; i < n; i++) {
2759 gc_mark_and_pin_internal(values[i]);
2760 }
2761}
2762
2763static int
2764mark_key(st_data_t key, st_data_t value, st_data_t data)
2765{
2766 gc_mark_and_pin_internal((VALUE)key);
2767
2768 return ST_CONTINUE;
2769}
2770
2771void
2772rb_mark_set(st_table *tbl)
2773{
2774 if (!tbl) return;
2775
2776 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2777}
2778
2779static int
2780mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2781{
2782 gc_mark_internal((VALUE)key);
2783 gc_mark_internal((VALUE)value);
2784
2785 return ST_CONTINUE;
2786}
2787
2788static int
2789pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2790{
2791 gc_mark_and_pin_internal((VALUE)key);
2792 gc_mark_and_pin_internal((VALUE)value);
2793
2794 return ST_CONTINUE;
2795}
2796
2797static int
2798pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2799{
2800 gc_mark_and_pin_internal((VALUE)key);
2801 gc_mark_internal((VALUE)value);
2802
2803 return ST_CONTINUE;
2804}
2805
2806static void
2807mark_hash(VALUE hash)
2808{
2809 if (rb_hash_compare_by_id_p(hash)) {
2810 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2811 }
2812 else {
2813 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2814 }
2815
2816 gc_mark_internal(RHASH(hash)->ifnone);
2817}
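/* Keys of compare-by-identity hashes are hashed by address, so moving them
 * would corrupt the table; that is why pin_key_mark_value() pins them. In
 * Ruby terms (illustrative): keys of `{}.compare_by_identity` stay pinned,
 * while keys of a plain `{}` may be moved by compaction. */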
2818
2819void
2820rb_mark_hash(st_table *tbl)
2821{
2822 if (!tbl) return;
2823
2824 st_foreach(tbl, pin_key_pin_value, 0);
2825}
2826
2827static enum rb_id_table_iterator_result
2828mark_method_entry_i(VALUE me, void *objspace)
2829{
2830 gc_mark_internal(me);
2831
2832 return ID_TABLE_CONTINUE;
2833}
2834
2835static void
2836mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2837{
2838 if (tbl) {
2839 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2840 }
2841}
2842
2843static enum rb_id_table_iterator_result
2844mark_const_entry_i(VALUE value, void *objspace)
2845{
2846 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2847
2848 gc_mark_internal(ce->value);
2849 gc_mark_internal(ce->file);
2850 return ID_TABLE_CONTINUE;
2851}
2852
2853static void
2854mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2855{
2856 if (!tbl) return;
2857 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
2858}
2859
2860static enum rb_id_table_iterator_result
2861mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2862{
2863 struct rb_cvar_class_tbl_entry *entry;
2864
2865 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2866
2867 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2868 gc_mark_internal((VALUE)entry->cref);
2869
2870 return ID_TABLE_CONTINUE;
2871}
2872
2873static void
2874mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2875{
2876 if (!tbl) return;
2877 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2878}
2879
2880#if STACK_GROW_DIRECTION < 0
2881#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2882#elif STACK_GROW_DIRECTION > 0
2883#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2884#else
2885#define GET_STACK_BOUNDS(start, end, appendix) \
2886 ((STACK_END < STACK_START) ? \
2887 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2888#endif
2889
2890static void
2891gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2892{
2893 gc_mark_maybe_internal(obj);
2894
2895#ifdef RUBY_ASAN_ENABLED
2896 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2897 void *fake_frame_start;
2898 void *fake_frame_end;
2899 bool is_fake_frame = asan_get_fake_stack_extents(
2900 ec->machine.asan_fake_stack_handle, obj,
2901 ec->machine.stack_start, ec->machine.stack_end,
2902 &fake_frame_start, &fake_frame_end
2903 );
2904 if (is_fake_frame) {
2905 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2906 }
2907#endif
2908}
2909
2910static VALUE
2911gc_location_internal(void *objspace, VALUE value)
2912{
2913 if (SPECIAL_CONST_P(value)) {
2914 return value;
2915 }
2916
2917 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2918
2919 return rb_gc_impl_location(objspace, value);
2920}
2921
2922VALUE
2923rb_gc_location(VALUE value)
2924{
2925 return gc_location_internal(rb_gc_get_objspace(), value);
2926}
2927
2928#if defined(__wasm__)
2929
2930
2931static VALUE *rb_stack_range_tmp[2];
2932
2933static void
2934rb_mark_locations(void *begin, void *end)
2935{
2936 rb_stack_range_tmp[0] = begin;
2937 rb_stack_range_tmp[1] = end;
2938}
2939
2940void
2941rb_gc_save_machine_context(void)
2942{
2943 // no-op
2944}
2945
2946# if defined(__EMSCRIPTEN__)
2947
2948static void
2949mark_current_machine_context(const rb_execution_context_t *ec)
2950{
2951 emscripten_scan_stack(rb_mark_locations);
2952 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2953
2954 emscripten_scan_registers(rb_mark_locations);
2955 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2956}
2957# else // use Asyncify version
2958
2959static void
2960mark_current_machine_context(rb_execution_context_t *ec)
2961{
2962 VALUE *stack_start, *stack_end;
2963 SET_STACK_END;
2964 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2965 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2966
2967 rb_wasm_scan_locals(rb_mark_locations);
2968 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2969}
2970
2971# endif
2972
2973#else // !defined(__wasm__)
2974
2975void
2976rb_gc_save_machine_context(void)
2977{
2978 rb_thread_t *thread = GET_THREAD();
2979
2980 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2981}
2982
2983
2984static void
2985mark_current_machine_context(const rb_execution_context_t *ec)
2986{
2987 rb_gc_mark_machine_context(ec);
2988}
2989#endif
2990
2991void
2992rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2993{
2994 VALUE *stack_start, *stack_end;
2995
2996 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2997 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2998
2999 void *data =
3000#ifdef RUBY_ASAN_ENABLED
3001 /* gc_mark_machine_stack_location_maybe() uses data as const */
3002 (rb_execution_context_t *)ec;
3003#else
3004 NULL;
3005#endif
3006
3007 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3008 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3009 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3010}
3011
3012static int
3013rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3014{
3015 gc_mark_and_pin_internal((VALUE)value);
3016
3017 return ST_CONTINUE;
3018}
3019
3020void
3021rb_mark_tbl(st_table *tbl)
3022{
3023 if (!tbl || tbl->num_entries == 0) return;
3024
3025 st_foreach(tbl, rb_mark_tbl_i, 0);
3026}
3027
3028static void
3029gc_mark_tbl_no_pin(st_table *tbl)
3030{
3031 if (!tbl || tbl->num_entries == 0) return;
3032
3033 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3034}
3035
3036void
3037rb_mark_tbl_no_pin(st_table *tbl)
3038{
3039 gc_mark_tbl_no_pin(tbl);
3040}
3041
3042static bool
3043gc_declarative_marking_p(const rb_data_type_t *type)
3044{
3045 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3046}
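/* With RUBY_TYPED_DECL_MARKING, dmark is reinterpreted as a pointer to a
 * RUBY_REF_END-terminated list of field offsets (see
 * TYPED_DATA_REFS_OFFSET_LIST below). A hedged sketch under that assumption
 * (struct my_data, my_refs, and my_type are hypothetical; in practice the
 * RUBY_REFERENCES()/RUBY_REF_EDGE() helper macros build this list):
 *
 *     struct my_data { VALUE name; VALUE items; };
 *     static size_t my_refs[] = {
 *         offsetof(struct my_data, name),
 *         offsetof(struct my_data, items),
 *         RUBY_REF_END,
 *     };
 *     static const rb_data_type_t my_type = {
 *         .wrap_struct_name = "my_data",
 *         .function = { .dmark = (RUBY_DATA_FUNC)(uintptr_t)my_refs },
 *         .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */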
3047
3048void
3049rb_gc_mark_roots(void *objspace, const char **categoryp)
3050{
3051 rb_execution_context_t *ec = GET_EC();
3052 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3053
3054#define MARK_CHECKPOINT(category) do { \
3055 if (categoryp) *categoryp = category; \
3056} while (0)
3057
3058 MARK_CHECKPOINT("vm");
3059 rb_vm_mark(vm);
3060
3061 MARK_CHECKPOINT("end_proc");
3062 rb_mark_end_proc();
3063
3064 MARK_CHECKPOINT("global_tbl");
3065 rb_gc_mark_global_tbl();
3066
3067#if USE_YJIT
3068 void rb_yjit_root_mark(void); // in Rust
3069
3070 if (rb_yjit_enabled_p) {
3071 MARK_CHECKPOINT("YJIT");
3072 rb_yjit_root_mark();
3073 }
3074#endif
3075
3076#if USE_ZJIT
3077 void rb_zjit_root_mark(void);
3078 if (rb_zjit_enabled_p) {
3079 MARK_CHECKPOINT("ZJIT");
3080 rb_zjit_root_mark();
3081 }
3082#endif
3083
3084 MARK_CHECKPOINT("machine_context");
3085 mark_current_machine_context(ec);
3086
3087 MARK_CHECKPOINT("global_symbols");
3088 rb_sym_global_symbols_mark_and_move();
3089
3090 MARK_CHECKPOINT("finish");
3091
3092#undef MARK_CHECKPOINT
3093}
3094
3095struct gc_mark_classext_foreach_arg {
3096 rb_objspace_t *objspace;
3097 VALUE obj;
3098};
3099
3100static void
3101gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3102{
3103 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3104 rb_objspace_t *objspace = foreach_arg->objspace;
3105
3106 if (RCLASSEXT_SUPER(ext)) {
3107 gc_mark_internal(RCLASSEXT_SUPER(ext));
3108 }
3109 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3110 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3111 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3112 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3113 }
3114 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3115 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3116 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3117 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3118}
3119
3120static void
3121gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3122{
3123 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3124 rb_objspace_t *objspace = foreach_arg->objspace;
3125
3126 if (RCLASSEXT_SUPER(ext)) {
3127 gc_mark_internal(RCLASSEXT_SUPER(ext));
3128 }
3129 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3130 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3131 }
3132 if (RCLASSEXT_INCLUDER(ext)) {
3133 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3134 }
3135 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3136 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3137}
3138
3139#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3140
3141void
3142rb_gc_mark_children(void *objspace, VALUE obj)
3143{
3144 struct gc_mark_classext_foreach_arg foreach_args;
3145
3146 if (rb_obj_exivar_p(obj)) {
3147 rb_mark_generic_ivar(obj);
3148 }
3149
3150 switch (BUILTIN_TYPE(obj)) {
3151 case T_FLOAT:
3152 case T_BIGNUM:
3153 return;
3154
3155 case T_NIL:
3156 case T_FIXNUM:
3157 rb_bug("rb_gc_mark() called for broken object");
3158 break;
3159
3160 case T_NODE:
3161 UNEXPECTED_NODE(rb_gc_mark);
3162 break;
3163
3164 case T_IMEMO:
3165 rb_imemo_mark_and_move(obj, false);
3166 return;
3167
3168 default:
3169 break;
3170 }
3171
3172 gc_mark_internal(RBASIC(obj)->klass);
3173
3174 switch (BUILTIN_TYPE(obj)) {
3175 case T_CLASS:
3176 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
3177 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3178 }
3179 // Continue to the shared T_CLASS/T_MODULE
3180 case T_MODULE:
3181 foreach_args.objspace = objspace;
3182 foreach_args.obj = obj;
3183 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3184 break;
3185
3186 case T_ICLASS:
3187 foreach_args.objspace = objspace;
3188 foreach_args.obj = obj;
3189 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3190 break;
3191
3192 case T_ARRAY:
3193 if (ARY_SHARED_P(obj)) {
3194 VALUE root = ARY_SHARED_ROOT(obj);
3195 gc_mark_internal(root);
3196 }
3197 else {
3198 long len = RARRAY_LEN(obj);
3199 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3200 for (long i = 0; i < len; i++) {
3201 gc_mark_internal(ptr[i]);
3202 }
3203 }
3204 break;
3205
3206 case T_HASH:
3207 mark_hash(obj);
3208 break;
3209
3210 case T_SYMBOL:
3211 gc_mark_internal(RSYMBOL(obj)->fstr);
3212 break;
3213
3214 case T_STRING:
3215 if (STR_SHARED_P(obj)) {
3216 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3217 /* Embedded shared strings cannot be moved because this string
3218 * points into the slot of the shared string. There may be code
3219 * using the RSTRING_PTR on the stack, which would pin this
3220 * string but not pin the shared string, causing it to move. */
3221 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3222 }
3223 else {
3224 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3225 }
3226 }
3227 break;
3228
3229 case T_DATA: {
3230 bool typed_data = RTYPEDDATA_P(obj);
3231 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3232
3233 if (typed_data) {
3234 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3235 }
3236
3237 if (ptr) {
3238 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3239 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3240
3241 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3242 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3243 }
3244 }
3245 else {
3246 RUBY_DATA_FUNC mark_func = typed_data ?
3247 RTYPEDDATA_TYPE(obj)->function.dmark :
3248 RDATA(obj)->dmark;
3249 if (mark_func) (*mark_func)(ptr);
3250 }
3251 }
3252
3253 break;
3254 }
3255
3256 case T_OBJECT: {
3257 if (rb_shape_obj_too_complex_p(obj)) {
3258 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3259 }
3260 else {
3261 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3262
3263 uint32_t len = ROBJECT_FIELDS_COUNT(obj);
3264 for (uint32_t i = 0; i < len; i++) {
3265 gc_mark_internal(ptr[i]);
3266 }
3267 }
3268
3269 attr_index_t fields_count = ROBJECT_FIELDS_COUNT(obj);
3270 if (fields_count) {
3271 VALUE klass = RBASIC_CLASS(obj);
3272
3273 // Increment max_iv_count if applicable, used to determine size pool allocation
3274 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3275 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3276 }
3277 }
3278
3279 break;
3280 }
3281
3282 case T_FILE:
3283 if (RFILE(obj)->fptr) {
3284 gc_mark_internal(RFILE(obj)->fptr->self);
3285 gc_mark_internal(RFILE(obj)->fptr->pathv);
3286 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3287 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3288 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3289 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3290 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3291 gc_mark_internal(RFILE(obj)->fptr->timeout);
3292 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3293 }
3294 break;
3295
3296 case T_REGEXP:
3297 gc_mark_internal(RREGEXP(obj)->src);
3298 break;
3299
3300 case T_MATCH:
3301 gc_mark_internal(RMATCH(obj)->regexp);
3302 if (RMATCH(obj)->str) {
3303 gc_mark_internal(RMATCH(obj)->str);
3304 }
3305 break;
3306
3307 case T_RATIONAL:
3308 gc_mark_internal(RRATIONAL(obj)->num);
3309 gc_mark_internal(RRATIONAL(obj)->den);
3310 break;
3311
3312 case T_COMPLEX:
3313 gc_mark_internal(RCOMPLEX(obj)->real);
3314 gc_mark_internal(RCOMPLEX(obj)->imag);
3315 break;
3316
3317 case T_STRUCT: {
3318 const long len = RSTRUCT_LEN(obj);
3319 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3320
3321 for (long i = 0; i < len; i++) {
3322 gc_mark_internal(ptr[i]);
3323 }
3324
3325 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3326 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3327 }
3328
3329 break;
3330 }
3331
3332 default:
3333 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3334 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3335 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3336 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3337 BUILTIN_TYPE(obj), (void *)obj,
3338 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3339 }
3340}
3341
3342size_t
3343rb_gc_obj_optimal_size(VALUE obj)
3344{
3345 switch (BUILTIN_TYPE(obj)) {
3346 case T_ARRAY:
3347 return rb_ary_size_as_embedded(obj);
3348
3349 case T_OBJECT:
3350 if (rb_shape_obj_too_complex_p(obj)) {
3351 return sizeof(struct RObject);
3352 }
3353 else {
3354 return rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3355 }
3356
3357 case T_STRING:
3358 return rb_str_size_as_embedded(obj);
3359
3360 case T_HASH:
3361 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3362
3363 default:
3364 return 0;
3365 }
3366}
3367
3368void
3369rb_gc_writebarrier(VALUE a, VALUE b)
3370{
3371 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3372}
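/* Extensions rarely call this directly; RB_OBJ_WRITE() pairs the pointer
 * store with the barrier. Sketch (struct my_data is hypothetical):
 *
 *     struct my_data *d = RTYPEDDATA_GET_DATA(self);
 *     RB_OBJ_WRITE(self, &d->name, rb_str_new_cstr("x")); // store + barrier(a=self, b=str)
 */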
3373
3374void
3375rb_gc_writebarrier_unprotect(VALUE obj)
3376{
3377 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3378}
3379
3380/*
3381 * remember `obj' if needed.
3382 */
3383void
3384rb_gc_writebarrier_remember(VALUE obj)
3385{
3386 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3387}
3388
3389void
3390rb_gc_copy_attributes(VALUE dest, VALUE obj)
3391{
3392 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3393}
3394
3395int
3396rb_gc_modular_gc_loaded_p(void)
3397{
3398#if USE_MODULAR_GC
3399 return rb_gc_functions.modular_gc_loaded_p;
3400#else
3401 return false;
3402#endif
3403}
3404
3405const char *
3406rb_gc_active_gc_name(void)
3407{
3408 const char *gc_name = rb_gc_impl_active_gc_name();
3409
3410 const size_t len = strlen(gc_name);
3411 if (len > RB_GC_MAX_NAME_LEN) {
3412 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3413 RB_GC_MAX_NAME_LEN, len, gc_name);
3414 }
3415
3416 return gc_name;
3417}
3418
3419rb_gc_object_metadata_entry_t *
3420rb_gc_object_metadata(VALUE obj)
3421{
3422 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3423}
3424
3425/* GC */
3426
3427void *
3428rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3429{
3430 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3431}
3432
3433void
3434rb_gc_ractor_cache_free(void *cache)
3435{
3436 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3437}
3438
3439void
3440rb_gc_register_mark_object(VALUE obj)
3441{
3442 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3443 return;
3444
3445 rb_vm_register_global_object(obj);
3446}
3447
3448void
3449rb_gc_register_address(VALUE *addr)
3450{
3451 rb_vm_t *vm = GET_VM();
3452
3453 VALUE obj = *addr;
3454
3455 struct global_object_list *tmp = ALLOC(struct global_object_list);
3456 tmp->next = vm->global_object_list;
3457 tmp->varptr = addr;
3458 vm->global_object_list = tmp;
3459
3460 /*
3461 * Because some C extensions have assignment-then-register bugs,
3462 * we defensively guard `obj` here so that it does not get swept.
3463 */
3464 RB_GC_GUARD(obj);
3465 if (0 && !SPECIAL_CONST_P(obj)) {
3466 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3467 rb_obj_class(obj));
3468 rb_print_backtrace(stderr);
3469 }
3470}
3471
3472void
3473rb_gc_unregister_address(VALUE *addr)
3474{
3475 rb_vm_t *vm = GET_VM();
3476 struct global_object_list *tmp = vm->global_object_list;
3477
3478 if (tmp->varptr == addr) {
3479 vm->global_object_list = tmp->next;
3480 xfree(tmp);
3481 return;
3482 }
3483 while (tmp->next) {
3484 if (tmp->next->varptr == addr) {
3485 struct global_object_list *t = tmp->next;
3486
3487 tmp->next = tmp->next->next;
3488 xfree(t);
3489 break;
3490 }
3491 tmp = tmp->next;
3492 }
3493}
3494
3495void
3496rb_global_variable(VALUE *var)
3497{
3498 rb_gc_register_address(var);
3499}
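/* Typical extension usage: register a C global holding a VALUE so the GC
 * treats it as a root and keeps it alive. Sketch (my_cache and Init_my_ext
 * are hypothetical):
 *
 *     static VALUE my_cache;
 *
 *     void
 *     Init_my_ext(void)
 *     {
 *         my_cache = rb_ary_new();
 *         rb_gc_register_address(&my_cache); // pair with rb_gc_unregister_address
 *     }
 */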
3500
3501static VALUE
3502gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3503{
3504 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3505
3506 return Qnil;
3507}
3508
3509/*
3510 * rb_objspace_each_objects() is a special C API to walk through the
3511 * Ruby object space. This C API is difficult to use safely.
3512 * Frankly, you should not use it unless you have read the
3513 * source code of this function and understand what it does.
3514 *
3515 * 'callback' will be called several times (once per heap page in
3516 * the current implementation) with:
3517 * vstart: a pointer to the first living object of the heap_page.
3518 * vend: a pointer to next to the valid heap_page area.
3519 * stride: a distance to next VALUE.
3520 *
3521 * If callback() returns non-zero, the iteration will be stopped.
3522 *
3523 * Here is a sample callback that iterates over live objects:
3524 *
3525 * static int
3526 * sample_callback(void *vstart, void *vend, int stride, void *data)
3527 * {
3528 * VALUE v = (VALUE)vstart;
3529 * for (; v != (VALUE)vend; v += stride) {
3530 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3531 * // do something with live object 'v'
3532 * }
3533 * }
3534 * return 0; // continue to iteration
3535 * }
3536 *
3537 * Note: 'vstart' is not the top of the heap_page. It points at the
3538 * first living object, so that at least one object is grasped and
3539 * GC issues are avoided. This means you cannot walk through all
3540 * object slots of a page, including those of freed objects.
3541 *
3542 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3543 * However, variable stride values may be passed for some reason,
3544 * so you must use 'stride' rather than a hard-coded constant
3545 * in the iteration.
3546 */
3547void
3548rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3549{
3550 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3551}
3552
3553static void
3554gc_ref_update_array(void *objspace, VALUE v)
3555{
3556 if (ARY_SHARED_P(v)) {
3557 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3558
3559 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3560
3561 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3562 // If the root is embedded and its location has changed
3563 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3564 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3565 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3566 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3567 }
3568 }
3569 else {
3570 long len = RARRAY_LEN(v);
3571
3572 if (len > 0) {
3573 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3574 for (long i = 0; i < len; i++) {
3575 UPDATE_IF_MOVED(objspace, ptr[i]);
3576 }
3577 }
3578
3579 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3580 if (rb_ary_embeddable_p(v)) {
3581 rb_ary_make_embedded(v);
3582 }
3583 }
3584 }
3585}
3586
3587static void
3588gc_ref_update_object(void *objspace, VALUE v)
3589{
3590 VALUE *ptr = ROBJECT_FIELDS(v);
3591
3592 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3593 if (rb_shape_obj_too_complex_p(v)) {
3594 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3595 return;
3596 }
3597
3598 size_t slot_size = rb_gc_obj_slot_size(v);
3599 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3600 if (slot_size >= embed_size) {
3601 // Object can be re-embedded
3602 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3603 FL_UNSET_RAW(v, ROBJECT_HEAP);
3604 xfree(ptr);
3605 ptr = ROBJECT(v)->as.ary;
3606 }
3607 }
3608
3609 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3610 UPDATE_IF_MOVED(objspace, ptr[i]);
3611 }
3612}
3613
3614void
3615rb_gc_ref_update_table_values_only(st_table *tbl)
3616{
3617 gc_ref_update_table_values_only(tbl);
3618}
3619
3620/* Update MOVED references in a VALUE=>VALUE st_table */
3621void
3622rb_gc_update_tbl_refs(st_table *ptr)
3623{
3624 gc_update_table_refs(ptr);
3625}
3626
3627static void
3628gc_ref_update_hash(void *objspace, VALUE v)
3629{
3630 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3631}
3632
3633static void
3634gc_update_values(void *objspace, long n, VALUE *values)
3635{
3636 for (long i = 0; i < n; i++) {
3637 UPDATE_IF_MOVED(objspace, values[i]);
3638 }
3639}
3640
3641void
3642rb_gc_update_values(long n, VALUE *values)
3643{
3644 gc_update_values(rb_gc_get_objspace(), n, values);
3645}
3646
3647static enum rb_id_table_iterator_result
3648check_id_table_move(VALUE value, void *data)
3649{
3650 void *objspace = (void *)data;
3651
3652 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3653 return ID_TABLE_REPLACE;
3654 }
3655
3656 return ID_TABLE_CONTINUE;
3657}
3658
3659void
3660rb_gc_prepare_heap_process_object(VALUE obj)
3661{
3662 switch (BUILTIN_TYPE(obj)) {
3663 case T_STRING:
3664 // Precompute the string coderange. This both saves time for when it is
3665 // eventually needed, and avoids mutating heap pages after a potential fork.
3666 rb_enc_str_coderange(obj);
3667 break;
3668 default:
3669 break;
3670 }
3671}
3672
3673void
3674rb_gc_prepare_heap(void)
3675{
3676 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3677}
3678
3679size_t
3680rb_gc_heap_id_for_size(size_t size)
3681{
3682 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3683}
3684
3685bool
3686rb_gc_size_allocatable_p(size_t size)
3687{
3688 return rb_gc_impl_size_allocatable_p(size);
3689}
3690
3691static enum rb_id_table_iterator_result
3692update_id_table(VALUE *value, void *data, int existing)
3693{
3694 void *objspace = (void *)data;
3695
3696 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3697 *value = gc_location_internal(objspace, (VALUE)*value);
3698 }
3699
3700 return ID_TABLE_CONTINUE;
3701}
3702
3703static void
3704update_m_tbl(void *objspace, struct rb_id_table *tbl)
3705{
3706 if (tbl) {
3707 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3708 }
3709}
3710
3711static enum rb_id_table_iterator_result
3712update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3713{
3714 struct rb_cvar_class_tbl_entry *entry;
3715
3716 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3717
3718 if (entry->cref) {
3719 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3720 }
3721
3722 entry->class_value = gc_location_internal(objspace, entry->class_value);
3723
3724 return ID_TABLE_CONTINUE;
3725}
3726
3727static void
3728update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3729{
3730 if (!tbl) return;
3731 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3732}
3733
3734static enum rb_id_table_iterator_result
3735update_const_tbl_i(VALUE value, void *objspace)
3736{
3737 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3738
3739 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3740 ce->value = gc_location_internal(objspace, ce->value);
3741 }
3742
3743 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3744 ce->file = gc_location_internal(objspace, ce->file);
3745 }
3746
3747 return ID_TABLE_CONTINUE;
3748}
3749
3750static void
3751update_const_tbl(void *objspace, struct rb_id_table *tbl)
3752{
3753 if (!tbl) return;
3754 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3755}
3756
3757static void
3758update_subclasses(void *objspace, rb_classext_t *ext)
3759{
3760 rb_subclass_entry_t *entry;
3761 rb_subclass_anchor_t *anchor = RCLASSEXT_SUBCLASSES(ext);
3762 if (!anchor) return;
3763 entry = anchor->head;
3764 while (entry) {
3765 if (entry->klass)
3766 UPDATE_IF_MOVED(objspace, entry->klass);
3767 entry = entry->next;
3768 }
3769}
3770
3771static void
3772update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
3773{
3774 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
3775 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
3776 for (size_t i = 0; i < array_size; i++) {
3777 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
3778 }
3779 }
3780}
3781
3782static void
3783update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
3784{
3785 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
3786 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
3787 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
3788 if (is_iclass) {
3789 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
3790 }
3791}
3792
3793static void
3794update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3795{
3796 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3797 rb_objspace_t *objspace = args->objspace;
3798
3799 if (RCLASSEXT_SUPER(ext)) {
3800 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3801 }
3802
3803 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3804
3805 UPDATE_IF_MOVED(objspace, ext->fields_obj);
3806 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
3807 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3808 }
3809 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3810 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3811 update_superclasses(objspace, ext);
3812 update_subclasses(objspace, ext);
3813
3814 update_classext_values(objspace, ext, false);
3815}
3816
3817static void
3818update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3819{
3820 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3821 rb_objspace_t *objspace = args->objspace;
3822
3823 if (RCLASSEXT_SUPER(ext)) {
3824 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3825 }
3826 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3827 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3828 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3829 update_subclasses(objspace, ext);
3830
3831 update_classext_values(objspace, ext, true);
3832}
3833
3834struct global_vm_table_foreach_data {
3835 vm_table_foreach_callback_func callback;
3836 vm_table_update_callback_func update_callback;
3837 void *data;
3838 bool weak_only;
3839};
3840
3841static int
3842vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3843{
3844 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3845
3846 int ret = iter_data->callback((VALUE)key, iter_data->data);
3847
3848 if (!iter_data->weak_only) {
3849 if (ret != ST_CONTINUE) return ret;
3850
3851 ret = iter_data->callback((VALUE)value, iter_data->data);
3852 }
3853
3854 return ret;
3855}
3856
3857static int
3858vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3859{
3860 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3861
3862 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3863
3864 if (!iter_data->weak_only) {
3865 if (ret != ST_CONTINUE) return ret;
3866
3867 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3868 }
3869
3870 return ret;
3871}
3872
3873static int
3874vm_weak_table_cc_refinement_foreach(st_data_t key, st_data_t data, int error)
3875{
3876 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3877
3878 return iter_data->callback((VALUE)key, iter_data->data);
3879}
3880
3881static int
3882vm_weak_table_cc_refinement_foreach_update_update(st_data_t *key, st_data_t data, int existing)
3883{
3884 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3885
3886 return iter_data->update_callback((VALUE *)key, iter_data->data);
3887}
3888
3889
3890static int
3891vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
3892{
3893 VALUE sym = *sym_ptr;
3894 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3895
3896 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
3897
3898 int ret = iter_data->callback(sym, iter_data->data);
3899
3900 if (ret == ST_REPLACE) {
3901 ret = iter_data->update_callback(sym_ptr, iter_data->data);
3902 }
3903
3904 return ret;
3905}
3906
3907struct st_table *rb_generic_fields_tbl_get(void);
3908
3909static int
3910vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3911{
3912 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3913
3914 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
3915 int ret = iter_data->callback((VALUE)key, iter_data->data);
3916 if (ret != ST_CONTINUE) return ret;
3917 }
3918
3919 return iter_data->callback((VALUE)value, iter_data->data);
3920}
3921
3922static int
3923vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3924{
3925 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3926
3927 iter_data->update_callback((VALUE *)value, iter_data->data);
3928
3929 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
3930 iter_data->update_callback((VALUE *)key, iter_data->data);
3931 }
3932
3933 return ST_CONTINUE;
3934}
3935
3936static int
3937vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
3938{
3939 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3940
3941 int ret = iter_data->callback((VALUE)key, iter_data->data);
3942
3943 VALUE new_value = (VALUE)value;
3944 VALUE new_key = (VALUE)key;
3945
3946 switch (ret) {
3947 case ST_CONTINUE:
3948 break;
3949
3950 case ST_DELETE:
3951 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
3952 return ST_DELETE;
3953
3954 case ST_REPLACE: {
3955 ret = iter_data->update_callback(&new_key, iter_data->data);
3956 if (key != new_key) {
3957 ret = ST_DELETE;
3958 }
3959 break;
3960 }
3961
3962 default:
3963 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
3964 }
3965
3966 if (!iter_data->weak_only) {
3967 int ivar_ret = iter_data->callback(new_value, iter_data->data);
3968 switch (ivar_ret) {
3969 case ST_CONTINUE:
3970 break;
3971
3972 case ST_REPLACE:
3973 iter_data->update_callback(&new_value, iter_data->data);
3974 break;
3975
3976 default:
3977 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
3978 }
3979 }
3980
3981 if (key != new_key || value != new_value) {
3982 DURING_GC_COULD_MALLOC_REGION_START();
3983 {
3984 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
3985 }
3986 DURING_GC_COULD_MALLOC_REGION_END();
3987 }
3988
3989 return ret;
3990}
3991
3992static int
3993vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
3994{
3995 // int retval = vm_weak_table_foreach_weak_key(key, value, data, error);
3996 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3997 int retval = iter_data->callback(*str, iter_data->data);
3998
3999 if (retval == ST_REPLACE) {
4000 retval = iter_data->update_callback(str, iter_data->data);
4001 }
4002
4003 if (retval == ST_DELETE) {
4004 FL_UNSET(*str, RSTRING_FSTR);
4005 }
4006
4007 return retval;
4008}
4009
4010void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
4011void
4012rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4013 vm_table_update_callback_func update_callback,
4014 void *data,
4015 bool weak_only,
4016 enum rb_gc_vm_weak_tables table)
4017{
4018 rb_vm_t *vm = GET_VM();
4019
4020 struct global_vm_table_foreach_data foreach_data = {
4021 .callback = callback,
4022 .update_callback = update_callback,
4023 .data = data,
4024 .weak_only = weak_only,
4025 };
4026
4027 switch (table) {
4028 case RB_GC_VM_CI_TABLE: {
4029 if (vm->ci_table) {
4030 st_foreach_with_replace(
4031 vm->ci_table,
4032 vm_weak_table_foreach_weak_key,
4033 vm_weak_table_foreach_update_weak_key,
4034 (st_data_t)&foreach_data
4035 );
4036 }
4037 break;
4038 }
4039 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4040 if (vm->overloaded_cme_table) {
4041 st_foreach_with_replace(
4042 vm->overloaded_cme_table,
4043 vm_weak_table_foreach_weak_key,
4044 vm_weak_table_foreach_update_weak_key,
4045 (st_data_t)&foreach_data
4046 );
4047 }
4048 break;
4049 }
4050 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4051 rb_sym_global_symbol_table_foreach_weak_reference(
4052 vm_weak_table_sym_set_foreach,
4053 &foreach_data
4054 );
4055 break;
4056 }
4057 case RB_GC_VM_ID2REF_TABLE: {
4058 if (id2ref_tbl) {
4059 st_foreach_with_replace(
4060 id2ref_tbl,
4061 vm_weak_table_id2ref_foreach,
4062 vm_weak_table_id2ref_foreach_update,
4063 (st_data_t)&foreach_data
4064 );
4065 }
4066 break;
4067 }
4068 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4069 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4070 if (generic_fields_tbl) {
4071 st_foreach(
4072 generic_fields_tbl,
4073 vm_weak_table_gen_fields_foreach,
4074 (st_data_t)&foreach_data
4075 );
4076 }
4077 break;
4078 }
4079 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4080 rb_fstring_foreach_with_replace(
4081 vm_weak_table_frozen_strings_foreach,
4082 &foreach_data
4083 );
4084 break;
4085 }
4086 case RB_GC_VM_CC_REFINEMENT_TABLE: {
4087 if (vm->cc_refinement_table) {
4088 set_foreach_with_replace(
4089 vm->cc_refinement_table,
4090 vm_weak_table_cc_refinement_foreach,
4091 vm_weak_table_cc_refinement_foreach_update_update,
4092 (st_data_t)&foreach_data
4093 );
4094 }
4095 break;
4096 }
4097 case RB_GC_VM_WEAK_TABLE_COUNT:
4098 rb_bug("Unreachable");
4099 default:
4100 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4101 }
4102}
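/* A minimal sketch (hypothetical callbacks, not part of gc.c) of the contract
 * the dispatcher above relies on: `callback` classifies each slot and returns
 * an st_retval, and `update_callback` rewrites the slot after ST_REPLACE is
 * returned, typically by chasing the T_MOVED forwarding address. */
static int
example_weak_entry_cb(VALUE obj, void *data)
{
    (void)data;
    if (BUILTIN_TYPE(obj) == T_MOVED) return ST_REPLACE; /* slot needs rewriting */
    return ST_CONTINUE;                                  /* keep the entry as-is */
}

static int
example_weak_entry_update(VALUE *slot, void *data)
{
    (void)data;
    *slot = rb_gc_location(*slot); /* public API: resolves the new address */
    return ST_CONTINUE;
}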
4103
4104void
4105rb_gc_update_vm_references(void *objspace)
4106{
4107 rb_execution_context_t *ec = GET_EC();
4108 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4109
4110 rb_vm_update_references(vm);
4111 rb_gc_update_global_tbl();
4112 rb_sym_global_symbols_mark_and_move();
4113
4114#if USE_YJIT
4115 void rb_yjit_root_update_references(void); // in Rust
4116
4117 if (rb_yjit_enabled_p) {
4118 rb_yjit_root_update_references();
4119 }
4120#endif
4121
4122#if USE_ZJIT
4123 void rb_zjit_root_update_references(void); // in Rust
4124
4125 if (rb_zjit_enabled_p) {
4126 rb_zjit_root_update_references();
4127 }
4128#endif
4129}
4130
4131void
4132rb_gc_update_object_references(void *objspace, VALUE obj)
4133{
4134 struct classext_foreach_args args;
4135
4136 switch (BUILTIN_TYPE(obj)) {
4137 case T_CLASS:
4138 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4139 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4140 }
4141        // fall through to the shared T_CLASS/T_MODULE handling
4142 case T_MODULE:
4143 args.klass = obj;
4144 args.objspace = objspace;
4145 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4146 break;
4147
4148 case T_ICLASS:
4149 args.objspace = objspace;
4150 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4151 break;
4152
4153 case T_IMEMO:
4154 rb_imemo_mark_and_move(obj, true);
4155 return;
4156
4157 case T_NIL:
4158 case T_FIXNUM:
4159 case T_NODE:
4160 case T_MOVED:
4161 case T_NONE:
4162 /* These can't move */
4163 return;
4164
4165 case T_ARRAY:
4166 gc_ref_update_array(objspace, obj);
4167 break;
4168
4169 case T_HASH:
4170 gc_ref_update_hash(objspace, obj);
4171 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4172 break;
4173
4174 case T_STRING:
4175 {
4176 if (STR_SHARED_P(obj)) {
4177 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4178 }
4179
4180            /* If, after the move, the string is not embedded but can fit in
4181             * the slot it has been placed in, then re-embed it. */
4182 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4183 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4184 rb_str_make_embedded(obj);
4185 }
4186 }
4187
4188 break;
4189 }
4190 case T_DATA:
4191 /* Call the compaction callback, if it exists */
4192 {
4193 bool typed_data = RTYPEDDATA_P(obj);
4194 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4195
4196 if (typed_data) {
4197 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4198 }
4199
4200 if (ptr) {
4201 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4202 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4203
4204 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4205 VALUE *ref = (VALUE *)((char *)ptr + offset);
4206 *ref = gc_location_internal(objspace, *ref);
4207 }
4208 }
4209 else if (typed_data) {
4210 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4211 if (compact_func) (*compact_func)(ptr);
4212 }
4213 }
4214 }
4215 break;
4216
4217 case T_OBJECT:
4218 gc_ref_update_object(objspace, obj);
4219 break;
4220
4221 case T_FILE:
4222 if (RFILE(obj)->fptr) {
4223 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4224 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4225 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4226 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4227 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4228 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4229 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4230 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4231 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4232 }
4233 break;
4234 case T_REGEXP:
4235 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4236 break;
4237
4238 case T_SYMBOL:
4239 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4240 break;
4241
4242 case T_FLOAT:
4243 case T_BIGNUM:
4244 break;
4245
4246 case T_MATCH:
4247 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4248
4249 if (RMATCH(obj)->str) {
4250 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4251 }
4252 break;
4253
4254 case T_RATIONAL:
4255 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4256 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4257 break;
4258
4259 case T_COMPLEX:
4260 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4261 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4262
4263 break;
4264
4265 case T_STRUCT:
4266 {
4267 long i, len = RSTRUCT_LEN(obj);
4268 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4269
4270 for (i = 0; i < len; i++) {
4271 UPDATE_IF_MOVED(objspace, ptr[i]);
4272 }
4273
4274 if (RSTRUCT_EMBED_LEN(obj)) {
4275 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4276 UPDATE_IF_MOVED(objspace, ptr[len]);
4277 }
4278 }
4279 else {
4280 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4281 }
4282 }
4283 break;
4284 default:
4285 rb_bug("unreachable");
4286 break;
4287 }
4288
4289 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4290}
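/* A minimal sketch (hypothetical struct, not part of gc.c) of the dcompact
 * hook invoked for T_DATA above: it must chase the forwarding address of
 * every VALUE the wrapped struct owns. */
struct example_wrapper {
    VALUE cache; /* a Ruby object kept off the Ruby heap */
};

static void
example_dcompact(void *ptr)
{
    struct example_wrapper *w = ptr;
    w->cache = rb_gc_location(w->cache); /* no-op unless the object moved */
}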
4291
4292VALUE
4293rb_gc_start(void)
4294{
4295 rb_gc();
4296 return Qnil;
4297}
4298
4299void
4300rb_gc(void)
4301{
4302 unless_objspace(objspace) { return; }
4303
4304 rb_gc_impl_start(objspace, true, true, true, false);
4305}
4306
4307int
4308rb_during_gc(void)
4309{
4310 unless_objspace(objspace) { return FALSE; }
4311
4312 return rb_gc_impl_during_gc_p(objspace);
4313}
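/* A usage sketch (hypothetical helper, not part of gc.c): code that may be
 * reached from inside the collector can consult rb_during_gc() to skip work
 * that would allocate. */
static VALUE
example_safe_inspect(VALUE obj)
{
    if (rb_during_gc()) return Qnil; /* rb_inspect allocates; unsafe during GC */
    return rb_inspect(obj);
}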
4314
4315size_t
4316rb_gc_count(void)
4317{
4318 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4319}
4320
4321static VALUE
4322gc_count(rb_execution_context_t *ec, VALUE self)
4323{
4324 return SIZET2NUM(rb_gc_count());
4325}
4326
4327VALUE
4328rb_gc_latest_gc_info(VALUE key)
4329{
4330 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4331 rb_raise(rb_eTypeError, "non-hash or symbol given");
4332 }
4333
4334 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4335
4336 if (val == Qundef) {
4337 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4338 }
4339
4340 return val;
4341}
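/* A usage sketch (not part of gc.c): querying a single key from C; :major_by
 * is one of the keys the default GC reports. */
static VALUE
example_latest_major_by(void)
{
    return rb_gc_latest_gc_info(ID2SYM(rb_intern("major_by")));
}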
4342
4343static VALUE
4344gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4345{
4346 if (NIL_P(arg)) {
4347 arg = rb_hash_new();
4348 }
4349 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4350 rb_raise(rb_eTypeError, "non-hash or symbol given");
4351 }
4352
4353 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4354
4355 if (ret == Qundef) {
4356 GC_ASSERT(SYMBOL_P(arg));
4357
4358 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4359 }
4360
4361 return ret;
4362}
4363
4364size_t
4365rb_gc_stat(VALUE arg)
4366{
4367 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4368 rb_raise(rb_eTypeError, "non-hash or symbol given");
4369 }
4370
4371 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4372
4373 if (ret == Qundef) {
4374 GC_ASSERT(SYMBOL_P(arg));
4375
4376 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4377 }
4378
4379 if (SYMBOL_P(arg)) {
4380 return NUM2SIZET(ret);
4381 }
4382 else {
4383 return 0;
4384 }
4385}
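/* A usage sketch (hypothetical helper, not part of gc.c), mirroring the
 * branches above: a Symbol argument yields the value directly, while a Hash
 * argument is filled in and the function returns 0. */
static size_t
example_gc_run_count(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("count")));
}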
4386
4387static VALUE
4388gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4389{
4390 if (NIL_P(arg)) {
4391 arg = rb_hash_new();
4392 }
4393
4394 if (NIL_P(heap_name)) {
4395 if (!RB_TYPE_P(arg, T_HASH)) {
4396 rb_raise(rb_eTypeError, "non-hash given");
4397 }
4398 }
4399 else if (FIXNUM_P(heap_name)) {
4400 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4401 rb_raise(rb_eTypeError, "non-hash or symbol given");
4402 }
4403 }
4404 else {
4405 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4406 }
4407
4408 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4409
4410 if (ret == Qundef) {
4411 GC_ASSERT(SYMBOL_P(arg));
4412
4413 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4414 }
4415
4416 return ret;
4417}
4418
4419static VALUE
4420gc_config_get(rb_execution_context_t *ec, VALUE self)
4421{
4422 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4423 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4424
4425 return cfg_hash;
4426}
4427
4428static VALUE
4429gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4430{
4431 void *objspace = rb_gc_get_objspace();
4432
4433 rb_gc_impl_config_set(objspace, hash);
4434
4435 return Qnil;
4436}
4437
4438static VALUE
4439gc_stress_get(rb_execution_context_t *ec, VALUE self)
4440{
4441 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4442}
4443
4444static VALUE
4445gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4446{
4447 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4448
4449 return flag;
4450}
4451
4452void
4453rb_gc_initial_stress_set(VALUE flag)
4454{
4455 initial_stress = flag;
4456}
4457
4458size_t *
4459rb_gc_heap_sizes(void)
4460{
4461 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4462}
4463
4464VALUE
4465rb_gc_enable(void)
4466{
4467 return rb_objspace_gc_enable(rb_gc_get_objspace());
4468}
4469
4470VALUE
4471rb_objspace_gc_enable(void *objspace)
4472{
4473 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4474 rb_gc_impl_gc_enable(objspace);
4475 return RBOOL(disabled);
4476}
4477
4478static VALUE
4479gc_enable(rb_execution_context_t *ec, VALUE _)
4480{
4481 return rb_gc_enable();
4482}
4483
4484static VALUE
4485gc_disable_no_rest(void *objspace)
4486{
4487 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4488 rb_gc_impl_gc_disable(objspace, false);
4489 return RBOOL(disabled);
4490}
4491
4492VALUE
4493rb_gc_disable_no_rest(void)
4494{
4495 return gc_disable_no_rest(rb_gc_get_objspace());
4496}
4497
4498VALUE
4499rb_gc_disable(void)
4500{
4501 return rb_objspace_gc_disable(rb_gc_get_objspace());
4502}
4503
4504VALUE
4505rb_objspace_gc_disable(void *objspace)
4506{
4507 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4508 rb_gc_impl_gc_disable(objspace, true);
4509 return RBOOL(disabled);
4510}
4511
4512static VALUE
4513gc_disable(rb_execution_context_t *ec, VALUE _)
4514{
4515 return rb_gc_disable();
4516}
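/* A minimal sketch (hypothetical helper, not part of gc.c): the Qtrue/Qfalse
 * return value reports whether GC was already disabled, which lets callers
 * nest disable/enable pairs without clobbering the outer state. */
static void
example_with_gc_disabled(void (*body)(void))
{
    VALUE was_disabled = rb_gc_disable();
    body();
    if (!RTEST(was_disabled)) rb_gc_enable(); /* restore only what we changed */
}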
4517
4518// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4519void
4520ruby_gc_set_params(void)
4521{
4522 rb_gc_impl_set_params(rb_gc_get_objspace());
4523}
4524
4525void
4526rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4527{
4528 RB_VM_LOCKING() {
4529        if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4530
4531 if (!RB_SPECIAL_CONST_P(obj)) {
4532 rb_vm_t *vm = GET_VM();
4533 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4534 struct gc_mark_func_data_struct mfd = {
4535 .mark_func = func,
4536 .data = data,
4537 };
4538
4539 vm->gc.mark_func_data = &mfd;
4540 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4541 vm->gc.mark_func_data = prev_mfd;
4542 }
4543 }
4544}
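/* A usage sketch (hypothetical helpers, not part of gc.c), in the style of
 * the objspace extension: count the objects directly reachable from obj. */
static void
example_count_reachable_i(VALUE child, void *data)
{
    (*(size_t *)data)++;
}

static size_t
example_count_reachable(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, example_count_reachable_i, &n);
    return n;
}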
4545
4546struct root_objects_data {
4547    const char *category;
4548 void (*func)(const char *category, VALUE, void *);
4549 void *data;
4550};
4551
4552static void
4553root_objects_from(VALUE obj, void *ptr)
4554{
4555 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4556 (*data->func)(data->category, obj, data->data);
4557}
4558
4559void
4560rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4561{
4562    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4563
4564 rb_vm_t *vm = GET_VM();
4565
4566 struct root_objects_data data = {
4567 .func = func,
4568 .data = passing_data,
4569 };
4570
4571 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4572 struct gc_mark_func_data_struct mfd = {
4573 .mark_func = root_objects_from,
4574 .data = &data,
4575 };
4576
4577 vm->gc.mark_func_data = &mfd;
4578 rb_gc_save_machine_context();
4579 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4580 vm->gc.mark_func_data = prev_mfd;
4581}
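/* A usage sketch (hypothetical callback, not part of gc.c): the root variant
 * additionally passes the category each root belongs to. */
static void
example_dump_root_i(const char *category, VALUE root, void *data)
{
    (void)data;
    fprintf(stderr, "%s: %s\n", category, rb_obj_info(root));
}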
4582
4583/*
4584 ------------------------------ DEBUG ------------------------------
4585*/
4586
4587static const char *
4588type_name(int type, VALUE obj)
4589{
4590 switch (type) {
4591#define TYPE_NAME(t) case (t): return #t;
4592 TYPE_NAME(T_NONE);
4593 TYPE_NAME(T_OBJECT);
4594 TYPE_NAME(T_CLASS);
4595 TYPE_NAME(T_MODULE);
4596 TYPE_NAME(T_FLOAT);
4597 TYPE_NAME(T_STRING);
4598 TYPE_NAME(T_REGEXP);
4599 TYPE_NAME(T_ARRAY);
4600 TYPE_NAME(T_HASH);
4601 TYPE_NAME(T_STRUCT);
4602 TYPE_NAME(T_BIGNUM);
4603 TYPE_NAME(T_FILE);
4604 TYPE_NAME(T_MATCH);
4605 TYPE_NAME(T_COMPLEX);
4606 TYPE_NAME(T_RATIONAL);
4607 TYPE_NAME(T_NIL);
4608 TYPE_NAME(T_TRUE);
4609 TYPE_NAME(T_FALSE);
4610 TYPE_NAME(T_SYMBOL);
4611 TYPE_NAME(T_FIXNUM);
4612 TYPE_NAME(T_UNDEF);
4613 TYPE_NAME(T_IMEMO);
4614 TYPE_NAME(T_ICLASS);
4615 TYPE_NAME(T_MOVED);
4616 TYPE_NAME(T_ZOMBIE);
4617 case T_DATA:
4618 if (obj && rb_objspace_data_type_name(obj)) {
4619 return rb_objspace_data_type_name(obj);
4620 }
4621 return "T_DATA";
4622#undef TYPE_NAME
4623 }
4624 return "unknown";
4625}
4626
4627static const char *
4628obj_type_name(VALUE obj)
4629{
4630 return type_name(TYPE(obj), obj);
4631}
4632
4633const char *
4634rb_method_type_name(rb_method_type_t type)
4635{
4636 switch (type) {
4637 case VM_METHOD_TYPE_ISEQ: return "iseq";
4638      case VM_METHOD_TYPE_ATTRSET: return "attrset";
4639 case VM_METHOD_TYPE_IVAR: return "ivar";
4640 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4641 case VM_METHOD_TYPE_ALIAS: return "alias";
4642 case VM_METHOD_TYPE_REFINED: return "refined";
4643 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4644 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4645 case VM_METHOD_TYPE_MISSING: return "missing";
4646 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4647 case VM_METHOD_TYPE_UNDEF: return "undef";
4648 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4649 }
4650 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4651}
4652
4653static void
4654rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4655{
4656 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4657 VALUE path = rb_iseq_path(iseq);
4658 int n = ISEQ_BODY(iseq)->location.first_lineno;
4659 snprintf(buff, buff_size, " %s@%s:%d",
4660 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4661 RSTRING_PTR(path), n);
4662 }
4663}
4664
4665static int
4666str_len_no_raise(VALUE str)
4667{
4668 long len = RSTRING_LEN(str);
4669 if (len < 0) return 0;
4670 if (len > INT_MAX) return INT_MAX;
4671 return (int)len;
4672}
4673
4674#define BUFF_ARGS buff + pos, buff_size - pos
4675#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4676#define APPEND_S(s) do { \
4677 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4678 goto end; \
4679 } \
4680 else { \
4681 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4682 } \
4683 } while (0)
4684#define C(c, s) ((c) != 0 ? (s) : " ")
4685
4686static size_t
4687rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4688{
4689 size_t pos = 0;
4690
4691 if (SPECIAL_CONST_P(obj)) {
4692 APPEND_F("%s", obj_type_name(obj));
4693
4694 if (FIXNUM_P(obj)) {
4695 APPEND_F(" %ld", FIX2LONG(obj));
4696 }
4697 else if (SYMBOL_P(obj)) {
4698 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4699 }
4700 }
4701 else {
4702 // const int age = RVALUE_AGE_GET(obj);
4703
4704 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4705 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4706 // TODO: fixme
4707 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4708 // (void *)obj, age,
4709 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4710 // C(RVALUE_MARK_BITMAP(obj), "M"),
4711 // C(RVALUE_PIN_BITMAP(obj), "P"),
4712 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4713 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4714 // C(rb_objspace_garbage_object_p(obj), "G"),
4715 // obj_type_name(obj));
4716 }
4717 else {
4718 /* fake */
4719 // APPEND_F("%p [%dXXXX] %s",
4720 // (void *)obj, age,
4721 // obj_type_name(obj));
4722 }
4723
4724 if (internal_object_p(obj)) {
4725 /* ignore */
4726 }
4727 else if (RBASIC(obj)->klass == 0) {
4728 APPEND_S("(temporary internal)");
4729 }
4730 else if (RTEST(RBASIC(obj)->klass)) {
4731 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4732 if (!NIL_P(class_path)) {
4733 APPEND_F("%s ", RSTRING_PTR(class_path));
4734 }
4735 }
4736 }
4737 end:
4738
4739 return pos;
4740}
4741
4742const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4743
4744static size_t
4745rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4746{
4747 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4748 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4749
4750 switch (type) {
4751 case T_NODE:
4752 UNEXPECTED_NODE(rb_raw_obj_info);
4753 break;
4754 case T_ARRAY:
4755 if (ARY_SHARED_P(obj)) {
4756 APPEND_S("shared -> ");
4757 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4758 }
4759 else {
4760 APPEND_F("[%s%s%s] ",
4761 C(ARY_EMBED_P(obj), "E"),
4762 C(ARY_SHARED_P(obj), "S"),
4763 C(ARY_SHARED_ROOT_P(obj), "R"));
4764
4765 if (ARY_EMBED_P(obj)) {
4766 APPEND_F("len: %ld (embed)",
4767 RARRAY_LEN(obj));
4768 }
4769 else {
4770 APPEND_F("len: %ld, capa:%ld ptr:%p",
4771 RARRAY_LEN(obj),
4772 RARRAY(obj)->as.heap.aux.capa,
4773 (void *)RARRAY_CONST_PTR(obj));
4774 }
4775 }
4776 break;
4777 case T_STRING: {
4778 if (STR_SHARED_P(obj)) {
4779 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4780 }
4781 else {
4782 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4783
4784 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4785 }
4786 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4787 break;
4788 }
4789 case T_SYMBOL: {
4790 VALUE fstr = RSYMBOL(obj)->fstr;
4791 ID id = RSYMBOL(obj)->id;
4792 if (RB_TYPE_P(fstr, T_STRING)) {
4793 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4794 }
4795 else {
4796 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4797 }
4798 break;
4799 }
4800 case T_MOVED: {
4801 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4802 break;
4803 }
4804 case T_HASH: {
4805 APPEND_F("[%c] %"PRIdSIZE,
4806 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4807 RHASH_SIZE(obj));
4808 break;
4809 }
4810 case T_CLASS:
4811 case T_MODULE:
4812 {
4813 VALUE class_path = rb_class_path_cached(obj);
4814 if (!NIL_P(class_path)) {
4815 APPEND_F("%s", RSTRING_PTR(class_path));
4816 }
4817 else {
4818 APPEND_S("(anon)");
4819 }
4820 break;
4821 }
4822 case T_ICLASS:
4823 {
4824 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4825 if (!NIL_P(class_path)) {
4826 APPEND_F("src:%s", RSTRING_PTR(class_path));
4827 }
4828 break;
4829 }
4830 case T_OBJECT:
4831 {
4832 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
4833 if (rb_shape_obj_too_complex_p(obj)) {
4834 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
4835 APPEND_F("(too_complex) len:%zu", hash_len);
4836 }
4837 else {
4838 APPEND_F("(embed) len:%d", ROBJECT_FIELDS_CAPACITY(obj));
4839 }
4840 }
4841 else {
4842 APPEND_F("len:%d ptr:%p", ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
4843 }
4844 }
4845 break;
4846 case T_DATA: {
4847 const struct rb_block *block;
4848 const rb_iseq_t *iseq;
4849 if (rb_obj_is_proc(obj) &&
4850 (block = vm_proc_block(obj)) != NULL &&
4851 (vm_block_type(block) == block_type_iseq) &&
4852 (iseq = vm_block_iseq(block)) != NULL) {
4853 rb_raw_iseq_info(BUFF_ARGS, iseq);
4854 }
4855 else if (rb_ractor_p(obj)) {
4856 rb_ractor_t *r = (void *)DATA_PTR(obj);
4857 if (r) {
4858 APPEND_F("r:%d", r->pub.id);
4859 }
4860 }
4861 else {
4862 const char * const type_name = rb_objspace_data_type_name(obj);
4863 if (type_name) {
4864 APPEND_F("%s", type_name);
4865 }
4866 }
4867 break;
4868 }
4869 case T_IMEMO: {
4870 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4871
4872 switch (imemo_type(obj)) {
4873 case imemo_ment:
4874 {
4875 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4876
4877 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4878 rb_id2name(me->called_id),
4879 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4880 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4881 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4882 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4883 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4884 me->def ? rb_method_type_name(me->def->type) : "NULL",
4885 me->def ? me->def->aliased : -1,
4886 (void *)me->owner, // obj_info(me->owner),
4887 (void *)me->defined_class); //obj_info(me->defined_class)));
4888
4889 if (me->def) {
4890 switch (me->def->type) {
4891 case VM_METHOD_TYPE_ISEQ:
4892 APPEND_S(" (iseq:");
4893 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4894 APPEND_S(")");
4895 break;
4896 default:
4897 break;
4898 }
4899 }
4900
4901 break;
4902 }
4903 case imemo_iseq: {
4904 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4905 rb_raw_iseq_info(BUFF_ARGS, iseq);
4906 break;
4907 }
4908 case imemo_callinfo:
4909 {
4910 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4911 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4912 rb_id2name(vm_ci_mid(ci)),
4913 vm_ci_flag(ci),
4914 vm_ci_argc(ci),
4915 vm_ci_kwarg(ci) ? "available" : "NULL");
4916 break;
4917 }
4918 case imemo_callcache:
4919 {
4920 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4921 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
4922 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4923
4924 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4925 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4926 cme ? rb_id2name(cme->called_id) : "<NULL>",
4927 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4928 (void *)cme,
4929 (void *)(uintptr_t)vm_cc_call(cc));
4930 break;
4931 }
4932 default:
4933 break;
4934 }
4935 }
4936 default:
4937 break;
4938 }
4939 }
4940 end:
4941
4942 return pos;
4943}
4944
4945#undef C
4946
4947#ifdef RUBY_ASAN_ENABLED
4948void
4949rb_asan_poison_object(VALUE obj)
4950{
4951 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4952 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4953}
4954
4955void
4956rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4957{
4958 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4959 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4960}
4961
4962void *
4963rb_asan_poisoned_object_p(VALUE obj)
4964{
4965 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4966 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4967}
4968#endif
4969
4970static void
4971raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4972{
4973 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4974    pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4975 if (pos >= buff_size) {} // truncated
4976}
4977
4978const char *
4979rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4980{
4981 void *objspace = rb_gc_get_objspace();
4982
4983 if (SPECIAL_CONST_P(obj)) {
4984 raw_obj_info(buff, buff_size, obj);
4985 }
4986 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
4987 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
4988 }
4989#if 0 // maybe no need to check it?
4990 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
4991 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
4992 }
4993#endif
4994 else {
4995 asan_unpoisoning_object(obj) {
4996 raw_obj_info(buff, buff_size, obj);
4997 }
4998 }
4999 return buff;
5000}
5001
5002#undef APPEND_S
5003#undef APPEND_F
5004#undef BUFF_ARGS
5005
5006/* Increments *var atomically and resets *var to 0 when maxval is
5007 * reached. Returns the old *var value, wrapped into 0...maxval. */
5008static rb_atomic_t
5009atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5010{
5011 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5012 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5013 const rb_atomic_t newval = oldval + 1;
5014 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5015 oldval %= maxval;
5016 }
5017 return oldval;
5018}
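/* A worked example (illustrative): with maxval == 10 and *var == 9, FETCH_ADD
 * returns oldval == 9 and leaves *var == 10; the CAS then swaps 10 back to
 * 10 % 10 == 0, and the function returns 9 % 10 == 9, so callers always see
 * values in 0...maxval even when a racing increment makes the CAS fail. */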
5019
5020static const char *
5021obj_info(VALUE obj)
5022{
5023 if (RGENGC_OBJ_INFO) {
5024 static struct {
5025 rb_atomic_t index;
5026 char buffers[10][0x100];
5027 } info = {0};
5028
5029 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5030 char *const buff = info.buffers[index];
5031 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5032 }
5033 return obj_type_name(obj);
5034}
5035
5036/*
5037 ------------------------ Extended allocator ------------------------
5038*/
5039
5041 VALUE exc;
5042 const char *fmt;
5043 va_list *ap;
5044};
5045
5046static void *
5047gc_vraise(void *ptr)
5048{
5049 struct gc_raise_tag *argv = ptr;
5050 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5051 UNREACHABLE_RETURN(NULL);
5052}
5053
5054static void
5055gc_raise(VALUE exc, const char *fmt, ...)
5056{
5057 va_list ap;
5058 va_start(ap, fmt);
5059 struct gc_raise_tag argv = {
5060 exc, fmt, &ap,
5061 };
5062
5063 if (ruby_thread_has_gvl_p()) {
5064        gc_vraise(&argv);
5065        UNREACHABLE;
5066 }
5067 else if (ruby_native_thread_p()) {
5068        rb_thread_call_with_gvl(gc_vraise, &argv);
5069        UNREACHABLE;
5070 }
5071 else {
5072 /* Not in a ruby thread */
5073 fprintf(stderr, "%s", "[FATAL] ");
5074 vfprintf(stderr, fmt, ap);
5075 }
5076
5077 va_end(ap);
5078 abort();
5079}
5080
5081NORETURN(static void negative_size_allocation_error(const char *));
5082static void
5083negative_size_allocation_error(const char *msg)
5084{
5085 gc_raise(rb_eNoMemError, "%s", msg);
5086}
5087
5088static void *
5089ruby_memerror_body(void *dummy)
5090{
5091 rb_memerror();
5092 return 0;
5093}
5094
5095NORETURN(static void ruby_memerror(void));
5096RBIMPL_ATTR_MAYBE_UNUSED()
5097static void
5098ruby_memerror(void)
5099{
5100 if (ruby_thread_has_gvl_p()) {
5101 rb_memerror();
5102 }
5103 else {
5104 if (ruby_native_thread_p()) {
5105 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5106 }
5107 else {
5108 /* no ruby thread */
5109 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5110 }
5111 }
5112
5113    /* There have been discussions about whether we should die here; */
5114    /* we might revisit that later. */
5115 exit(EXIT_FAILURE);
5116}
5117
5118void
5119rb_memerror(void)
5120{
5121 /* the `GET_VM()->special_exceptions` below assumes that
5122 * the VM is reachable from the current thread. We should
5123 * definitely make sure of that. */
5124 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5125
5126 rb_execution_context_t *ec = GET_EC();
5127 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5128
5129 if (!exc ||
5130 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5131 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5132 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5133 exit(EXIT_FAILURE);
5134 }
5135 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5136 rb_ec_raised_clear(ec);
5137 }
5138 else {
5139 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5140 exc = ruby_vm_special_exception_copy(exc);
5141 }
5142 ec->errinfo = exc;
5143 EC_JUMP_TAG(ec, TAG_RAISE);
5144}
5145
5146bool
5147rb_memerror_reentered(void)
5148{
5149 rb_execution_context_t *ec = GET_EC();
5150 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5151}
5152
5153static void *
5154handle_malloc_failure(void *ptr)
5155{
5156 if (LIKELY(ptr)) {
5157 return ptr;
5158 }
5159 else {
5160 ruby_memerror();
5161 UNREACHABLE_RETURN(ptr);
5162 }
5163}
5164
5165static void *ruby_xmalloc_body(size_t size);
5166
5167void *
5168ruby_xmalloc(size_t size)
5169{
5170 return handle_malloc_failure(ruby_xmalloc_body(size));
5171}
5172
5173static bool
5174malloc_gc_allowed(void)
5175{
5176 rb_ractor_t *r = rb_current_ractor_raw(false);
5177
5178 return r == NULL || !r->malloc_gc_disabled;
5179}
5180
5181static void *
5182ruby_xmalloc_body(size_t size)
5183{
5184 if ((ssize_t)size < 0) {
5185 negative_size_allocation_error("too large allocation size");
5186 }
5187
5188 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5189}
5190
5191void
5192ruby_malloc_size_overflow(size_t count, size_t elsize)
5193{
5194 rb_raise(rb_eArgError,
5195 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5196 count, elsize);
5197}
5198
5199void
5200ruby_malloc_add_size_overflow(size_t x, size_t y)
5201{
5202 rb_raise(rb_eArgError,
5203 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5204 x, y);
5205}
5206
5207static void *ruby_xmalloc2_body(size_t n, size_t size);
5208
5209void *
5210ruby_xmalloc2(size_t n, size_t size)
5211{
5212 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5213}
5214
5215static void *
5216ruby_xmalloc2_body(size_t n, size_t size)
5217{
5218 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5219}
5220
5221static void *ruby_xcalloc_body(size_t n, size_t size);
5222
5223void *
5224ruby_xcalloc(size_t n, size_t size)
5225{
5226 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5227}
5228
5229static void *
5230ruby_xcalloc_body(size_t n, size_t size)
5231{
5232 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5233}
5234
5235static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
5236
5237#ifdef ruby_sized_xrealloc
5238#undef ruby_sized_xrealloc
5239#endif
5240void *
5241ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
5242{
5243 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
5244}
5245
5246static void *
5247ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
5248{
5249 if ((ssize_t)new_size < 0) {
5250 negative_size_allocation_error("too large allocation size");
5251 }
5252
5253 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5254}
5255
5256void *
5257ruby_xrealloc(void *ptr, size_t new_size)
5258{
5259 return ruby_sized_xrealloc(ptr, new_size, 0);
5260}
5261
5262static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
5263
5264#ifdef ruby_sized_xrealloc2
5265#undef ruby_sized_xrealloc2
5266#endif
5267void *
5268ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
5269{
5270 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
5271}
5272
5273static void *
5274ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
5275{
5276 size_t len = xmalloc2_size(n, size);
5277 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5278}
5279
5280void *
5281ruby_xrealloc2(void *ptr, size_t n, size_t size)
5282{
5283 return ruby_sized_xrealloc2(ptr, n, size, 0);
5284}
5285
5286#ifdef ruby_sized_xfree
5287#undef ruby_sized_xfree
5288#endif
5289void
5290ruby_sized_xfree(void *x, size_t size)
5291{
5292 if (LIKELY(x)) {
5293 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5294 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5295 * that case. */
5296 if (LIKELY(GET_VM())) {
5297 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5298 }
5299 else {
5300 ruby_mimfree(x);
5301 }
5302 }
5303}
5304
5305void
5306ruby_xfree(void *x)
5307{
5308 ruby_sized_xfree(x, 0);
5309}
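/* A usage sketch (hypothetical helper, not part of gc.c): the sized variants
 * let the GC keep its malloc accounting exact when the old size is known. */
static char *
example_grow_buffer(char *buf, size_t old_cap, size_t new_cap)
{
    return ruby_sized_xrealloc(buf, new_cap, old_cap);
}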
5310
5311void *
5312rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5313{
5314 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5315 return ruby_xmalloc(w);
5316}
5317
5318void *
5319rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5320{
5321 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5322 return ruby_xcalloc(w, 1);
5323}
5324
5325void *
5326rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5327{
5328 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5329 return ruby_xrealloc((void *)p, w);
5330}
5331
5332void *
5333rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5334{
5335 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5336 return ruby_xmalloc(u);
5337}
5338
5339void *
5340rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5341{
5342 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5343 return ruby_xcalloc(u, 1);
5344}
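/* A usage sketch (hypothetical layout, not part of gc.c): these helpers cover
 * the common "header plus n elements" allocation with overflow checking. */
struct example_vec {
    size_t len;
    VALUE items[]; /* flexible array member */
};

static struct example_vec *
example_vec_new(size_t n)
{
    /* allocates n * sizeof(VALUE) + sizeof(struct example_vec), or raises */
    struct example_vec *vec = rb_xmalloc_mul_add(n, sizeof(VALUE), sizeof(struct example_vec));
    vec->len = n;
    return vec;
}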
5345
5346/* Mimics ruby_xmalloc, but does not require rb_objspace.
5347 * Must return a pointer suitable for ruby_xfree.
5348 */
5349void *
5350ruby_mimmalloc(size_t size)
5351{
5352 void *mem;
5353#if CALC_EXACT_MALLOC_SIZE
5354 size += sizeof(struct malloc_obj_info);
5355#endif
5356 mem = malloc(size);
5357#if CALC_EXACT_MALLOC_SIZE
5358 if (!mem) {
5359 return NULL;
5360 }
5361 else
5362 /* set 0 for consistency of allocated_size/allocations */
5363 {
5364 struct malloc_obj_info *info = mem;
5365 info->size = 0;
5366 mem = info + 1;
5367 }
5368#endif
5369 return mem;
5370}
5371
5372void *
5373ruby_mimcalloc(size_t num, size_t size)
5374{
5375 void *mem;
5376#if CALC_EXACT_MALLOC_SIZE
5377 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5378 if (UNLIKELY(t.left)) {
5379 return NULL;
5380 }
5381 size = t.right + sizeof(struct malloc_obj_info);
5382 mem = calloc1(size);
5383 if (!mem) {
5384 return NULL;
5385 }
5386 else
5387 /* set 0 for consistency of allocated_size/allocations */
5388 {
5389 struct malloc_obj_info *info = mem;
5390 info->size = 0;
5391 mem = info + 1;
5392 }
5393#else
5394 mem = calloc(num, size);
5395#endif
5396 return mem;
5397}
5398
5399void
5400ruby_mimfree(void *ptr)
5401{
5402#if CALC_EXACT_MALLOC_SIZE
5403 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5404 ptr = info;
5405#endif
5406 free(ptr);
5407}
5408
5409void
5410rb_gc_adjust_memory_usage(ssize_t diff)
5411{
5412 unless_objspace(objspace) { return; }
5413
5414 rb_gc_impl_adjust_memory_usage(objspace, diff);
5415}
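/* A usage sketch (hypothetical extension code, not part of gc.c): a T_DATA
 * object holding off-heap memory can report that hidden cost to the GC. */
static void
example_attach_native_buffer(size_t bytes)
{
    rb_gc_adjust_memory_usage((ssize_t)bytes);   /* report growth */
    /* ... and rb_gc_adjust_memory_usage(-(ssize_t)bytes) when freed */
}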
5416
5417const char *
5418rb_obj_info(VALUE obj)
5419{
5420 return obj_info(obj);
5421}
5422
5423void
5424rb_obj_info_dump(VALUE obj)
5425{
5426 char buff[0x100];
5427 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5428}
5429
5430void
5431rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5432{
5433 char buff[0x100];
5434 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5435}
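/* A debugging sketch (hypothetical probe, not part of gc.c): the dump helpers
 * are handy printf-style probes on a suspect code path. */
static void
example_probe(VALUE obj)
{
    rb_obj_info_dump_loc(obj, __FILE__, __LINE__, __func__);
}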
5436
5437void
5438rb_gc_before_fork(void)
5439{
5440 rb_gc_impl_before_fork(rb_gc_get_objspace());
5441}
5442
5443void
5444rb_gc_after_fork(rb_pid_t pid)
5445{
5446 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5447}
5448
5449/*
5450 * Document-module: ObjectSpace
5451 *
5452 * The ObjectSpace module contains a number of routines
5453 * that interact with the garbage collection facility and allow you to
5454 * traverse all living objects with an iterator.
5455 *
5456 * ObjectSpace also provides support for object finalizers, procs that will be
5457 * called after a specific object has been destroyed by garbage collection. See
5458 * the documentation for +ObjectSpace.define_finalizer+ for important
5459 * information on how to use this method correctly.
5460 *
5461 * a = "A"
5462 * b = "B"
5463 *
5464 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5465 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5466 *
5467 * a = nil
5468 * b = nil
5469 *
5470 * _produces:_
5471 *
5472 * Finalizer two on 537763470
5473 * Finalizer one on 537763480
5474 */
5475
5476/* Document-class: GC::Profiler
5477 *
5478 * The GC profiler provides access to information on GC runs including time,
5479 * length and object space size.
5480 *
5481 * Example:
5482 *
5483 * GC::Profiler.enable
5484 *
5485 * require 'rdoc/rdoc'
5486 *
5487 * GC::Profiler.report
5488 *
5489 * GC::Profiler.disable
5490 *
5491 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5492 */
5493
5494#include "gc.rbinc"
5495
5496void
5497Init_GC(void)
5498{
5499#undef rb_intern
5500 rb_gc_register_address(&id2ref_value);
5501
5502 malloc_offset = gc_compute_malloc_offset();
5503
5504 rb_mGC = rb_define_module("GC");
5505
5506 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5507
5508 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5509
5510 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5511 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5512
5513 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5514
5515 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5516
5517 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5518 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5519
5520 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5521
5522 rb_gc_impl_init();
5523}
5524
5525// Set a name for the anonymous virtual memory area. `addr` is the starting
5526// address of the area and `size` is its length in bytes. `name` is a
5527// NUL-terminated human-readable string.
5528//
5529// This function is usually called after calling `mmap()`. The human-readable
5530// annotation helps developers identify the call site of `mmap()` that created
5531// the memory mapping.
5532//
5533// This function currently only works on Linux 5.17 or higher. After calling
5534// this function, we can see annotations in the form of "[anon:...]" in
5535// `/proc/self/maps`, where `...` is the content of `name`. This function has
5536// no effect when called on other platforms.
5537void
5538ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5539{
5540#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5541 // The name length cannot exceed 80 (including the '\0').
5542 RUBY_ASSERT(strlen(name) < 80);
5543 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5544 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5545 // reasons.
5546 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5547 // 2. addr is an invalid address.
5548 // 3. The string pointed by name is too long.
5549 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5550 // happen if we run the compiled binary on an old kernel. In theory, all
5551 // other errors should result in a failure. But since EINVAL cannot tell
5552 // the first error from others, and this function is mainly used for
5553 // debugging, we silently ignore the error.
5554 errno = 0;
5555#endif
5556}
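/* A usage sketch (hypothetical helper, not part of gc.c; assumes POSIX mmap
 * via <sys/mman.h>): annotate a fresh anonymous mapping so it appears as
 * "[anon:example arena]" in /proc/self/maps on Linux 5.17+. */
static void *
example_mapped_arena(size_t size)
{
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem != MAP_FAILED) ruby_annotate_mmap(mem, size, "example arena");
    return mem;
}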