Ruby 4.1.0dev (2026-04-04 revision 3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
gc.c (3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#include "ruby/internal/config.h"
15#ifdef _WIN32
16# include "ruby/ruby.h"
17#endif
18
19#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
20# include "wasm/setjmp.h"
21# include "wasm/machine.h"
22#else
23# include <setjmp.h>
24#endif
25#include <stdarg.h>
26#include <stdio.h>
27
28/* MALLOC_HEADERS_BEGIN */
29#ifndef HAVE_MALLOC_USABLE_SIZE
30# ifdef _WIN32
31# define HAVE_MALLOC_USABLE_SIZE
32# define malloc_usable_size(a) _msize(a)
33# elif defined HAVE_MALLOC_SIZE
34# define HAVE_MALLOC_USABLE_SIZE
35# define malloc_usable_size(a) malloc_size(a)
36# endif
37#endif
38
39#ifdef HAVE_MALLOC_USABLE_SIZE
40# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
41/* Alternative malloc header is included in ruby/missing.h */
42# elif defined(HAVE_MALLOC_H)
43# include <malloc.h>
44# elif defined(HAVE_MALLOC_NP_H)
45# include <malloc_np.h>
46# elif defined(HAVE_MALLOC_MALLOC_H)
47# include <malloc/malloc.h>
48# endif
49#endif
50
51/* MALLOC_HEADERS_END */
52
53#ifdef HAVE_SYS_TIME_H
54# include <sys/time.h>
55#endif
56
57#ifdef HAVE_SYS_RESOURCE_H
58# include <sys/resource.h>
59#endif
60
61#if defined _WIN32 || defined __CYGWIN__
62# include <windows.h>
63#elif defined(HAVE_POSIX_MEMALIGN)
64#elif defined(HAVE_MEMALIGN)
65# include <malloc.h>
66#endif
67
68#include <sys/types.h>
69
70#ifdef __EMSCRIPTEN__
71#include <emscripten.h>
72#endif
73
74/* For ruby_annotate_mmap */
75#ifdef HAVE_SYS_PRCTL_H
76#include <sys/prctl.h>
77#endif
78
79#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
80
81#include "constant.h"
82#include "darray.h"
83#include "debug_counter.h"
84#include "eval_intern.h"
85#include "gc/gc.h"
86#include "id_table.h"
87#include "internal.h"
88#include "internal/class.h"
89#include "internal/compile.h"
90#include "internal/complex.h"
91#include "internal/concurrent_set.h"
92#include "internal/cont.h"
93#include "internal/error.h"
94#include "internal/eval.h"
95#include "internal/gc.h"
96#include "internal/hash.h"
97#include "internal/imemo.h"
98#include "internal/io.h"
99#include "internal/numeric.h"
100#include "internal/object.h"
101#include "internal/proc.h"
102#include "internal/rational.h"
103#include "internal/sanitizers.h"
104#include "internal/struct.h"
105#include "internal/symbol.h"
106#include "internal/thread.h"
107#include "internal/variable.h"
108#include "internal/warnings.h"
109#include "probes.h"
110#include "regint.h"
111#include "ruby/debug.h"
112#include "ruby/io.h"
113#include "ruby/re.h"
114#include "ruby/st.h"
115#include "ruby/thread.h"
116#include "ruby/util.h"
117#include "ruby/vm.h"
118#include "ruby_assert.h"
119#include "ruby_atomic.h"
120#include "symbol.h"
121#include "variable.h"
122#include "vm_core.h"
123#include "vm_sync.h"
124#include "vm_callinfo.h"
125#include "ractor_core.h"
126#include "yjit.h"
127#include "zjit.h"
128
129#include "builtin.h"
130#include "shape.h"
131
132// TODO: Don't export this function in modular GC; instead, MMTk should figure out
133// how to combine GC thread backtrace with mutator thread backtrace.
134void
135rb_gc_print_backtrace(void)
136{
137 rb_print_backtrace(stderr);
138}
139
140unsigned int
141rb_gc_vm_lock(const char *file, int line)
142{
143 unsigned int lev = 0;
144 rb_vm_lock_enter(&lev, file, line);
145 return lev;
146}
147
148void
149rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
150{
151 rb_vm_lock_leave(&lev, file, line);
152}
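/* Usage sketch (annotation, not part of the original source): callers pair the
 * lock level returned by rb_gc_vm_lock() with the matching unlock call,
 * typically through the RB_GC_VM_LOCK()/RB_GC_VM_UNLOCK() macros used later
 * in this file:
 *
 *     unsigned int lev = rb_gc_vm_lock(__FILE__, __LINE__);
 *     // ... touch VM-global GC state ...
 *     rb_gc_vm_unlock(lev, __FILE__, __LINE__);
 */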
153
154unsigned int
155rb_gc_cr_lock(const char *file, int line)
156{
157 unsigned int lev;
158 rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
159 return lev;
160}
161
162void
163rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
164{
165 rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
166}
167
168unsigned int
169rb_gc_vm_lock_no_barrier(const char *file, int line)
170{
171 unsigned int lev = 0;
172 rb_vm_lock_enter_nb(&lev, file, line);
173 return lev;
174}
175
176void
177rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
178{
179 rb_vm_lock_leave_nb(&lev, file, line);
180}
181
182void
183rb_gc_vm_barrier(void)
184{
185 rb_vm_barrier();
186}
187
188void *
189rb_gc_get_ractor_newobj_cache(void)
190{
191 return GET_RACTOR()->newobj_cache;
192}
193
194#if USE_MODULAR_GC
195void
196rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
197{
198 rb_native_mutex_initialize(&context->lock);
199 context->ec = GET_EC();
200}
201
202void
203rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
204{
205 rb_native_mutex_lock(&context->lock);
206
207 GC_ASSERT(rb_current_execution_context(false) == NULL);
208
209#ifdef RB_THREAD_LOCAL_SPECIFIER
210 rb_current_ec_set(context->ec);
211#else
212 native_tls_set(ruby_current_ec_key, context->ec);
213#endif
214}
215
216void
217rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
218{
219 rb_native_mutex_unlock(&context->lock);
220
221 GC_ASSERT(rb_current_execution_context(true) == context->ec);
222
223#ifdef RB_THREAD_LOCAL_SPECIFIER
224 rb_current_ec_set(NULL);
225#else
226 native_tls_set(ruby_current_ec_key, NULL);
227#endif
228}
229#endif
230
231bool
232rb_gc_event_hook_required_p(rb_event_flag_t event)
233{
234 return ruby_vm_event_flags & event;
235}
236
237void
238rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
239{
240 if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
241
242 rb_execution_context_t *ec = GET_EC();
243 if (!ec->cfp) return;
244
245 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
246}
247
248void *
249rb_gc_get_objspace(void)
250{
251 return GET_VM()->gc.objspace;
252}
253
254
255void
256rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
257{
258 rb_ractor_t *r = NULL;
259 if (RB_LIKELY(ruby_single_main_ractor)) {
260 GC_ASSERT(
261 ccan_list_empty(&GET_VM()->ractor.set) ||
262 (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
263 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
264 );
265
266 func(ruby_single_main_ractor->newobj_cache, data);
267 }
268 else {
269 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
270 func(r->newobj_cache, data);
271 }
272 }
273}
274
275void
276rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
277{
278 volatile struct {
279 VALUE errinfo;
280 VALUE final;
281 rb_control_frame_t *cfp;
282 VALUE *sp;
283 long finished;
284 } saved;
285
286 rb_execution_context_t * volatile ec = GET_EC();
287#define RESTORE_FINALIZER() (\
288 ec->cfp = saved.cfp, \
289 ec->cfp->sp = saved.sp, \
290 ec->errinfo = saved.errinfo)
291
292 saved.errinfo = ec->errinfo;
293 saved.cfp = ec->cfp;
294 saved.sp = ec->cfp->sp;
295 saved.finished = 0;
296 saved.final = Qundef;
297
298 ASSERT_vm_unlocking();
299 rb_ractor_ignore_belonging(true);
300 EC_PUSH_TAG(ec);
301 enum ruby_tag_type state = EC_EXEC_TAG();
302 if (state != TAG_NONE) {
303 ++saved.finished; /* skip failed finalizer */
304
305 VALUE failed_final = saved.final;
306 saved.final = Qundef;
307 if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
308 rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
309 rb_ec_error_print(ec, ec->errinfo);
310 }
311 }
312
313 for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
314 saved.final = callback(i, data);
315 rb_check_funcall(saved.final, idCall, 1, &objid);
316 }
317 EC_POP_TAG();
318 rb_ractor_ignore_belonging(false);
319#undef RESTORE_FINALIZER
320}
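/* Worked example (annotation, not part of the original source): suppose
 * count == 3 and the finalizer at i == 1 raises. On the first pass
 * EC_EXEC_TAG() returns TAG_NONE and the loop runs i = 0, then i = 1, where
 * the raise longjmps back to EC_EXEC_TAG(). It now returns a non-TAG_NONE
 * state; the handler warns about the failed finalizer and bumps
 * saved.finished from 1 to 2, so the loop resumes at i = 2 with the control
 * frame, stack pointer, and errinfo restored by RESTORE_FINALIZER(). */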
321
322void
323rb_gc_set_pending_interrupt(void)
324{
325 rb_execution_context_t *ec = GET_EC();
326 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
327}
328
329void
330rb_gc_unset_pending_interrupt(void)
331{
332 rb_execution_context_t *ec = GET_EC();
333 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
334}
335
336bool
337rb_gc_multi_ractor_p(void)
338{
339 return rb_multi_ractor_p();
340}
341
342bool
343rb_gc_shutdown_call_finalizer_p(VALUE obj)
344{
345 switch (BUILTIN_TYPE(obj)) {
346 case T_DATA:
347 if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
348 if (rb_obj_is_thread(obj)) return false;
349 if (rb_obj_is_mutex(obj)) return false;
350 if (rb_obj_is_fiber(obj)) return false;
351 if (rb_ractor_p(obj)) return false;
352 if (rb_obj_is_fstring_table(obj)) return false;
353 if (rb_obj_is_symbol_table(obj)) return false;
354
355 return true;
356
357 case T_FILE:
358 return true;
359
360 case T_SYMBOL:
361 return true;
362
363 case T_NONE:
364 return false;
365
366 default:
367 return ruby_free_at_exit_p();
368 }
369}
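/* Annotation (not in the original source): ruby_free_at_exit_p() reflects the
 * opt-in RUBY_FREE_AT_EXIT=1 environment variable. Without it, only T_DATA
 * objects with both a data pointer and a dfree function, plus T_FILE and
 * T_SYMBOL, are finalized at shutdown; threads, mutexes, fibers, Ractors, and
 * the fstring/symbol tables are always skipped. */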
370
371uint32_t
372rb_gc_get_shape(VALUE obj)
373{
374 return (uint32_t)rb_obj_shape_id(obj);
375}
376
377void
378rb_gc_set_shape(VALUE obj, uint32_t shape_id)
379{
380 RBASIC_SET_SHAPE_ID(obj, (uint32_t)shape_id);
381}
382
383uint32_t
384rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
385{
387
388 return (uint32_t)rb_shape_transition_heap(obj, heap_id);
389}
390
391void rb_vm_update_references(void *ptr);
392
393#define rb_setjmp(env) RUBY_SETJMP(env)
394#define rb_jmp_buf rb_jmpbuf_t
395#undef rb_data_object_wrap
396
397#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
398#define MAP_ANONYMOUS MAP_ANON
399#endif
400
401#define unless_objspace(objspace) \
402 void *objspace; \
403 rb_vm_t *unless_objspace_vm = GET_VM(); \
404 if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
405 else /* return; or objspace will be warned uninitialized */
406
407#define RMOVED(obj) ((struct RMoved *)(obj))
408
409#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
410 if (gc_object_moved_p_internal((_objspace), (VALUE)(_thing))) { \
411 *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
412 } \
413} while (0)
414
415#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
416
417#if RUBY_MARK_FREE_DEBUG
418int ruby_gc_debug_indent = 0;
419#endif
420
421#ifndef RGENGC_OBJ_INFO
422# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
423#endif
424
425#ifndef CALC_EXACT_MALLOC_SIZE
426# define CALC_EXACT_MALLOC_SIZE 0
427#endif
428
430
431static size_t malloc_offset = 0;
432#if defined(HAVE_MALLOC_USABLE_SIZE)
433static size_t
434gc_compute_malloc_offset(void)
435{
436 // Different allocators use different metadata storage strategies which result in different
437 // ideal sizes.
438 // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
439 // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
440 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
441 // waste memory.
442 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
443 // no wasted memory.
444 size_t offset = 0;
445 for (offset = 0; offset <= 16; offset += 8) {
446 size_t allocated = (64 - offset);
447 void *test_ptr = malloc(allocated);
448 size_t wasted = malloc_usable_size(test_ptr) - allocated;
449 free(test_ptr);
450
451 if (wasted == 0) {
452 return offset;
453 }
454 }
455 return 0;
456}
457#else
458static size_t
459gc_compute_malloc_offset(void)
460{
461 // If we don't have malloc_usable_size, we use powers of 2.
462 return 0;
463}
464#endif
465
466size_t
467rb_malloc_grow_capa(size_t current, size_t type_size)
468{
469 size_t current_capacity = current;
470 if (current_capacity < 4) {
471 current_capacity = 4;
472 }
473 current_capacity *= type_size;
474
475 // We double the current capacity.
476 size_t new_capacity = (current_capacity * 2);
477
478 // And round up to the next power of 2 if it's not already one.
479 if (rb_popcount64(new_capacity) != 1) {
480 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
481 }
482
483 new_capacity -= malloc_offset;
484 new_capacity /= type_size;
485 if (current > new_capacity) {
486 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
487 }
488 RUBY_ASSERT(new_capacity > current);
489 return new_capacity;
490}
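/* Worked example (annotation, not part of the original source): growing a
 * capacity of 10 elements of 8 bytes with malloc_offset == 8 (the glibc
 * result of gc_compute_malloc_offset() above): 10 * 8 = 80 bytes, doubled to
 * 160, rounded up to the power of two 256, minus the 8-byte offset = 248
 * bytes, i.e. 31 elements. malloc(248) then fills a 256-byte glibc chunk
 * exactly, wasting nothing. */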
491
492static inline struct rbimpl_size_overflow_tag
493size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
494{
495 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
496 struct rbimpl_size_overflow_tag u = rbimpl_size_add_overflow(t.result, z);
497 return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed, u.result };
498}
499
500static inline struct rbimpl_size_overflow_tag
501size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
502{
503 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
504 struct rbimpl_size_overflow_tag u = rbimpl_size_mul_overflow(z, w);
505 struct rbimpl_size_overflow_tag v = rbimpl_size_add_overflow(t.result, u.result);
506 return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed || v.overflowed, v.result };
507}
508
509PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
510
511static inline size_t
512size_mul_or_raise(size_t x, size_t y, VALUE exc)
513{
514 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
515 if (LIKELY(!t.overflowed)) {
516 return t.result;
517 }
518 else if (rb_during_gc()) {
519 rb_memerror(); /* or...? */
520 }
521 else {
522 gc_raise(
523 exc,
524 "integer overflow: %"PRIuSIZE
525 " * %"PRIuSIZE
526 " > %"PRIuSIZE,
527 x, y, (size_t)SIZE_MAX);
528 }
529}
530
531size_t
532rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
533{
534 return size_mul_or_raise(x, y, exc);
535}
536
537static inline size_t
538size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
539{
540 struct rbimpl_size_overflow_tag t = size_mul_add_overflow(x, y, z);
541 if (LIKELY(!t.overflowed)) {
542 return t.result;
543 }
544 else if (rb_during_gc()) {
545 rb_memerror(); /* or...? */
546 }
547 else {
548 gc_raise(
549 exc,
550 "integer overflow: %"PRIuSIZE
551 " * %"PRIuSIZE
552 " + %"PRIuSIZE
553 " > %"PRIuSIZE,
554 x, y, z, (size_t)SIZE_MAX);
555 }
556}
557
558size_t
559rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
560{
561 return size_mul_add_or_raise(x, y, z, exc);
562}
563
564static inline size_t
565size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
566{
567 struct rbimpl_size_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
568 if (LIKELY(!t.overflowed)) {
569 return t.result;
570 }
571 else if (rb_during_gc()) {
572 rb_memerror(); /* or...? */
573 }
574 else {
575 gc_raise(
576 exc,
577 "integer overflow: %"PRIdSIZE
578 " * %"PRIdSIZE
579 " + %"PRIdSIZE
580 " * %"PRIdSIZE
581 " > %"PRIdSIZE,
582 x, y, z, w, (size_t)SIZE_MAX);
583 }
584}
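/* Usage sketch (annotation, not part of the original source): callers use
 * these helpers to compute allocation sizes without silent wrap-around, e.g.
 * a fixed-size prefix followed by an array of elements:
 *
 *     size_t bytes = size_mul_add_or_raise(len, sizeof(VALUE),
 *                                          sizeof(struct header), rb_eArgError);
 *
 * where `struct header` is a hypothetical stand-in for whatever prefix the
 * caller allocates; on overflow this raises instead of returning a truncated
 * size that malloc would happily satisfy. */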
585
586#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
587/* trick the compiler into thinking an external signal handler uses this */
588volatile VALUE rb_gc_guarded_val;
589volatile VALUE *
590rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
591{
592 rb_gc_guarded_val = val;
593
594 return ptr;
595}
596#endif
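/* Annotation (not in the original source): this is the out-of-line backend of
 * the RB_GC_GUARD() macro. Storing the value through a volatile global forces
 * the compiler to keep the guarded VALUE alive on the stack, e.g.:
 *
 *     VALUE str = rb_str_new_cstr("payload");
 *     const char *p = RSTRING_PTR(str);
 *     do_something(p);   // hypothetical helper; `str` itself unused here
 *     RB_GC_GUARD(str);  // keeps `str` (and thus `p`) valid until this point
 */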
597
598static const char *obj_type_name(VALUE obj);
599static st_table *id2ref_tbl;
600#include "gc/default/default.c"
601
602#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
603# error "Modular GC requires dlopen"
604#elif USE_MODULAR_GC
605#include <dlfcn.h>
606
607typedef struct gc_function_map {
608 // Bootup
609 void *(*objspace_alloc)(void);
610 void (*objspace_init)(void *objspace_ptr);
611 void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
612 void (*set_params)(void *objspace_ptr);
613 void (*init)(void);
614 size_t *(*heap_sizes)(void *objspace_ptr);
615 // Shutdown
616 void (*shutdown_free_objects)(void *objspace_ptr);
617 void (*objspace_free)(void *objspace_ptr);
618 void (*ractor_cache_free)(void *objspace_ptr, void *cache);
619 // GC
620 void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
621 bool (*during_gc_p)(void *objspace_ptr);
622 void (*prepare_heap)(void *objspace_ptr);
623 void (*gc_enable)(void *objspace_ptr);
624 void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
625 bool (*gc_enabled_p)(void *objspace_ptr);
626 VALUE (*config_get)(void *objspace_ptr);
627 void (*config_set)(void *objspace_ptr, VALUE hash);
628 void (*stress_set)(void *objspace_ptr, VALUE flag);
629 VALUE (*stress_get)(void *objspace_ptr);
630 // Object allocation
631 VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
632 size_t (*obj_slot_size)(VALUE obj);
633 size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
634 bool (*size_allocatable_p)(size_t size);
635 // Malloc
636 void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
637 void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
638 void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
639 void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
640 void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
641 // Marking
642 void (*mark)(void *objspace_ptr, VALUE obj);
643 void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
644 void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
645 void (*mark_maybe)(void *objspace_ptr, VALUE obj);
646 // Weak references
647 void (*declare_weak_references)(void *objspace_ptr, VALUE obj);
648 bool (*handle_weak_references_alive_p)(void *objspace_ptr, VALUE obj);
649 // Compaction
650 void (*register_pinning_obj)(void *objspace_ptr, VALUE obj);
651 bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
652 VALUE (*location)(void *objspace_ptr, VALUE value);
653 // Write barriers
654 void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
655 void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
656 void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
657 // Heap walking
658 void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
659 void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
660 // Finalizers
661 void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
662 VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
663 void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
664 void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
665 void (*shutdown_call_finalizer)(void *objspace_ptr);
666 // Forking
667 void (*before_fork)(void *objspace_ptr);
668 void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
669 // Statistics
670 void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
671 bool (*get_measure_total_time)(void *objspace_ptr);
672 unsigned long long (*get_total_time)(void *objspace_ptr);
673 size_t (*gc_count)(void *objspace_ptr);
674 VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
675 VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
676 VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
677 const char *(*active_gc_name)(void);
678 // Miscellaneous
679 struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
680 bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
681 bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
682 void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
683 void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
684
685 bool modular_gc_loaded_p;
686} rb_gc_function_map_t;
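/* Annotation (not in the original source): a modular GC shared library is
 * expected to export one "rb_gc_impl_"-prefixed symbol per slot above, e.g. a
 * minimal sketch:
 *
 *     struct my_objspace { int dummy; };    // library-defined state
 *
 *     void *
 *     rb_gc_impl_objspace_alloc(void)
 *     {
 *         return calloc(1, sizeof(struct my_objspace));
 *     }
 *
 * ruby_modular_gc_init() below resolves every slot with dlsym() and aborts
 * startup if any export is missing. */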
687
688static rb_gc_function_map_t rb_gc_functions;
689
690# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
691# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)
692
693static void
694ruby_modular_gc_init(void)
695{
696 // Assert that the directory path ends with a /
697 RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
698
699 const char *gc_so_file = getenv(RUBY_GC_LIBRARY);
700
701 rb_gc_function_map_t gc_functions = { 0 };
702
703 char *gc_so_path = NULL;
704 void *handle = NULL;
705 if (gc_so_file) {
706 /* Check to make sure that gc_so_file matches /[A-Za-z0-9_-]+/ so that it does
707 * not load a shared object outside of the directory. */
708 for (size_t i = 0; i < strlen(gc_so_file); i++) {
709 char c = gc_so_file[i];
710 if (isalnum(c)) continue;
711 switch (c) {
712 case '-':
713 case '_':
714 break;
715 default:
716 fprintf(stderr, "Only alphanumeric characters, dashes, and underscores are allowed in "RUBY_GC_LIBRARY"\n");
717 exit(EXIT_FAILURE);
718 }
719 }
720
721 size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
722#ifdef LOAD_RELATIVE
723 Dl_info dli;
724 size_t prefix_len = 0;
725 if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
726 const char *base = strrchr(dli.dli_fname, '/');
727 if (base) {
728 size_t tail = 0;
729# define end_with_p(lit) \
730 (prefix_len >= (tail = rb_strlen_lit(lit)) && \
731 memcmp(base - tail, lit, tail) == 0)
732
733 prefix_len = base - dli.dli_fname;
734 if (end_with_p("/bin") || end_with_p("/lib")) {
735 prefix_len -= tail;
736 }
737 prefix_len += MODULAR_GC_DIR[0] != '/';
738 gc_so_path_size += prefix_len;
739 }
740 }
741#endif
742 gc_so_path = alloca(gc_so_path_size);
743 {
744 size_t gc_so_path_idx = 0;
745#define GC_SO_PATH_APPEND(str) do { \
746 gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
747} while (0)
748#ifdef LOAD_RELATIVE
749 if (prefix_len > 0) {
750 memcpy(gc_so_path, dli.dli_fname, prefix_len);
751 gc_so_path_idx = prefix_len;
752 }
753#endif
754 GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
755 GC_SO_PATH_APPEND(gc_so_file);
756 GC_SO_PATH_APPEND(DLEXT);
757 GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
758#undef GC_SO_PATH_APPEND
759 }
760
761 handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
762 if (!handle) {
763 fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
764 exit(EXIT_FAILURE);
765 }
766
767 gc_functions.modular_gc_loaded_p = true;
768 }
769
770 unsigned int err_count = 0;
771
772# define load_modular_gc_func(name) do { \
773 if (handle) { \
774 const char *func_name = "rb_gc_impl_" #name; \
775 gc_functions.name = dlsym(handle, func_name); \
776 if (!gc_functions.name) { \
777 fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
778 err_count++; \
779 } \
780 } \
781 else { \
782 gc_functions.name = rb_gc_impl_##name; \
783 } \
784} while (0)
785
786 // Bootup
787 load_modular_gc_func(objspace_alloc);
788 load_modular_gc_func(objspace_init);
789 load_modular_gc_func(ractor_cache_alloc);
790 load_modular_gc_func(set_params);
791 load_modular_gc_func(init);
792 load_modular_gc_func(heap_sizes);
793 // Shutdown
794 load_modular_gc_func(shutdown_free_objects);
795 load_modular_gc_func(objspace_free);
796 load_modular_gc_func(ractor_cache_free);
797 // GC
798 load_modular_gc_func(start);
799 load_modular_gc_func(during_gc_p);
800 load_modular_gc_func(prepare_heap);
801 load_modular_gc_func(gc_enable);
802 load_modular_gc_func(gc_disable);
803 load_modular_gc_func(gc_enabled_p);
804 load_modular_gc_func(config_set);
805 load_modular_gc_func(config_get);
806 load_modular_gc_func(stress_set);
807 load_modular_gc_func(stress_get);
808 // Object allocation
809 load_modular_gc_func(new_obj);
810 load_modular_gc_func(obj_slot_size);
811 load_modular_gc_func(heap_id_for_size);
812 load_modular_gc_func(size_allocatable_p);
813 // Malloc
814 load_modular_gc_func(malloc);
815 load_modular_gc_func(calloc);
816 load_modular_gc_func(realloc);
817 load_modular_gc_func(free);
818 load_modular_gc_func(adjust_memory_usage);
819 // Marking
820 load_modular_gc_func(mark);
821 load_modular_gc_func(mark_and_move);
822 load_modular_gc_func(mark_and_pin);
823 load_modular_gc_func(mark_maybe);
824 // Weak references
825 load_modular_gc_func(declare_weak_references);
826 load_modular_gc_func(handle_weak_references_alive_p);
827 // Compaction
828 load_modular_gc_func(register_pinning_obj);
829 load_modular_gc_func(object_moved_p);
830 load_modular_gc_func(location);
831 // Write barriers
832 load_modular_gc_func(writebarrier);
833 load_modular_gc_func(writebarrier_unprotect);
834 load_modular_gc_func(writebarrier_remember);
835 // Heap walking
836 load_modular_gc_func(each_objects);
837 load_modular_gc_func(each_object);
838 // Finalizers
839 load_modular_gc_func(make_zombie);
840 load_modular_gc_func(define_finalizer);
841 load_modular_gc_func(undefine_finalizer);
842 load_modular_gc_func(copy_finalizer);
843 load_modular_gc_func(shutdown_call_finalizer);
844 // Forking
845 load_modular_gc_func(before_fork);
846 load_modular_gc_func(after_fork);
847 // Statistics
848 load_modular_gc_func(set_measure_total_time);
849 load_modular_gc_func(get_measure_total_time);
850 load_modular_gc_func(get_total_time);
851 load_modular_gc_func(gc_count);
852 load_modular_gc_func(latest_gc_info);
853 load_modular_gc_func(stat);
854 load_modular_gc_func(stat_heap);
855 load_modular_gc_func(active_gc_name);
856 // Miscellaneous
857 load_modular_gc_func(object_metadata);
858 load_modular_gc_func(pointer_to_heap_p);
859 load_modular_gc_func(garbage_object_p);
860 load_modular_gc_func(set_event_hook);
861 load_modular_gc_func(copy_attributes);
862
863 if (err_count > 0) {
864 fprintf(stderr, "ruby_modular_gc_init: found %u missing exports in library %s\n", err_count, gc_so_path);
865 exit(EXIT_FAILURE);
866 }
867
868# undef load_modular_gc_func
869
870 rb_gc_functions = gc_functions;
871}
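/* Usage sketch (annotation, not part of the original source): on a build with
 * modular GC enabled, an alternative implementation is selected at startup:
 *
 *     $ RUBY_GC_LIBRARY=mmtk ruby script.rb
 *
 * This resolves to <modular_gc_dir>/librubygc.mmtk.<DLEXT> and dlopen()s it;
 * a name containing anything other than alphanumeric characters, dashes, or
 * underscores aborts with the error above. */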
872
873// Bootup
874# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
875# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
876# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
877# define rb_gc_impl_set_params rb_gc_functions.set_params
878# define rb_gc_impl_init rb_gc_functions.init
879# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
880// Shutdown
881# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
882# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
883# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
884// GC
885# define rb_gc_impl_start rb_gc_functions.start
886# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
887# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
888# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
889# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
890# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
891# define rb_gc_impl_config_get rb_gc_functions.config_get
892# define rb_gc_impl_config_set rb_gc_functions.config_set
893# define rb_gc_impl_stress_set rb_gc_functions.stress_set
894# define rb_gc_impl_stress_get rb_gc_functions.stress_get
895// Object allocation
896# define rb_gc_impl_new_obj rb_gc_functions.new_obj
897# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
898# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
899# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
900// Malloc
901# define rb_gc_impl_malloc rb_gc_functions.malloc
902# define rb_gc_impl_calloc rb_gc_functions.calloc
903# define rb_gc_impl_realloc rb_gc_functions.realloc
904# define rb_gc_impl_free rb_gc_functions.free
905# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
906// Marking
907# define rb_gc_impl_mark rb_gc_functions.mark
908# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
909# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
910# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
911// Weak references
912# define rb_gc_impl_declare_weak_references rb_gc_functions.declare_weak_references
913# define rb_gc_impl_handle_weak_references_alive_p rb_gc_functions.handle_weak_references_alive_p
914// Compaction
915# define rb_gc_impl_register_pinning_obj rb_gc_functions.register_pinning_obj
916# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
917# define rb_gc_impl_location rb_gc_functions.location
918// Write barriers
919# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
920# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
921# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
922// Heap walking
923# define rb_gc_impl_each_objects rb_gc_functions.each_objects
924# define rb_gc_impl_each_object rb_gc_functions.each_object
925// Finalizers
926# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
927# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
928# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
929# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
930# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
931// Forking
932# define rb_gc_impl_before_fork rb_gc_functions.before_fork
933# define rb_gc_impl_after_fork rb_gc_functions.after_fork
934// Statistics
935# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
936# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
937# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
938# define rb_gc_impl_gc_count rb_gc_functions.gc_count
939# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
940# define rb_gc_impl_stat rb_gc_functions.stat
941# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
942# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
943// Miscellaneous
944# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
945# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
946# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
947# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
948# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
949#endif
950
951#ifdef RUBY_ASAN_ENABLED
952static void
953asan_death_callback(void)
954{
955 if (GET_VM()) {
956 rb_bug_without_die("ASAN error");
957 }
958}
959#endif
960
961static VALUE initial_stress = Qfalse;
962
963void *
964rb_objspace_alloc(void)
965{
966#if USE_MODULAR_GC
967 ruby_modular_gc_init();
968#endif
969
970 void *objspace = rb_gc_impl_objspace_alloc();
971 ruby_current_vm_ptr->gc.objspace = objspace;
972 rb_gc_impl_objspace_init(objspace);
973 rb_gc_impl_stress_set(objspace, initial_stress);
974
975#ifdef RUBY_ASAN_ENABLED
976 __sanitizer_set_death_callback(asan_death_callback);
977#endif
978
979 return objspace;
980}
981
982void
983rb_objspace_free(void *objspace)
984{
985 rb_gc_impl_objspace_free(objspace);
986}
987
988size_t
989rb_gc_obj_slot_size(VALUE obj)
990{
991 return rb_gc_impl_obj_slot_size(obj);
992}
993
994static inline void
995gc_validate_pc(VALUE obj)
996{
997#if RUBY_DEBUG
998 // IMEMOs and objects without a class (e.g. managed ID table) are not traceable
999 if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;
1000
1001 rb_execution_context_t *ec = GET_EC();
1002 const rb_control_frame_t *cfp = ec->cfp;
1003 if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && CFP_PC(cfp)) {
1004 const VALUE *iseq_encoded = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded;
1005 const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(CFP_ISEQ(cfp))->iseq_size;
1006 RUBY_ASSERT(CFP_PC(cfp) >= iseq_encoded, "PC not set when allocating, breaking tracing");
1007 RUBY_ASSERT(CFP_PC(cfp) <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
1008 }
1009#endif
1010}
1011
1012NOINLINE(static void gc_newobj_hook(VALUE obj));
1013static void
1014gc_newobj_hook(VALUE obj)
1015{
1016 int lev = RB_GC_VM_LOCK_NO_BARRIER();
1017 {
1018 size_t slot_size = rb_gc_obj_slot_size(obj);
1019 memset((char *)obj + sizeof(struct RBasic), 0, slot_size - sizeof(struct RBasic));
1020
1021 /* We must disable GC here because the callback could call xmalloc,
1022 * which could potentially trigger a GC. A lot of code is unsafe
1023 * to run right after an object has been allocated, because it
1024 * performs initialization for the object and assumes that the
1025 * GC does not trigger before then. */
1026 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1027 {
1028 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
1029 }
1030 if (!gc_disabled) rb_gc_enable();
1031 }
1032 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
1033}
1034
1035static inline VALUE
1036newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t size)
1037{
1038 VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);
1039 RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);
1040
1041 gc_validate_pc(obj);
1042
1043 if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
1044 gc_newobj_hook(obj);
1045 }
1046
1047#if RGENGC_CHECK_MODE
1048# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
1049# define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
1050# endif
1051
1052 memset(
1053 (void *)(obj + sizeof(struct RBasic)),
1054 GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
1055 rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
1056 );
1057#endif
1058
1059 return obj;
1060}
1061
1062VALUE
1063rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
1064{
1065 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1066 return newobj_of(GET_RACTOR(), klass, flags, shape_id, FALSE, size);
1067}
1068
1069VALUE
1070rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
1071{
1072 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1073 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, shape_id, TRUE, size);
1074}
1075
1076VALUE
1077rb_class_allocate_instance(VALUE klass)
1078{
1079 uint32_t index_tbl_num_entries = RCLASS_MAX_IV_COUNT(klass);
1080
1081 size_t size = rb_obj_embedded_size(index_tbl_num_entries);
1082 if (!rb_gc_size_allocatable_p(size)) {
1083 size = sizeof(struct RObject);
1084 }
1085
1086 // There might be a NEWOBJ tracepoint callback, and it may set fields.
1087 // So the shape must be passed to `NEWOBJ_OF`.
1088 VALUE flags = T_OBJECT | (RGENGC_WB_PROTECTED_OBJECT ? FL_WB_PROTECTED : 0);
1089 NEWOBJ_OF_WITH_SHAPE(o, struct RObject, klass, flags, rb_shape_root(rb_gc_heap_id_for_size(size)), size, 0);
1090 VALUE obj = (VALUE)o;
1091
1092#if RUBY_DEBUG
1093 RUBY_ASSERT(!rb_shape_obj_too_complex_p(obj));
1094 VALUE *ptr = ROBJECT_FIELDS(obj);
1095 size_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
1096 for (size_t i = fields_count; i < ROBJECT_FIELDS_CAPACITY(obj); i++) {
1097 ptr[i] = Qundef;
1098 }
1099 if (rb_obj_class(obj) != rb_class_real(klass)) {
1100 rb_bug("Expected rb_class_allocate_instance to set the class correctly");
1101 }
1102#endif
1103
1104 return obj;
1105}
1106
1107void
1108rb_gc_register_pinning_obj(VALUE obj)
1109{
1110 rb_gc_impl_register_pinning_obj(rb_gc_get_objspace(), obj);
1111}
1112
1113#define UNEXPECTED_NODE(func) \
1114 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1115 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1116
1117static inline void
1118rb_data_object_check(VALUE klass)
1119{
1120 RUBY_ASSERT(!RCLASS_SINGLETON_P(klass));
1121 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1122 rb_undef_alloc_func(klass);
1123 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1124 }
1125}
1126
1127VALUE
1128rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1129{
1130 RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
1131 if (klass) rb_data_object_check(klass);
1132 VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA, ROOT_SHAPE_ID, !dmark, sizeof(struct RTypedData));
1133
1134 rb_gc_register_pinning_obj(obj);
1135
1136 struct RData *data = (struct RData *)obj;
1137 data->dmark = dmark;
1138 data->dfree = dfree;
1139 data->data = datap;
1140
1141 return obj;
1142}
1143
1144VALUE
1145rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1146{
1147 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1148 DATA_PTR(obj) = xcalloc(1, size);
1149 return obj;
1150}
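/* Usage sketch (annotation, not part of the original source): wrapping a
 * hypothetical C struct with the untyped Data API:
 *
 *     struct point { double x, y; };
 *     VALUE obj = rb_data_object_zalloc(klass, sizeof(struct point),
 *                                       NULL, RUBY_DEFAULT_FREE);
 *
 * A NULL dmark makes the wrapper write-barrier protected (note the `!dmark`
 * argument to newobj_of() in rb_data_object_wrap() above); new code should
 * generally prefer the TypedData API below. */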
1151
1152#define RTYPEDDATA_EMBEDDED_P rbimpl_typeddata_embedded_p
1153#define RB_DATA_TYPE_EMBEDDABLE_P(type) ((type)->flags & RUBY_TYPED_EMBEDDABLE)
1154#define RTYPEDDATA_EMBEDDABLE_P(obj) RB_DATA_TYPE_EMBEDDABLE_P(RTYPEDDATA_TYPE(obj))
1155
1156static VALUE
1157typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1158{
1159 RBIMPL_NONNULL_ARG(type);
1160 if (klass) rb_data_object_check(klass);
1161 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1162 VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, ROOT_SHAPE_ID, wb_protected, size);
1163
1164 rb_gc_register_pinning_obj(obj);
1165
1166 struct RTypedData *data = (struct RTypedData *)obj;
1167 data->fields_obj = 0;
1168 *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
1169 data->data = datap;
1170
1171 return obj;
1172}
1173
1174VALUE
1175rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1176{
1177 if (UNLIKELY(RB_DATA_TYPE_EMBEDDABLE_P(type))) {
1178 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1179 }
1180
1181 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1182}
1183
1184VALUE
1185rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1186{
1187 if (RB_DATA_TYPE_EMBEDDABLE_P(type)) {
1188 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1189 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1190 }
1191
1192 size_t embed_size = offsetof(struct RTypedData, data) + size;
1193 if (rb_gc_size_allocatable_p(embed_size)) {
1194 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1195 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1196 return obj;
1197 }
1198 }
1199
1200 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1201 DATA_PTR(obj) = xcalloc(1, size);
1202 return obj;
1203}
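/* Usage sketch (annotation, not part of the original source): a minimal type
 * descriptor for the TypedData path, with a hypothetical payload struct:
 *
 *     static const rb_data_type_t point_type = {
 *         .wrap_struct_name = "point",
 *         .function = {
 *             .dmark = NULL,
 *             .dfree = RUBY_TYPED_DEFAULT_FREE,
 *             .dsize = NULL,
 *         },
 *         .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE,
 *     };
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct point),
 *                                             &point_type);
 *
 * With RUBY_TYPED_EMBEDDABLE set (which, per the check above, requires
 * RUBY_TYPED_FREE_IMMEDIATELY), small payloads are stored inline in the
 * object slot instead of through a separate malloc. */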
1204
1205static size_t
1206rb_objspace_data_type_memsize(VALUE obj)
1207{
1208 size_t size = 0;
1209 if (RTYPEDDATA_P(obj)) {
1210 const void *ptr = RTYPEDDATA_GET_DATA(obj);
1211
1212 if (ptr) {
1213 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1214 if (RTYPEDDATA_EMBEDDABLE_P(obj) && !RTYPEDDATA_EMBEDDED_P(obj)) {
1215#ifdef HAVE_MALLOC_USABLE_SIZE
1216 size += malloc_usable_size((void *)ptr);
1217#endif
1218 }
1219
1220 if (type->function.dsize) {
1221 size += type->function.dsize(ptr);
1222 }
1223 }
1224 }
1225
1226 return size;
1227}
1228
1229const char *
1230rb_objspace_data_type_name(VALUE obj)
1231{
1232 if (RTYPEDDATA_P(obj)) {
1233 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1234 }
1235 else {
1236 return 0;
1237 }
1238}
1239
1240void
1241rb_gc_declare_weak_references(VALUE obj)
1242{
1243 rb_gc_impl_declare_weak_references(rb_gc_get_objspace(), obj);
1244}
1245
1246bool
1247rb_gc_handle_weak_references_alive_p(VALUE obj)
1248{
1249 if (SPECIAL_CONST_P(obj)) return true;
1250
1251 return rb_gc_impl_handle_weak_references_alive_p(rb_gc_get_objspace(), obj);
1252}
1253
1254void
1255rb_gc_handle_weak_references(VALUE obj)
1256{
1257 switch (BUILTIN_TYPE(obj)) {
1258 case T_DATA:
1259 if (RTYPEDDATA_P(obj)) {
1260 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1261
1262 if (type->function.handle_weak_references) {
1263 (type->function.handle_weak_references)(RTYPEDDATA_GET_DATA(obj));
1264 }
1265 else {
1266 rb_bug(
1267 "rb_gc_handle_weak_references: TypedData %s does not implement handle_weak_references",
1268 RTYPEDDATA_TYPE(obj)->wrap_struct_name
1269 );
1270 }
1271 }
1272 else {
1273 rb_bug("rb_gc_handle_weak_references: unknown T_DATA");
1274 }
1275 break;
1276
1277 case T_IMEMO: {
1278 GC_ASSERT(imemo_type(obj) == imemo_callcache);
1279
1280 struct rb_callcache *cc = (struct rb_callcache *)obj;
1281 if (cc->klass != Qundef &&
1282 (!rb_gc_handle_weak_references_alive_p(cc->klass) ||
1283 !rb_gc_handle_weak_references_alive_p((VALUE)cc->cme_))) {
1284 vm_cc_invalidate(cc);
1285 }
1286
1287 break;
1288 }
1289 default:
1290 rb_bug("rb_gc_handle_weak_references: type not supported");
1291 }
1292}
1293
1294/*
1295 * Returns true if the object requires a full rb_gc_obj_free() call during sweep,
1296 * false if it can be freed quickly without calling destructors or cleanup.
1297 *
1298 * Objects that return false are:
1299 * - Simple embedded objects without external allocations
1300 * - Objects without finalizers
1301 * - Objects without object IDs registered in id2ref
1302 * - Objects without generic instance variables
1303 *
1304 * This is used by the GC sweep fast path to avoid function call overhead
1305 * for the majority of simple objects.
1306 */
1307bool
1308rb_gc_obj_needs_cleanup_p(VALUE obj)
1309{
1310 VALUE flags = RBASIC(obj)->flags;
1311
1312 if (flags & FL_FINALIZE) return true;
1313
1314 switch (flags & RUBY_T_MASK) {
1315 case T_IMEMO:
1316 switch (imemo_type(obj)) {
1317 case imemo_constcache:
1318 case imemo_cref:
1319 case imemo_ifunc:
1320 case imemo_memo:
1321 case imemo_svar:
1322 case imemo_callcache:
1323 case imemo_throw_data:
1324 return false;
1325 default:
1326 return true;
1327 }
1328
1329 case T_DATA:
1330 case T_OBJECT:
1331 case T_STRING:
1332 case T_ARRAY:
1333 case T_HASH:
1334 case T_BIGNUM:
1335 case T_STRUCT:
1336 case T_FLOAT:
1337 case T_RATIONAL:
1338 case T_COMPLEX:
1339 break;
1340
1341 case T_FILE:
1342 case T_SYMBOL:
1343 case T_CLASS:
1344 case T_ICLASS:
1345 case T_MODULE:
1346 case T_REGEXP:
1347 case T_MATCH:
1348 return true;
1349 }
1350
1351 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1352 if (id2ref_tbl && rb_shape_has_object_id(shape_id)) return true;
1353
1354 switch (flags & RUBY_T_MASK) {
1355 case T_OBJECT:
1356 if (flags & ROBJECT_HEAP) return true;
1357 return false;
1358
1359 case T_DATA:
1360 if (flags & RUBY_TYPED_FL_IS_TYPED_DATA) {
1361 uintptr_t type = (uintptr_t)RTYPEDDATA(obj)->type;
1362 if (type & TYPED_DATA_EMBEDDED) {
1363 RUBY_DATA_FUNC dfree = ((const rb_data_type_t *)(type & TYPED_DATA_PTR_MASK))->function.dfree;
1364 if (dfree == RUBY_NEVER_FREE || dfree == RUBY_TYPED_DEFAULT_FREE) {
1365 return false;
1366 }
1367 }
1368 }
1369 return true;
1370
1371 case T_STRING:
1372 if (flags & (RSTRING_NOEMBED | RSTRING_FSTR)) return true;
1373 return rb_shape_has_fields(shape_id);
1374
1375 case T_ARRAY:
1376 if (!(flags & RARRAY_EMBED_FLAG)) return true;
1377 return rb_shape_has_fields(shape_id);
1378
1379 case T_HASH:
1380 if (flags & RHASH_ST_TABLE_FLAG) return true;
1381 return rb_shape_has_fields(shape_id);
1382
1383 case T_BIGNUM:
1384 if (!(flags & BIGNUM_EMBED_FLAG)) return true;
1385 return rb_shape_has_fields(shape_id);
1386
1387 case T_STRUCT:
1388 if (!(flags & RSTRUCT_EMBED_LEN_MASK)) return true;
1389 if (flags & RSTRUCT_GEN_FIELDS) return rb_shape_has_fields(shape_id);
1390 return false;
1391
1392 case T_FLOAT:
1393 case T_RATIONAL:
1394 case T_COMPLEX:
1395 return rb_shape_has_fields(shape_id);
1396
1397 default:
1398 UNREACHABLE_RETURN(true);
1399 }
1400}
1401
1402static void
1403io_fptr_finalize(void *fptr)
1404{
1405 rb_io_fptr_finalize((struct rb_io *)fptr);
1406}
1407
1408static inline void
1409make_io_zombie(void *objspace, VALUE obj)
1410{
1411 rb_io_t *fptr = RFILE(obj)->fptr;
1412 rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
1413}
1414
1415static bool
1416rb_data_free(void *objspace, VALUE obj)
1417{
1418 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1419 if (data) {
1420 int free_immediately = false;
1421 void (*dfree)(void *);
1422
1423 if (RTYPEDDATA_P(obj)) {
1424 free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1425 dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
1426 }
1427 else {
1428 dfree = RDATA(obj)->dfree;
1429 }
1430
1431 if (dfree) {
1432 if (dfree == RUBY_DEFAULT_FREE) {
1433 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1434 xfree(data);
1435 RB_DEBUG_COUNTER_INC(obj_data_xfree);
1436 }
1437 }
1438 else if (free_immediately) {
1439 (*dfree)(data);
1440 if (RTYPEDDATA_EMBEDDABLE_P(obj) && !RTYPEDDATA_EMBEDDED_P(obj)) {
1441 xfree(data);
1442 }
1443
1444 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1445 }
1446 else {
1447 rb_gc_impl_make_zombie(objspace, obj, dfree, data);
1448 RB_DEBUG_COUNTER_INC(obj_data_zombie);
1449 return FALSE;
1450 }
1451 }
1452 else {
1453 RB_DEBUG_COUNTER_INC(obj_data_empty);
1454 }
1455 }
1456
1457 return true;
1458}
1459
1460struct classext_foreach_args {
1461 VALUE klass;
1462 rb_objspace_t *objspace; // used for update_*
1463};
1464
1465static void
1466classext_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
1467{
1468 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1469
1470 rb_class_classext_free(args->klass, ext, is_prime);
1471}
1472
1473static void
1474classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
1475{
1476 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1477
1478 rb_iclass_classext_free(args->klass, ext, is_prime);
1479}
1480
1481bool
1482rb_gc_obj_free(void *objspace, VALUE obj)
1483{
1484 struct classext_foreach_args args;
1485
1486 RB_DEBUG_COUNTER_INC(obj_free);
1487
1488 switch (BUILTIN_TYPE(obj)) {
1489 case T_NIL:
1490 case T_FIXNUM:
1491 case T_TRUE:
1492 case T_FALSE:
1493 rb_bug("obj_free() called for broken object");
1494 break;
1495 default:
1496 break;
1497 }
1498
1499 switch (BUILTIN_TYPE(obj)) {
1500 case T_OBJECT:
1501 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
1502 if (rb_shape_obj_too_complex_p(obj)) {
1503 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
1504 st_free_table(ROBJECT_FIELDS_HASH(obj));
1505 }
1506 else {
1507 SIZED_FREE_N(ROBJECT(obj)->as.heap.fields, ROBJECT_FIELDS_CAPACITY(obj));
1508 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1509 }
1510 }
1511 else {
1512 RB_DEBUG_COUNTER_INC(obj_obj_embed);
1513 }
1514 break;
1515 case T_MODULE:
1516 case T_CLASS:
1517#if USE_ZJIT
1518 rb_zjit_klass_free(obj);
1519#endif
1520 args.klass = obj;
1521 rb_class_classext_foreach(obj, classext_free, (void *)&args);
1522 if (RCLASS_CLASSEXT_TBL(obj)) {
1523 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1524 }
1525 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1526 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1527 break;
1528 case T_STRING:
1529 rb_str_free(obj);
1530 break;
1531 case T_ARRAY:
1532 rb_ary_free(obj);
1533 break;
1534 case T_HASH:
1535#if USE_DEBUG_COUNTER
1536 switch (RHASH_SIZE(obj)) {
1537 case 0:
1538 RB_DEBUG_COUNTER_INC(obj_hash_empty);
1539 break;
1540 case 1:
1541 RB_DEBUG_COUNTER_INC(obj_hash_1);
1542 break;
1543 case 2:
1544 RB_DEBUG_COUNTER_INC(obj_hash_2);
1545 break;
1546 case 3:
1547 RB_DEBUG_COUNTER_INC(obj_hash_3);
1548 break;
1549 case 4:
1550 RB_DEBUG_COUNTER_INC(obj_hash_4);
1551 break;
1552 case 5:
1553 case 6:
1554 case 7:
1555 case 8:
1556 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1557 break;
1558 default:
1559 GC_ASSERT(RHASH_SIZE(obj) > 8);
1560 RB_DEBUG_COUNTER_INC(obj_hash_g8);
1561 }
1562
1563 if (RHASH_AR_TABLE_P(obj)) {
1564 if (RHASH_AR_TABLE(obj) == NULL) {
1565 RB_DEBUG_COUNTER_INC(obj_hash_null);
1566 }
1567 else {
1568 RB_DEBUG_COUNTER_INC(obj_hash_ar);
1569 }
1570 }
1571 else {
1572 RB_DEBUG_COUNTER_INC(obj_hash_st);
1573 }
1574#endif
1575
1576 rb_hash_free(obj);
1577 break;
1578 case T_REGEXP:
1579 if (RREGEXP(obj)->ptr) {
1580 onig_free(RREGEXP(obj)->ptr);
1581 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1582 }
1583 break;
1584 case T_DATA:
1585 if (!rb_data_free(objspace, obj)) return false;
1586 break;
1587 case T_MATCH:
1588 {
1589 rb_matchext_t *rm = RMATCH_EXT(obj);
1590#if USE_DEBUG_COUNTER
1591 if (rm->regs.num_regs >= 8) {
1592 RB_DEBUG_COUNTER_INC(obj_match_ge8);
1593 }
1594 else if (rm->regs.num_regs >= 4) {
1595 RB_DEBUG_COUNTER_INC(obj_match_ge4);
1596 }
1597 else if (rm->regs.num_regs >= 1) {
1598 RB_DEBUG_COUNTER_INC(obj_match_under4);
1599 }
1600#endif
1601 onig_region_free(&rm->regs, 0);
1602 SIZED_FREE_N(rm->char_offset, rm->char_offset_num_allocated);
1603
1604 RB_DEBUG_COUNTER_INC(obj_match_ptr);
1605 }
1606 break;
1607 case T_FILE:
1608 if (RFILE(obj)->fptr) {
1609 make_io_zombie(objspace, obj);
1610 RB_DEBUG_COUNTER_INC(obj_file_ptr);
1611 return FALSE;
1612 }
1613 break;
1614 case T_RATIONAL:
1615 RB_DEBUG_COUNTER_INC(obj_rational);
1616 break;
1617 case T_COMPLEX:
1618 RB_DEBUG_COUNTER_INC(obj_complex);
1619 break;
1620 case T_MOVED:
1621 break;
1622 case T_ICLASS:
1623 args.klass = obj;
1624
1625 rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
1626 if (RCLASS_CLASSEXT_TBL(obj)) {
1627 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1628 }
1629
1630 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1631 break;
1632
1633 case T_FLOAT:
1634 RB_DEBUG_COUNTER_INC(obj_float);
1635 break;
1636
1637 case T_BIGNUM:
1638 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1639 SIZED_FREE_N(BIGNUM_DIGITS(obj), BIGNUM_LEN(obj));
1640 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1641 }
1642 else {
1643 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1644 }
1645 break;
1646
1647 case T_NODE:
1648 UNEXPECTED_NODE(obj_free);
1649 break;
1650
1651 case T_STRUCT:
1652 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1653 RSTRUCT(obj)->as.heap.ptr == NULL) {
1654 RB_DEBUG_COUNTER_INC(obj_struct_embed);
1655 }
1656 else {
1657 SIZED_FREE_N(RSTRUCT(obj)->as.heap.ptr, RSTRUCT(obj)->as.heap.len);
1658 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1659 }
1660 break;
1661
1662 case T_SYMBOL:
1663 RB_DEBUG_COUNTER_INC(obj_symbol);
1664 break;
1665
1666 case T_IMEMO:
1667 rb_imemo_free((VALUE)obj);
1668 break;
1669
1670 default:
1671 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1672 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1673 }
1674
1675 if (FL_TEST_RAW(obj, FL_FINALIZE)) {
1676 rb_gc_impl_make_zombie(objspace, obj, 0, 0);
1677 return FALSE;
1678 }
1679 else {
1680 return TRUE;
1681 }
1682}
1683
1684void
1685rb_objspace_set_event_hook(const rb_event_flag_t event)
1686{
1687 rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1688}
1689
1690static int
1691internal_object_p(VALUE obj)
1692{
1693 void *ptr = asan_unpoison_object_temporary(obj);
1694
1695 if (RBASIC(obj)->flags) {
1696 switch (BUILTIN_TYPE(obj)) {
1697 case T_NODE:
1698 UNEXPECTED_NODE(internal_object_p);
1699 break;
1700 case T_NONE:
1701 case T_MOVED:
1702 case T_IMEMO:
1703 case T_ICLASS:
1704 case T_ZOMBIE:
1705 break;
1706 case T_CLASS:
1707 if (obj == rb_mRubyVMFrozenCore)
1708 return 1;
1709
1710 if (!RBASIC_CLASS(obj)) break;
1711 if (RCLASS_SINGLETON_P(obj)) {
1712 return rb_singleton_class_internal_p(obj);
1713 }
1714 return 0;
1715 default:
1716 if (!RBASIC(obj)->klass) break;
1717 return 0;
1718 }
1719 }
1720 if (ptr || !RBASIC(obj)->flags) {
1721 rb_asan_poison_object(obj);
1722 }
1723 return 1;
1724}
1725
1726int
1727rb_objspace_internal_object_p(VALUE obj)
1728{
1729 return internal_object_p(obj);
1730}
1731
1732struct os_each_struct {
1733 size_t num;
1734 VALUE of;
1735};
1736
1737static int
1738os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1739{
1740 struct os_each_struct *oes = (struct os_each_struct *)data;
1741
1742 VALUE v = (VALUE)vstart;
1743 for (; v != (VALUE)vend; v += stride) {
1744 if (!internal_object_p(v)) {
1745 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1746 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1747 rb_yield(v);
1748 oes->num++;
1749 }
1750 }
1751 }
1752 }
1753
1754 return 0;
1755}
1756
1757static VALUE
1758os_obj_of(VALUE of)
1759{
1760 struct os_each_struct oes;
1761
1762 oes.num = 0;
1763 oes.of = of;
1764 rb_objspace_each_objects(os_obj_of_i, &oes);
1765 return SIZET2NUM(oes.num);
1766}
1767
1768/*
1769 * call-seq:
1770 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1771 * ObjectSpace.each_object([module]) -> an_enumerator
1772 *
1773 * Calls the block once for each living, nonimmediate object in this
1774 * Ruby process. If <i>module</i> is specified, calls the block
1775 * for only those classes or modules that match (or are a subclass of)
1776 * <i>module</i>. Returns the number of objects found. Immediate
1777 * objects (such as <code>Fixnum</code>s, static <code>Symbol</code>s,
1778 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
1779 * never returned.
1780 *
1781 * If no block is given, an enumerator is returned instead.
1782 *
1783 * Job = Class.new
1784 * jobs = [Job.new, Job.new]
1785 * count = ObjectSpace.each_object(Job) {|x| p x }
1786 * puts "Total count: #{count}"
1787 *
1788 * <em>produces:</em>
1789 *
1790 * #<Job:0x000000011d6cbbf0>
1791 * #<Job:0x000000011d6cbc68>
1792 * Total count: 2
1793 *
1794 * Due to a current Ractor implementation issue, this method does not yield
1795 * Ractor-unshareable objects when the process is in multi-Ractor mode. Multi-Ractor
1796 * mode is enabled when <code>Ractor.new</code> has been called for the first time.
1797 * See https://bugs.ruby-lang.org/issues/19387 for more information.
1798 *
1799 * a = 12345678987654321 # shareable
1800 * b = [].freeze # shareable
1801 * c = {} # not shareable
1802 * ObjectSpace.each_object {|x| x } # yields a, b, and c
1803 * Ractor.new {} # enter multi-Ractor mode
1804 * ObjectSpace.each_object {|x| x } # does not yield c
1805 *
1806 */
1807
1808static VALUE
1809os_each_obj(int argc, VALUE *argv, VALUE os)
1810{
1811 VALUE of;
1812
1813 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1814 RETURN_ENUMERATOR(os, 1, &of);
1815 return os_obj_of(of);
1816}
1817
1818/*
1819 * call-seq:
1820 * ObjectSpace.undefine_finalizer(obj)
1821 *
1822 * Removes all finalizers for <i>obj</i>.
1823 *
1824 */
1825
1826static VALUE
1827undefine_final(VALUE os, VALUE obj)
1828{
1829 return rb_undefine_finalizer(obj);
1830}
1831
1832VALUE
1833rb_undefine_finalizer(VALUE obj)
1834{
1835 rb_check_frozen(obj);
1836
1837 rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1838
1839 return obj;
1840}
1841
1842static void
1843should_be_callable(VALUE block)
1844{
1845 if (!rb_obj_respond_to(block, idCall, TRUE)) {
1846 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1847 rb_obj_class(block));
1848 }
1849}
1850
1851static void
1852should_be_finalizable(VALUE obj)
1853{
1854 if (!FL_ABLE(obj)) {
1855 rb_raise(rb_eArgError, "cannot define finalizer for %s",
1856 rb_obj_classname(obj));
1857 }
1858 rb_check_frozen(obj);
1859}
1860
1861void
1862rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1863{
1864 rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1865}
1866
1867/*
1868 * call-seq:
1869 * ObjectSpace.define_finalizer(obj) {|id| ... } -> array
1870 * ObjectSpace.define_finalizer(obj, finalizer) -> array
1871 *
1872 * Adds a new finalizer for +obj+ that is called when +obj+ is destroyed
1873 * by the garbage collector or when Ruby shuts down (whichever comes first).
1874 *
1875 * With a block given, uses the block as the callback. Without a block given,
1876 * uses a callable object +finalizer+ as the callback. The callback is called
1877 * when +obj+ is destroyed with a single argument +id+ which is the object
1878 * ID of +obj+ (see Object#object_id).
1879 *
1880 * The return value is an array <code>[0, callback]</code>, where +callback+
1881 * is a Proc created from the block if one was given or +finalizer+ otherwise.
1882 *
1883 * Note that defining a finalizer in an instance method of the object may prevent
1884 * the object from being garbage collected since if the block or +finalizer+ refers
1885 * to +obj+ then +obj+ will never be reclaimed by the garbage collector. For example,
1886 * the following script demonstrates the issue:
1887 *
1888 * class Foo
1889 * def define_final
1890 * ObjectSpace.define_finalizer(self) do |id|
1891 * puts "Running finalizer for #{id}!"
1892 * end
1893 * end
1894 * end
1895 *
1896 * obj = Foo.new
1897 * obj.define_final
1898 *
1899 * There are two patterns to solve this issue:
1900 *
1901 * - Create the finalizer in a non-instance method so it can safely capture
1902 * the needed state:
1903 *
1904 * class Foo
1905 * def define_final
1906 * ObjectSpace.define_finalizer(self, self.class.create_finalizer)
1907 * end
1908 *
1909 * def self.create_finalizer
1910 * proc do |id|
1911 * puts "Running finalizer for #{id}!"
1912 * end
1913 * end
1914 * end
1915 *
1916 * - Use a callable object:
1917 *
1918 * class Foo
1919 * class Finalizer
1920 * def call(id)
1921 * puts "Running finalizer for #{id}!"
1922 * end
1923 * end
1924 *
1925 * def define_final
1926 * ObjectSpace.define_finalizer(self, Finalizer.new)
1927 * end
1928 * end
1929 *
1930 * Note that finalization can be unpredictable and is never guaranteed
1931 * to be run except on exit.
1932 */
1933
1934static VALUE
1935define_final(int argc, VALUE *argv, VALUE os)
1936{
1937 VALUE obj, block;
1938
1939 rb_scan_args(argc, argv, "11", &obj, &block);
1940 if (argc == 1) {
1941 block = rb_block_proc();
1942 }
1943
1944 if (rb_callable_receiver(block) == obj) {
1945 rb_warn("finalizer references object to be finalized");
1946 }
1947
1948 return rb_define_finalizer(obj, block);
1949}
1950
1951VALUE
1952rb_define_finalizer(VALUE obj, VALUE block)
1953{
1954 should_be_finalizable(obj);
1955 should_be_callable(block);
1956
1957 block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
1958
1959 block = rb_ary_new3(2, INT2FIX(0), block);
1960 OBJ_FREEZE(block);
1961 return block;
1962}
1963
1964void
1965rb_objspace_call_finalizer(void)
1966{
1967 rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
1968}
1969
1970void
1971rb_objspace_free_objects(void *objspace)
1972{
1973 rb_gc_impl_shutdown_free_objects(objspace);
1974}
1975
1976int
1977rb_objspace_garbage_object_p(VALUE obj)
1978{
1979 return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
1980}
1981
1982bool
1983rb_gc_pointer_to_heap_p(VALUE obj)
1984{
1985 return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
1986}
1987
1988#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
1989#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
1990static VALUE id2ref_value = 0;
1991
1992#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
1993static size_t object_id_counter = 1;
1994#else
1995static unsigned long long object_id_counter = 1;
1996#endif
1997
1998static inline VALUE
1999generate_next_object_id(void)
2000{
2001#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
2002 // 64bit atomics are available
2003 return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
2004#else
2005 unsigned int lock_lev = RB_GC_VM_LOCK();
2006 VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
2007 RB_GC_VM_UNLOCK(lock_lev);
2008 return id;
2009#endif
2010}
2011
2012void
2013rb_gc_obj_id_moved(VALUE obj)
2014{
2015 if (UNLIKELY(id2ref_tbl)) {
2016 st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
2017 }
2018}
2019
2020static int
2021object_id_cmp(st_data_t x, st_data_t y)
2022{
2023 if (RB_TYPE_P(x, T_BIGNUM)) {
2024 return !rb_big_eql(x, y);
2025 }
2026 else {
2027 return x != y;
2028 }
2029}
2030
2031static st_index_t
2032object_id_hash(st_data_t n)
2033{
2034 return FIX2LONG(rb_hash((VALUE)n));
2035}
2036
2037static const struct st_hash_type object_id_hash_type = {
2038 object_id_cmp,
2039 object_id_hash,
2040};
2041
2042static void gc_mark_tbl_no_pin(st_table *table);
2043
2044static void
2045id2ref_tbl_mark(void *data)
2046{
2047 st_table *table = (st_table *)data;
2048 if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
2049 // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
2050 rb_mark_set(table);
2051 }
2052 // We purposely don't mark values, as they are weak references.
2053 // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
2054}
2055
2056static size_t
2057id2ref_tbl_memsize(const void *data)
2058{
2059 return rb_st_memsize(data);
2060}
2061
2062static void
2063id2ref_tbl_free(void *data)
2064{
2065 id2ref_tbl = NULL; // clear global ref
2066 st_table *table = (st_table *)data;
2067 st_free_table(table);
2068}
2069
2070static const rb_data_type_t id2ref_tbl_type = {
2071 .wrap_struct_name = "VM/_id2ref_table",
2072 .function = {
2073 .dmark = id2ref_tbl_mark,
2074 .dfree = id2ref_tbl_free,
2075 .dsize = id2ref_tbl_memsize,
2076 // dcompact function not required because the table is reference updated
2077 // in rb_gc_vm_weak_table_foreach
2078 },
2079 .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
2080};
2081
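/* Classes and modules keep their object ID in RCLASS(klass)->object_id
 * rather than in shape fields (see object_id() below). The fast path reads
 * the slot without holding the VM lock, so the slot is claimed with a
 * compare-and-swap: if another ractor assigned an ID first, that ID wins. */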
2082static VALUE
2083class_object_id(VALUE klass)
2084{
2085 VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
2086 if (!id) {
2087 unsigned int lock_lev = RB_GC_VM_LOCK();
2088 id = generate_next_object_id();
2089 VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
2090 if (existing_id) {
2091 id = existing_id;
2092 }
2093 else if (RB_UNLIKELY(id2ref_tbl)) {
2094 st_insert(id2ref_tbl, id, klass);
2095 }
2096 RB_GC_VM_UNLOCK(lock_lev);
2097 }
2098 return id;
2099}
2100
2101static inline VALUE
2102object_id_get(VALUE obj, shape_id_t shape_id)
2103{
2104 VALUE id;
2105 if (rb_shape_too_complex_p(shape_id)) {
2106 id = rb_obj_field_get(obj, ROOT_TOO_COMPLEX_WITH_OBJ_ID);
2107 }
2108 else {
2109 id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
2110 }
2111
2112#if RUBY_DEBUG
2113 if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
2114 rb_p(obj);
2115 rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
2116 }
2117#endif
2118
2119 return id;
2120}
2121
2122static VALUE
2123object_id0(VALUE obj)
2124{
2125 VALUE id = Qfalse;
2126 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2127
2128 if (rb_shape_has_object_id(shape_id)) {
2129 return object_id_get(obj, shape_id);
2130 }
2131
2132 shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);
2133
2134 id = generate_next_object_id();
2135 rb_obj_field_set(obj, object_id_shape_id, 0, id);
2136
2137 RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
2138 RUBY_ASSERT(rb_shape_obj_has_id(obj));
2139
2140 if (RB_UNLIKELY(id2ref_tbl)) {
2141 RB_VM_LOCKING() {
2142 st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
2143 }
2144 }
2145 return id;
2146}
2147
2148static VALUE
2149object_id(VALUE obj)
2150{
2151 switch (BUILTIN_TYPE(obj)) {
2152 case T_CLASS:
2153 case T_MODULE:
2154 // With Ruby Box, classes and modules have different fields
2155 // in different boxes, so we cannot store the object id
2156 // in fields.
2157 return class_object_id(obj);
2158 case T_IMEMO:
2159 RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
2160 break;
2161 default:
2162 break;
2163 }
2164
2165 if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
2166 unsigned int lock_lev = RB_GC_VM_LOCK();
2167 VALUE id = object_id0(obj);
2168 RB_GC_VM_UNLOCK(lock_lev);
2169 return id;
2170 }
2171
2172 return object_id0(obj);
2173}
2174
2175static void
2176build_id2ref_i(VALUE obj, void *data)
2177{
2178 st_table *id2ref_tbl = (st_table *)data;
2179
2180 switch (BUILTIN_TYPE(obj)) {
2181 case T_CLASS:
2182 case T_MODULE:
2183 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2184 if (RCLASS(obj)->object_id) {
2185 st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
2186 }
2187 break;
2188 case T_IMEMO:
2189 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2190 if (IMEMO_TYPE_P(obj, imemo_fields) && rb_shape_obj_has_id(obj)) {
2191 st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
2192 }
2193 break;
2194 case T_OBJECT:
2195 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2196 if (rb_shape_obj_has_id(obj)) {
2197 st_insert(id2ref_tbl, rb_obj_id(obj), obj);
2198 }
2199 break;
2200 default:
2201 // For generic_fields, the T_IMEMO/fields is responsible for populating the entry.
2202 break;
2203 }
2204}
2205
2206static VALUE
2207object_id_to_ref(void *objspace_ptr, VALUE object_id)
2208{
2209 rb_objspace_t *objspace = objspace_ptr;
2210
2211 unsigned int lev = RB_GC_VM_LOCK();
2212
2213 if (!id2ref_tbl) {
2214 rb_gc_vm_barrier(); // stop other ractors
2215
2216        // GC must not trigger while we build the table, otherwise if we end
2217 // up freeing an object that had an ID, we might try to delete it from
2218 // the table even though it wasn't inserted yet.
2219 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
2220 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
2221
2222 // build_id2ref_i will most certainly malloc, which could trigger GC and sweep
2223 // objects we just added to the table.
2224        // Calling rb_gc_disable() also saves us from having to handle potentially garbage objects.
2225 bool gc_disabled = RTEST(rb_gc_disable());
2226 {
2227 id2ref_tbl = tmp_id2ref_tbl;
2228 id2ref_value = tmp_id2ref_value;
2229
2230 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
2231 }
2232 if (!gc_disabled) rb_gc_enable();
2233 }
2234
2235 VALUE obj;
2236 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2237
2238 RB_GC_VM_UNLOCK(lev);
2239
2240 if (found) {
2241 return obj;
2242 }
2243
2244 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2245 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2246 }
2247 else {
2248 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2249 }
2250}
2251
2252static inline void
2253obj_free_object_id(VALUE obj)
2254{
2255 VALUE obj_id = 0;
2256 if (RB_UNLIKELY(id2ref_tbl)) {
2257 switch (BUILTIN_TYPE(obj)) {
2258 case T_CLASS:
2259 case T_MODULE:
2260 obj_id = RCLASS(obj)->object_id;
2261 break;
2262 case T_IMEMO:
2263 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2264 return;
2265 }
2266 // fallthrough
2267 case T_OBJECT:
2268 {
2269 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2270 if (rb_shape_has_object_id(shape_id)) {
2271 obj_id = object_id_get(obj, shape_id);
2272 }
2273 break;
2274 }
2275 default:
2276 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2277 return;
2278 }
2279
2280 if (RB_UNLIKELY(obj_id)) {
2281 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2282
2283 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2284            // If the object is a T_IMEMO/fields, it's possible the actual object
2285            // has already been garbage collected.
2286 if (!RB_TYPE_P(obj, T_IMEMO)) {
2287 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2288 }
2289 }
2290 }
2291 }
2292}
2293
2294void
2295rb_gc_obj_free_vm_weak_references(VALUE obj)
2296{
2298 obj_free_object_id(obj);
2299
2300    if (rb_obj_gen_fields_p(obj)) {
2301        rb_free_generic_ivar(obj);
2302    }
2303
2304 switch (BUILTIN_TYPE(obj)) {
2305 case T_STRING:
2306 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2307 rb_gc_free_fstring(obj);
2308 }
2309 break;
2310 case T_SYMBOL:
2311 rb_gc_free_dsymbol(obj);
2312 break;
2313 case T_IMEMO:
2314 switch (imemo_type(obj)) {
2315 case imemo_callinfo:
2316 rb_vm_ci_free((const struct rb_callinfo *)obj);
2317 break;
2318 case imemo_ment:
2319 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2320 break;
2321 default:
2322 break;
2323 }
2324 break;
2325 default:
2326 break;
2327 }
2328}
2329
2330/*
2331 * call-seq:
2332 * ObjectSpace._id2ref(object_id) -> an_object
2333 *
2334 * Converts an object id to a reference to the object. May not be
2335 * called on an object id passed as a parameter to a finalizer.
2336 *
2337 * s = "I am a string" #=> "I am a string"
2338 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2339 * r == s #=> true
2340 *
2341 * In multi-Ractor mode, a RangeError is raised if the object is not
2342 * shareable.
2343 *
2344 * This method is deprecated and should no longer be used.
2345 */
2346
2347static VALUE
2348id2ref(VALUE objid)
2349{
2350 objid = rb_to_int(objid);
2351 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2352 VALUE ptr = (VALUE)NUM2PTR(objid);
2353 if (SPECIAL_CONST_P(ptr)) {
2354 if (ptr == Qtrue) return Qtrue;
2355 if (ptr == Qfalse) return Qfalse;
2356 if (NIL_P(ptr)) return Qnil;
2357 if (FIXNUM_P(ptr)) return ptr;
2358 if (FLONUM_P(ptr)) return ptr;
2359
2360 if (SYMBOL_P(ptr)) {
2361 // Check that the symbol is valid
2362 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2363 return ptr;
2364 }
2365 else {
2366 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2367 }
2368 }
2369
2370 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2371 }
2372 }
2373
2374 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2375 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2376 return obj;
2377 }
2378 else {
2379 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2380 }
2381}
2382
2383/* :nodoc: */
2384static VALUE
2385os_id2ref(VALUE os, VALUE objid)
2386{
2387 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2388 return id2ref(objid);
2389}
2390
2391static VALUE
2392rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2393{
2394 if (SPECIAL_CONST_P(obj)) {
2395#if SIZEOF_LONG == SIZEOF_VOIDP
2396 return LONG2NUM((SIGNED_VALUE)obj);
2397#else
2398 return LL2NUM((SIGNED_VALUE)obj);
2399#endif
2400 }
2401
2402 return get_heap_object_id(obj);
2403}
2404
2405static VALUE
2406nonspecial_obj_id(VALUE obj)
2407{
2408#if SIZEOF_LONG == SIZEOF_VOIDP
2409 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2410#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2411 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2412#else
2413# error not supported
2414#endif
2415}
2416
2417VALUE
2418rb_memory_id(VALUE obj)
2419{
2420 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2421}
2422
2423/*
2424 * Document-method: __id__
2425 * Document-method: object_id
2426 *
2427 * call-seq:
2428 * obj.__id__ -> integer
2429 * obj.object_id -> integer
2430 *
2431 * Returns an integer identifier for +obj+.
2432 *
2433 * The same number will be returned on all calls to +object_id+ for a given
2434 * object, and no two active objects will share an id.
2435 *
2436 * Note that some objects of builtin classes are reused for optimization.
2437 * This is the case for immediate values and frozen string literals.
2438 *
2439 * BasicObject implements +__id__+, Kernel implements +object_id+.
2440 *
2441 * Immediate values are not passed by reference but are passed by value:
2442 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2443 *
2444 * Object.new.object_id == Object.new.object_id # => false
2445 * (21 * 2).object_id == (21 * 2).object_id # => true
2446 * "hello".object_id == "hello".object_id # => false
2447 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2448 */
2449
2450VALUE
2451rb_obj_id(VALUE obj)
2452{
2453 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2454 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2455 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2456 * any immediates. */
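    /* For example, on a typical 64-bit build RUBY_IMMEDIATE_MASK is 0x07,
     * so IDs handed out to heap objects are 8, 16, 24, ..., while the ID
     * of a Fixnum n is its own VALUE (2n+1) and therefore always odd. */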
2457 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2458}
2459
2460bool
2461rb_obj_id_p(VALUE obj)
2462{
2463 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2464}
2465
2466/*
2467 * GC implementations should call this function before the GC phase that updates references
2468 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2469 * "W^X" policy and protect the code memory from being modified during execution. This function
2470 * makes the code memory writeable.
2471 */
2472void
2473rb_gc_before_updating_jit_code(void)
2474{
2475#if USE_YJIT
2476 rb_yjit_mark_all_writeable();
2477#endif
2478#if USE_ZJIT
2479 rb_zjit_mark_all_writable();
2480#endif
2481}
2482
2483/*
2484 * GC implementations should call this function after the GC phase that updates references
2485 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2486 * executable again.
2487 */
2488void
2489rb_gc_after_updating_jit_code(void)
2490{
2491#if USE_YJIT
2492 rb_yjit_mark_all_executable();
2493#endif
2494#if USE_ZJIT
2495 rb_zjit_mark_all_executable();
2496#endif
2497}
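
/*
 * A sketch of the expected call pattern from a GC implementation, based on
 * the two comments above (illustrative only):
 *
 *     rb_gc_before_updating_jit_code();
 *     // ... update object references embedded in JIT-generated code ...
 *     rb_gc_after_updating_jit_code();
 */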
2498
2499static void
2500classext_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2501{
2502 size_t *size = (size_t *)arg;
2503 size_t s = 0;
2504
2505 if (RCLASSEXT_M_TBL(ext)) {
2506 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2507 }
2508 if (RCLASSEXT_CVC_TBL(ext)) {
2509 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2510 }
2511 if (RCLASSEXT_CONST_TBL(ext)) {
2512 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2513 }
2514 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2515 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2516 }
2517 if (!prime) {
2518 s += sizeof(rb_classext_t);
2519 }
2520 *size += s;
2521}
2522
2523static void
2524classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2525{
2526 size_t *size = (size_t *)arg;
2527 size_t array_size;
2528 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2529 RUBY_ASSERT(prime);
2530 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2531 *size += array_size * sizeof(VALUE);
2532 }
2533}
2534
2535size_t
2536rb_obj_memsize_of(VALUE obj)
2537{
2538 size_t size = 0;
2539
2540 if (SPECIAL_CONST_P(obj)) {
2541 return 0;
2542 }
2543
2544 switch (BUILTIN_TYPE(obj)) {
2545 case T_OBJECT:
2546 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2547 if (rb_shape_obj_too_complex_p(obj)) {
2548 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2549 }
2550 else {
2551 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2552 }
2553 }
2554 break;
2555 case T_MODULE:
2556 case T_CLASS:
2557 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2558 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2559 break;
2560 case T_ICLASS:
2561 if (RICLASS_OWNS_M_TBL_P(obj)) {
2562 if (RCLASS_M_TBL(obj)) {
2563 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2564 }
2565 }
2566 break;
2567 case T_STRING:
2568 size += rb_str_memsize(obj);
2569 break;
2570 case T_ARRAY:
2571 size += rb_ary_memsize(obj);
2572 break;
2573 case T_HASH:
2574 if (RHASH_ST_TABLE_P(obj)) {
2575 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2576 /* st_table is in the slot */
2577 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2578 }
2579 break;
2580 case T_REGEXP:
2581 if (RREGEXP_PTR(obj)) {
2582 size += onig_memsize(RREGEXP_PTR(obj));
2583 }
2584 break;
2585 case T_DATA:
2586 size += rb_objspace_data_type_memsize(obj);
2587 break;
2588 case T_MATCH:
2589 {
2590 rb_matchext_t *rm = RMATCH_EXT(obj);
2591 size += onig_region_memsize(&rm->regs);
2592 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2593 }
2594 break;
2595 case T_FILE:
2596 if (RFILE(obj)->fptr) {
2597 size += rb_io_memsize(RFILE(obj)->fptr);
2598 }
2599 break;
2600 case T_RATIONAL:
2601 case T_COMPLEX:
2602 break;
2603 case T_IMEMO:
2604 size += rb_imemo_memsize(obj);
2605 break;
2606
2607 case T_FLOAT:
2608 case T_SYMBOL:
2609 break;
2610
2611 case T_BIGNUM:
2612 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2613 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2614 }
2615 break;
2616
2617 case T_NODE:
2618 UNEXPECTED_NODE(obj_memsize_of);
2619 break;
2620
2621 case T_STRUCT:
2622 if (RSTRUCT_EMBED_LEN(obj) == 0) {
2623 size += sizeof(VALUE) * RSTRUCT_LEN_RAW(obj);
2624 }
2625 break;
2626
2627 case T_ZOMBIE:
2628 case T_MOVED:
2629 break;
2630
2631 default:
2632 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2633 BUILTIN_TYPE(obj), (void*)obj);
2634 }
2635
2636 return size + rb_gc_obj_slot_size(obj);
2637}
2638
2639static int
2640set_zero(st_data_t key, st_data_t val, st_data_t arg)
2641{
2642 VALUE k = (VALUE)key;
2643 VALUE hash = (VALUE)arg;
2644 rb_hash_aset(hash, k, INT2FIX(0));
2645 return ST_CONTINUE;
2646}
2647
2648struct count_objects_data {
2649    size_t counts[T_MASK+1];
2650 size_t freed;
2651 size_t total;
2652};
2653
2654static void
2655count_objects_i(VALUE obj, void *d)
2656{
2657 struct count_objects_data *data = (struct count_objects_data *)d;
2658
2659 if (RBASIC(obj)->flags) {
2660 data->counts[BUILTIN_TYPE(obj)]++;
2661 }
2662 else {
2663 data->freed++;
2664 }
2665
2666 data->total++;
2667}
2668
2669/*
2670 * call-seq:
2671 * ObjectSpace.count_objects([result_hash]) -> hash
2672 *
2673 * Counts all objects grouped by type.
2674 *
2675 * It returns a hash, such as:
2676 * {
2677 * :TOTAL=>10000,
2678 * :FREE=>3011,
2679 * :T_OBJECT=>6,
2680 * :T_CLASS=>404,
2681 * # ...
2682 * }
2683 *
2684 * The contents of the returned hash are implementation specific.
2685 * They may change in the future.
2686 *
2687 * Keys starting with +:T_+ count live objects.
2688 * For example, +:T_ARRAY+ is the number of arrays.
2689 * +:FREE+ counts object slots which are not currently in use.
2690 * +:TOTAL+ is the sum of the above.
2691 *
2692 * If the optional argument +result_hash+ is given,
2693 * it is overwritten and returned. This is intended to avoid the probe effect.
2694 *
2695 * h = {}
2696 * ObjectSpace.count_objects(h)
2697 * puts h
2698 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2699 *
2700 * This method is only expected to work on C Ruby.
2701 *
2702 */
2703
2704static VALUE
2705count_objects(int argc, VALUE *argv, VALUE os)
2706{
2707 struct count_objects_data data = { 0 };
2708 VALUE hash = Qnil;
2709 VALUE types[T_MASK + 1];
2710
2711 if (rb_check_arity(argc, 0, 1) == 1) {
2712 hash = argv[0];
2713 if (!RB_TYPE_P(hash, T_HASH))
2714 rb_raise(rb_eTypeError, "non-hash given");
2715 }
2716
2717 for (size_t i = 0; i <= T_MASK; i++) {
2718 // type_sym can allocate an object,
2719 // so we need to create all key symbols in advance
2720        // so that the allocations do not disturb the result
2721 types[i] = type_sym(i);
2722 }
2723
2724 // Same as type_sym, we need to create all key symbols in advance
2725 VALUE total = ID2SYM(rb_intern("TOTAL"));
2726 VALUE free = ID2SYM(rb_intern("FREE"));
2727
2728 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2729
2730 if (NIL_P(hash)) {
2731 hash = rb_hash_new();
2732 }
2733 else if (!RHASH_EMPTY_P(hash)) {
2734 rb_hash_stlike_foreach(hash, set_zero, hash);
2735 }
2736 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2737 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2738
2739 for (size_t i = 0; i <= T_MASK; i++) {
2740 if (data.counts[i]) {
2741 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2742 }
2743 }
2744
2745 return hash;
2746}
2747
2748#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2749
2750#define STACK_START (ec->machine.stack_start)
2751#define STACK_END (ec->machine.stack_end)
2752#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2753
2754#if STACK_GROW_DIRECTION < 0
2755# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2756#elif STACK_GROW_DIRECTION > 0
2757# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2758#else
2759# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2760 : (size_t)(STACK_END - STACK_START + 1))
2761#endif
2762#if !STACK_GROW_DIRECTION
2763int ruby_stack_grow_direction;
2764int
2765ruby_get_stack_grow_direction(volatile VALUE *addr)
2766{
2767 VALUE *end;
2768 SET_MACHINE_STACK_END(&end);
2769
2770 if (end > addr) return ruby_stack_grow_direction = 1;
2771 return ruby_stack_grow_direction = -1;
2772}
2773#endif
2774
2775size_t
2776ruby_stack_length(VALUE **p)
2777{
2778 rb_execution_context_t *ec = GET_EC();
2779 SET_STACK_END;
2780 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2781 return STACK_LENGTH;
2782}
2783
2784#define PREVENT_STACK_OVERFLOW 1
2785#ifndef PREVENT_STACK_OVERFLOW
2786#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2787# define PREVENT_STACK_OVERFLOW 1
2788#else
2789# define PREVENT_STACK_OVERFLOW 0
2790#endif
2791#endif
2792#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2793static int
2794stack_check(rb_execution_context_t *ec, int water_mark)
2795{
2796 SET_STACK_END;
2797
2798 size_t length = STACK_LENGTH;
2799 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2800
2801 return length > maximum_length;
2802}
2803#else
2804#define stack_check(ec, water_mark) FALSE
2805#endif
2806
2807#define STACKFRAME_FOR_CALL_CFUNC 2048
2808
2809int
2810rb_ec_stack_check(rb_execution_context_t *ec)
2811{
2812 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2813}
2814
2815int
2816ruby_stack_check(void)
2817{
2818 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2819}
2820
2821/* ==================== Marking ==================== */
2822
2823#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2824 if (!RB_SPECIAL_CONST_P(obj)) { \
2825 rb_vm_t *vm = GET_VM(); \
2826 void *objspace = vm->gc.objspace; \
2827 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2828 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2829 (func)(objspace, (obj_or_ptr)); \
2830 } \
2831 else if (check_obj ? \
2832 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2833 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2834 true) { \
2835 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2836 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2837 vm->gc.mark_func_data = NULL; \
2838 mark_func_data->mark_func((obj), mark_func_data->data); \
2839 vm->gc.mark_func_data = mark_func_data; \
2840 } \
2841 } \
2842} while (0)
2843
2844static inline void
2845gc_mark_internal(VALUE obj)
2846{
2847 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2848}
2849
2850void
2851rb_gc_mark_movable(VALUE obj)
2852{
2853 gc_mark_internal(obj);
2854}
2855
2856void
2857rb_gc_mark_and_move(VALUE *ptr)
2858{
2859 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2860}
2861
2862static inline void
2863gc_mark_and_pin_internal(VALUE obj)
2864{
2865 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2866}
2867
2868void
2869rb_gc_mark(VALUE obj)
2870{
2871 gc_mark_and_pin_internal(obj);
2872}
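
/*
 * A minimal sketch of how a C extension might use the two marking entry
 * points above in a TypedData dmark callback (the struct and names are
 * illustrative, not part of this file):
 *
 *     struct wrapper { VALUE name; VALUE items; };
 *
 *     static void
 *     wrapper_mark(void *ptr)
 *     {
 *         struct wrapper *w = ptr;
 *         rb_gc_mark_movable(w->name); // may be compacted; fix up in dcompact
 *         rb_gc_mark(w->items);        // pinned; safe if raw pointers are kept
 *     }
 */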
2873
2874static inline void
2875gc_mark_maybe_internal(VALUE obj)
2876{
2877 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2878}
2879
2880void
2881rb_gc_mark_maybe(VALUE obj)
2882{
2883 gc_mark_maybe_internal(obj);
2884}
2885
2886ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2887static void
2888each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2889{
2890 VALUE v;
2891 while (n--) {
2892 v = *x;
2893 cb(v, data);
2894 x++;
2895 }
2896}
2897
2898static void
2899each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2900{
2901 if (end <= start) return;
2902 each_location(start, end - start, cb, data);
2903}
2904
2905static void
2906gc_mark_maybe_each_location(VALUE obj, void *data)
2907{
2908 gc_mark_maybe_internal(obj);
2909}
2910
2911void
2912rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2913{
2914 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2915}
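
/*
 * rb_gc_mark_locations() conservatively marks every word in [start, end)
 * that might be a heap object. A usage sketch (the buffer is illustrative):
 *
 *     VALUE buf[16];
 *     // ... buf may hold live objects ...
 *     rb_gc_mark_locations(buf, buf + 16);
 */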
2916
2917void
2918rb_gc_mark_values(long n, const VALUE *values)
2919{
2920 for (long i = 0; i < n; i++) {
2921 gc_mark_internal(values[i]);
2922 }
2923}
2924
2925void
2926rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2927{
2928 for (long i = 0; i < n; i++) {
2929 gc_mark_and_pin_internal(values[i]);
2930 }
2931}
2932
2933static int
2934mark_key(st_data_t key, st_data_t value, st_data_t data)
2935{
2936 gc_mark_and_pin_internal((VALUE)key);
2937
2938 return ST_CONTINUE;
2939}
2940
2941void
2942rb_mark_set(st_table *tbl)
2943{
2944 if (!tbl) return;
2945
2946 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2947}
2948
2949static int
2950mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2951{
2952 gc_mark_internal((VALUE)key);
2953 gc_mark_internal((VALUE)value);
2954
2955 return ST_CONTINUE;
2956}
2957
2958static int
2959pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2960{
2961 gc_mark_and_pin_internal((VALUE)key);
2962 gc_mark_and_pin_internal((VALUE)value);
2963
2964 return ST_CONTINUE;
2965}
2966
2967static int
2968pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2969{
2970 gc_mark_and_pin_internal((VALUE)key);
2971 gc_mark_internal((VALUE)value);
2972
2973 return ST_CONTINUE;
2974}
2975
2976static void
2977mark_hash(VALUE hash)
2978{
2979 if (rb_hash_compare_by_id_p(hash)) {
2980 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2981 }
2982 else {
2983 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2984 }
2985
2986 gc_mark_internal(RHASH(hash)->ifnone);
2987}
2988
2989void
2990rb_mark_hash(st_table *tbl)
2991{
2992 if (!tbl) return;
2993
2994 st_foreach(tbl, pin_key_pin_value, 0);
2995}
2996
2997static enum rb_id_table_iterator_result
2998mark_method_entry_i(VALUE me, void *objspace)
2999{
3000 gc_mark_internal(me);
3001
3002 return ID_TABLE_CONTINUE;
3003}
3004
3005static void
3006mark_m_tbl(void *objspace, struct rb_id_table *tbl)
3007{
3008 if (tbl) {
3009 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
3010 }
3011}
3012
3013static enum rb_id_table_iterator_result
3014mark_const_entry_i(VALUE value, void *objspace)
3015{
3016 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
3017
3018 if (!rb_gc_checking_shareable()) {
3019 gc_mark_internal(ce->value);
3020 gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
3021 }
3022 return ID_TABLE_CONTINUE;
3023}
3024
3025static void
3026mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
3027{
3028 if (!tbl) return;
3029 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
3030}
3031
3032static enum rb_id_table_iterator_result
3033mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3034{
3035 struct rb_cvar_class_tbl_entry *entry;
3036
3037 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3038
3039 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
3040 gc_mark_internal((VALUE)entry->cref);
3041
3042 return ID_TABLE_CONTINUE;
3043}
3044
3045static void
3046mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
3047{
3048 if (!tbl) return;
3049 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
3050}
3051
3052#if STACK_GROW_DIRECTION < 0
3053#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
3054#elif STACK_GROW_DIRECTION > 0
3055#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
3056#else
3057#define GET_STACK_BOUNDS(start, end, appendix) \
3058 ((STACK_END < STACK_START) ? \
3059 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
3060#endif
3061
3062static void
3063gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
3064{
3065 gc_mark_maybe_internal(obj);
3066
3067#ifdef RUBY_ASAN_ENABLED
3068 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
3069 void *fake_frame_start;
3070 void *fake_frame_end;
3071 bool is_fake_frame = asan_get_fake_stack_extents(
3072 ec->machine.asan_fake_stack_handle, obj,
3073 ec->machine.stack_start, ec->machine.stack_end,
3074 &fake_frame_start, &fake_frame_end
3075 );
3076 if (is_fake_frame) {
3077 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
3078 }
3079#endif
3080}
3081
3082static bool
3083gc_object_moved_p_internal(void *objspace, VALUE obj)
3084{
3085 if (SPECIAL_CONST_P(obj)) {
3086 return false;
3087 }
3088
3089 return rb_gc_impl_object_moved_p(objspace, obj);
3090}
3091
3092static VALUE
3093gc_location_internal(void *objspace, VALUE value)
3094{
3095 if (SPECIAL_CONST_P(value)) {
3096 return value;
3097 }
3098
3099 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
3100
3101 return rb_gc_impl_location(objspace, value);
3102}
3103
3104VALUE
3105rb_gc_location(VALUE value)
3106{
3107 return gc_location_internal(rb_gc_get_objspace(), value);
3108}
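
/*
 * A minimal sketch of a dcompact callback pairing with the
 * rb_gc_mark_movable() usage sketched earlier: after objects move, each
 * stored reference is replaced by its new location (names illustrative):
 *
 *     static void
 *     wrapper_compact(void *ptr)
 *     {
 *         struct wrapper *w = ptr;
 *         w->name  = rb_gc_location(w->name);
 *         w->items = rb_gc_location(w->items);
 *     }
 */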
3109
3110#if defined(__wasm__)
3111
3112
3113static VALUE *rb_stack_range_tmp[2];
3114
3115static void
3116rb_mark_locations(void *begin, void *end)
3117{
3118 rb_stack_range_tmp[0] = begin;
3119 rb_stack_range_tmp[1] = end;
3120}
3121
3122void
3123rb_gc_save_machine_context(void)
3124{
3125 // no-op
3126}
3127
3128# if defined(__EMSCRIPTEN__)
3129
3130static void
3131mark_current_machine_context(const rb_execution_context_t *ec)
3132{
3133 emscripten_scan_stack(rb_mark_locations);
3134 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3135
3136 emscripten_scan_registers(rb_mark_locations);
3137 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3138}
3139# else // use Asyncify version
3140
3141static void
3142mark_current_machine_context(rb_execution_context_t *ec)
3143{
3144 VALUE *stack_start, *stack_end;
3145 SET_STACK_END;
3146 GET_STACK_BOUNDS(stack_start, stack_end, 1);
3147 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
3148
3149 rb_wasm_scan_locals(rb_mark_locations);
3150 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3151}
3152
3153# endif
3154
3155#else // !defined(__wasm__)
3156
3157void
3158rb_gc_save_machine_context(void)
3159{
3160 rb_thread_t *thread = GET_THREAD();
3161
3162 RB_VM_SAVE_MACHINE_CONTEXT(thread);
3163}
3164
3165
3166static void
3167mark_current_machine_context(const rb_execution_context_t *ec)
3168{
3169 rb_gc_mark_machine_context(ec);
3170}
3171#endif
3172
3173void
3174rb_gc_mark_machine_context(const rb_execution_context_t *ec)
3175{
3176 VALUE *stack_start, *stack_end;
3177
3178 GET_STACK_BOUNDS(stack_start, stack_end, 0);
3179 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
3180
3181 void *data =
3182#ifdef RUBY_ASAN_ENABLED
3183    /* gc_mark_machine_stack_location_maybe() uses data as const */
3184    (void *)ec;
3185#else
3186 NULL;
3187#endif
3188
3189 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3190 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3191 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3192}
3193
3194static int
3195rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3196{
3197 gc_mark_and_pin_internal((VALUE)value);
3198
3199 return ST_CONTINUE;
3200}
3201
3202void
3203rb_mark_tbl(st_table *tbl)
3204{
3205 if (!tbl || tbl->num_entries == 0) return;
3206
3207 st_foreach(tbl, rb_mark_tbl_i, 0);
3208}
3209
3210static void
3211gc_mark_tbl_no_pin(st_table *tbl)
3212{
3213 if (!tbl || tbl->num_entries == 0) return;
3214
3215 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3216}
3217
3218void
3219rb_mark_tbl_no_pin(st_table *tbl)
3220{
3221 gc_mark_tbl_no_pin(tbl);
3222}
3223
3224static bool
3225gc_declarative_marking_p(const rb_data_type_t *type)
3226{
3227 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3228}
3229
3230void
3231rb_gc_mark_roots(void *objspace, const char **categoryp)
3232{
3233 rb_execution_context_t *ec = GET_EC();
3234 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3235
3236#define MARK_CHECKPOINT(category) do { \
3237 if (categoryp) *categoryp = category; \
3238} while (0)
3239
3240 MARK_CHECKPOINT("vm");
3241 rb_vm_mark(vm);
3242
3243 MARK_CHECKPOINT("end_proc");
3244 rb_mark_end_proc();
3245
3246 MARK_CHECKPOINT("global_tbl");
3247 rb_gc_mark_global_tbl();
3248
3249#if USE_YJIT
3250 void rb_yjit_root_mark(void); // in Rust
3251
3252 if (rb_yjit_enabled_p) {
3253 MARK_CHECKPOINT("YJIT");
3254 rb_yjit_root_mark();
3255 }
3256#endif
3257
3258#if USE_ZJIT
3259 void rb_zjit_root_mark(void);
3260 if (rb_zjit_enabled_p) {
3261 MARK_CHECKPOINT("ZJIT");
3262 rb_zjit_root_mark();
3263 }
3264#endif
3265
3266 MARK_CHECKPOINT("machine_context");
3267 mark_current_machine_context(ec);
3268
3269 MARK_CHECKPOINT("global_symbols");
3270 rb_sym_global_symbols_mark_and_move();
3271
3272 MARK_CHECKPOINT("finish");
3273
3274#undef MARK_CHECKPOINT
3275}
3276
3277struct gc_mark_classext_foreach_arg {
3278    rb_objspace_t *objspace;
3279    VALUE obj;
3280};
3281
3282static void
3283gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3284{
3285    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3286    rb_objspace_t *objspace = foreach_arg->objspace;
3287
3288 if (RCLASSEXT_SUPER(ext)) {
3289 gc_mark_internal(RCLASSEXT_SUPER(ext));
3290 }
3291 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3292
3293 if (!rb_gc_checking_shareable()) {
3294 // unshareable
3295 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3296 }
3297
3298 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3299 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3300 }
3301 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3302 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3303 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3304 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3305}
3306
3307static void
3308gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3309{
3310    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3311    rb_objspace_t *objspace = foreach_arg->objspace;
3312
3313 if (RCLASSEXT_SUPER(ext)) {
3314 gc_mark_internal(RCLASSEXT_SUPER(ext));
3315 }
3316 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3317 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3318 }
3319 if (RCLASSEXT_INCLUDER(ext)) {
3320 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3321 }
3322 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3323 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3324}
3325
3326#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
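
/*
 * With RUBY_TYPED_DECL_MARKING, the dmark slot holds not a function but a
 * RUBY_REF_END-terminated list of offsets to VALUE members, which the macro
 * above recovers. A sketch of such a type (names are illustrative, and this
 * assumes all references are plain VALUE members):
 *
 *     struct pair { VALUE car; VALUE cdr; };
 *
 *     static const size_t pair_refs[] = {
 *         offsetof(struct pair, car),
 *         offsetof(struct pair, cdr),
 *         RUBY_REF_END,
 *     };
 *
 *     static const rb_data_type_t pair_type = {
 *         .wrap_struct_name = "pair",
 *         .function = { .dmark = (RUBY_DATA_FUNC)pair_refs, },
 *         .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */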
3327
3328void
3329rb_gc_move_obj_during_marking(VALUE from, VALUE to)
3330{
3331 if (rb_obj_using_gen_fields_table_p(to)) {
3332 rb_mark_generic_ivar(from);
3333 }
3334}
3335
3336void
3337rb_gc_mark_children(void *objspace, VALUE obj)
3338{
3339 struct gc_mark_classext_foreach_arg foreach_args;
3340
3341 if (rb_obj_using_gen_fields_table_p(obj)) {
3342 rb_mark_generic_ivar(obj);
3343 }
3344
3345 switch (BUILTIN_TYPE(obj)) {
3346 case T_FLOAT:
3347 case T_BIGNUM:
3348 return;
3349
3350 case T_NIL:
3351 case T_FIXNUM:
3352 rb_bug("rb_gc_mark() called for broken object");
3353 break;
3354
3355 case T_NODE:
3356 UNEXPECTED_NODE(rb_gc_mark);
3357 break;
3358
3359 case T_IMEMO:
3360 rb_imemo_mark_and_move(obj, false);
3361 return;
3362
3363 default:
3364 break;
3365 }
3366
3367 gc_mark_internal(RBASIC(obj)->klass);
3368
3369 switch (BUILTIN_TYPE(obj)) {
3370 case T_CLASS:
3371 if (FL_TEST_RAW(obj, FL_SINGLETON) &&
3372 !rb_gc_checking_shareable()) {
3373 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3374 }
3375        // fall through to the shared T_CLASS/T_MODULE handling
3376 case T_MODULE:
3377 foreach_args.objspace = objspace;
3378 foreach_args.obj = obj;
3379 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3380 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3381 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3382 }
3383 break;
3384
3385 case T_ICLASS:
3386 foreach_args.objspace = objspace;
3387 foreach_args.obj = obj;
3388 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3389 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3390 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3391 }
3392 break;
3393
3394 case T_ARRAY:
3395 if (ARY_SHARED_P(obj)) {
3396 VALUE root = ARY_SHARED_ROOT(obj);
3397 gc_mark_internal(root);
3398 }
3399 else {
3400 long len = RARRAY_LEN(obj);
3401 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3402 for (long i = 0; i < len; i++) {
3403 gc_mark_internal(ptr[i]);
3404 }
3405 }
3406 break;
3407
3408 case T_HASH:
3409 mark_hash(obj);
3410 break;
3411
3412 case T_SYMBOL:
3413 gc_mark_internal(RSYMBOL(obj)->fstr);
3414 break;
3415
3416 case T_STRING:
3417 if (STR_SHARED_P(obj)) {
3418 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3419 /* Embedded shared strings cannot be moved because this string
3420 * points into the slot of the shared string. There may be code
3421 * using the RSTRING_PTR on the stack, which would pin this
3422 * string but not pin the shared string, causing it to move. */
3423 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3424 }
3425 else {
3426 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3427 }
3428 }
3429 break;
3430
3431 case T_DATA: {
3432 bool typed_data = RTYPEDDATA_P(obj);
3433 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3434
3435 if (typed_data) {
3436 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3437 }
3438
3439 if (ptr) {
3440 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3441 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3442
3443 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3444 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3445 }
3446 }
3447 else {
3448                RUBY_DATA_FUNC mark_func = typed_data ?
3449                    RTYPEDDATA_TYPE(obj)->function.dmark :
3450                    RDATA(obj)->dmark;
3451 if (mark_func) (*mark_func)(ptr);
3452 }
3453 }
3454
3455 break;
3456 }
3457
3458 case T_OBJECT: {
3459 uint32_t len;
3460 if (rb_shape_obj_too_complex_p(obj)) {
3461 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3462 len = ROBJECT_FIELDS_COUNT_COMPLEX(obj);
3463 }
3464 else {
3465 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3466
3467 len = ROBJECT_FIELDS_COUNT_NOT_COMPLEX(obj);
3468 for (uint32_t i = 0; i < len; i++) {
3469 gc_mark_internal(ptr[i]);
3470 }
3471 }
3472
3473 attr_index_t fields_count = (attr_index_t)len;
3474 if (fields_count) {
3475 VALUE klass = RBASIC_CLASS(obj);
3476
3477            // Raise the class's max_iv_count if needed; it is used to choose the size pool for new instances
3478 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3479 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3480 }
3481 }
3482
3483 break;
3484 }
3485
3486 case T_FILE:
3487 if (RFILE(obj)->fptr) {
3488 gc_mark_internal(RFILE(obj)->fptr->self);
3489 gc_mark_internal(RFILE(obj)->fptr->pathv);
3490 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3491 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3492 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3493 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3494 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3495 gc_mark_internal(RFILE(obj)->fptr->timeout);
3496 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3497 }
3498 break;
3499
3500 case T_REGEXP:
3501 gc_mark_internal(RREGEXP(obj)->src);
3502 break;
3503
3504 case T_MATCH:
3505 gc_mark_internal(RMATCH(obj)->regexp);
3506 if (RMATCH(obj)->str) {
3507 gc_mark_internal(RMATCH(obj)->str);
3508 }
3509 break;
3510
3511 case T_RATIONAL:
3512 gc_mark_internal(RRATIONAL(obj)->num);
3513 gc_mark_internal(RRATIONAL(obj)->den);
3514 break;
3515
3516 case T_COMPLEX:
3517 gc_mark_internal(RCOMPLEX(obj)->real);
3518 gc_mark_internal(RCOMPLEX(obj)->imag);
3519 break;
3520
3521 case T_STRUCT: {
3522 const long len = RSTRUCT_LEN(obj);
3523 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3524
3525 for (long i = 0; i < len; i++) {
3526 gc_mark_internal(ptr[i]);
3527 }
3528
3529 if (rb_shape_obj_has_fields(obj) && !FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3530 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3531 }
3532
3533 break;
3534 }
3535
3536 default:
3537 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3538 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3539 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3540 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3541 BUILTIN_TYPE(obj), (void *)obj,
3542 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3543 }
3544}
3545
3546size_t
3547rb_gc_obj_optimal_size(VALUE obj)
3548{
3549 switch (BUILTIN_TYPE(obj)) {
3550 case T_ARRAY:
3551 {
3552 size_t size = rb_ary_size_as_embedded(obj);
3553 if (rb_gc_size_allocatable_p(size)) {
3554 return size;
3555 }
3556 else {
3557 return sizeof(struct RArray);
3558 }
3559 }
3560
3561 case T_OBJECT:
3562 if (rb_shape_obj_too_complex_p(obj)) {
3563 return sizeof(struct RObject);
3564 }
3565 else {
3566 size_t size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3567 if (rb_gc_size_allocatable_p(size)) {
3568 return size;
3569 }
3570 else {
3571 return sizeof(struct RObject);
3572 }
3573 }
3574
3575 case T_STRING:
3576 {
3577 size_t size = rb_str_size_as_embedded(obj);
3578 if (rb_gc_size_allocatable_p(size)) {
3579 return size;
3580 }
3581 else {
3582 return sizeof(struct RString);
3583 }
3584 }
3585
3586 case T_HASH:
3587 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3588
3589 default:
3590 return 0;
3591 }
3592}
3593
3594void
3595rb_gc_writebarrier(VALUE a, VALUE b)
3596{
3597 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3598}
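
/*
 * Callers normally do not invoke the write barrier directly; it is emitted
 * via RB_OBJ_WRITE when storing one object into another, e.g. (a sketch,
 * names illustrative):
 *
 *     struct wrapper { VALUE child; };
 *
 *     static void
 *     wrapper_set_child(VALUE self, struct wrapper *w, VALUE val)
 *     {
 *         // stores val into w->child and tells the GC that self references it
 *         RB_OBJ_WRITE(self, &w->child, val);
 *     }
 */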
3599
3600void
3601rb_gc_writebarrier_unprotect(VALUE obj)
3602{
3603 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3604}
3605
3606/*
3607 * remember `obj' if needed.
3608 */
3609void
3610rb_gc_writebarrier_remember(VALUE obj)
3611{
3612 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3613}
3614
3615void
3616rb_gc_copy_attributes(VALUE dest, VALUE obj)
3617{
3618 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3619}
3620
3621int
3622rb_gc_modular_gc_loaded_p(void)
3623{
3624#if USE_MODULAR_GC
3625 return rb_gc_functions.modular_gc_loaded_p;
3626#else
3627 return false;
3628#endif
3629}
3630
3631const char *
3632rb_gc_active_gc_name(void)
3633{
3634 const char *gc_name = rb_gc_impl_active_gc_name();
3635
3636 const size_t len = strlen(gc_name);
3637 if (len > RB_GC_MAX_NAME_LEN) {
3638 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3639 RB_GC_MAX_NAME_LEN, len, gc_name);
3640 }
3641
3642 return gc_name;
3643}
3644
3645struct rb_gc_object_metadata_entry *
3646rb_gc_object_metadata(VALUE obj)
3647{
3648 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3649}
3650
3651/* GC */
3652
3653void *
3654rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3655{
3656 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3657}
3658
3659void
3660rb_gc_ractor_cache_free(void *cache)
3661{
3662 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3663}
3664
3665void
3666rb_gc_register_mark_object(VALUE obj)
3667{
3668 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3669 return;
3670
3671 rb_vm_register_global_object(obj);
3672}
3673
3674void
3675rb_gc_register_address(VALUE *addr)
3676{
3677 rb_vm_t *vm = GET_VM();
3678
3679 VALUE obj = *addr;
3680
3681 struct global_object_list *tmp = ALLOC(struct global_object_list);
3682 RB_VM_LOCKING() {
3683 tmp->next = vm->global_object_list;
3684 tmp->varptr = addr;
3685 vm->global_object_list = tmp;
3686 }
3687
3688 /*
3689 * Because some C extensions have assignment-then-register bugs,
3690     * we defensively guard `obj` here so that it does not get swept.
3691 */
3692 RB_GC_GUARD(obj);
3693 if (0 && !SPECIAL_CONST_P(obj)) {
3694 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3695 rb_obj_class(obj));
3696 rb_print_backtrace(stderr);
3697 }
3698}
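
/*
 * A usage sketch for C extensions that keep objects in global variables the
 * GC cannot see on its own (names illustrative):
 *
 *     static VALUE default_config;
 *
 *     void
 *     Init_myext(void)
 *     {
 *         default_config = rb_hash_new();
 *         rb_gc_register_address(&default_config);
 *     }
 */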
3699
3700void
3701rb_gc_unregister_address(VALUE *addr)
3702{
3703 rb_vm_t *vm = GET_VM();
3704 struct global_object_list *tmp;
3705 RB_VM_LOCKING() {
3706 tmp = vm->global_object_list;
3707 if (tmp->varptr == addr) {
3708 vm->global_object_list = tmp->next;
3709 SIZED_FREE(tmp);
3710 }
3711 else {
3712 while (tmp->next) {
3713 if (tmp->next->varptr == addr) {
3714 struct global_object_list *t = tmp->next;
3715
3716 tmp->next = tmp->next->next;
3717 SIZED_FREE(t);
3718 break;
3719 }
3720 tmp = tmp->next;
3721 }
3722 }
3723 }
3724}
3725
3726void
3727rb_global_variable(VALUE *var)
3728{
3729 rb_gc_register_address(var);
3730}
3731
3732static VALUE
3733gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3734{
3735 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3736
3737 return Qnil;
3738}
3739
3740/*
3741 * rb_objspace_each_objects() is a special C API to walk through the
3742 * Ruby object space. This C API is difficult to use safely. To be
3743 * frank, you should not use it unless you have read the source code
3744 * of this function and understand what it does.
3745 *
3746 * 'callback' will be called several times (once per heap page in the
3747 * current implementation) with:
3748 * vstart: a pointer to the first living object of the heap_page.
3749 * vend: a pointer just past the valid heap_page area.
3750 * stride: a distance to next VALUE.
3751 *
3752 * If callback() returns non-zero, the iteration will be stopped.
3753 *
3754 * This is a sample callback that iterates over live objects:
3755 *
3756 * static int
3757 * sample_callback(void *vstart, void *vend, int stride, void *data)
3758 * {
3759 * VALUE v = (VALUE)vstart;
3760 * for (; v != (VALUE)vend; v += stride) {
3761 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3762 * // do something with live object 'v'
3763 * }
3764 * }
3765 * return 0; // continue to iteration
3766 * }
3767 *
3768 * Note: 'vstart' is not the top of the heap_page. It points at the
3769 *       first living object, so that at least one object is grasped and
3770 *       GC issues are avoided. This means that you cannot walk through
3771 *       every Ruby object page, including freed object pages.
3772 *
3773 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3774 *       However, variable values may be passed as 'stride' for various
3775 *       reasons, so you must use the given stride instead of a
3776 *       hard-coded constant in the iteration.
3777 */
3778void
3779rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3780{
3781 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3782}
3783
3784static void
3785gc_ref_update_array(void *objspace, VALUE v)
3786{
3787 if (ARY_SHARED_P(v)) {
3788 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3789
3790 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3791
3792 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3793 // If the root is embedded and its location has changed
3794 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3795 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3796 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3797 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3798 }
3799 }
3800 else {
3801 long len = RARRAY_LEN(v);
3802
3803 if (len > 0) {
3804 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3805 for (long i = 0; i < len; i++) {
3806 UPDATE_IF_MOVED(objspace, ptr[i]);
3807 }
3808 }
3809
3810 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3811 if (rb_ary_embeddable_p(v)) {
3812 rb_ary_make_embedded(v);
3813 }
3814 }
3815 }
3816}
3817
3818static void
3819gc_ref_update_object(void *objspace, VALUE v)
3820{
3821 VALUE *ptr = ROBJECT_FIELDS(v);
3822
3823 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3824 if (rb_shape_obj_too_complex_p(v)) {
3825 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3826 return;
3827 }
3828
3829 size_t slot_size = rb_gc_obj_slot_size(v);
3830 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3831 if (slot_size >= embed_size) {
3832 // Object can be re-embedded
3833 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3834 SIZED_FREE_N(ptr, ROBJECT_FIELDS_CAPACITY(v));
3835 FL_UNSET_RAW(v, ROBJECT_HEAP);
3836 ptr = ROBJECT(v)->as.ary;
3837 }
3838 }
3839
3840 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3841 UPDATE_IF_MOVED(objspace, ptr[i]);
3842 }
3843}
3844
3845void
3846rb_gc_ref_update_table_values_only(st_table *tbl)
3847{
3848 gc_ref_update_table_values_only(tbl);
3849}
3850
3851/* Update MOVED references in a VALUE=>VALUE st_table */
3852void
3853rb_gc_update_tbl_refs(st_table *ptr)
3854{
3855 gc_update_table_refs(ptr);
3856}
3857
3858static void
3859gc_ref_update_hash(void *objspace, VALUE v)
3860{
3861 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3862}
3863
3864static void
3865gc_update_values(void *objspace, long n, VALUE *values)
3866{
3867 for (long i = 0; i < n; i++) {
3868 UPDATE_IF_MOVED(objspace, values[i]);
3869 }
3870}
3871
3872void
3873rb_gc_update_values(long n, VALUE *values)
3874{
3875 gc_update_values(rb_gc_get_objspace(), n, values);
3876}
3877
3878static enum rb_id_table_iterator_result
3879check_id_table_move(VALUE value, void *data)
3880{
3881 void *objspace = (void *)data;
3882
3883 if (gc_object_moved_p_internal(objspace, (VALUE)value)) {
3884 return ID_TABLE_REPLACE;
3885 }
3886
3887 return ID_TABLE_CONTINUE;
3888}
3889
3890void
3891rb_gc_prepare_heap_process_object(VALUE obj)
3892{
3893 switch (BUILTIN_TYPE(obj)) {
3894 case T_STRING:
3895        // Precompute the string coderange. This both saves time for when it will
3896        // eventually be needed, and avoids mutating heap pages after a potential fork.
3897        rb_enc_str_coderange(obj);
3898        break;
3899 default:
3900 break;
3901 }
3902}
3903
3904void
3905rb_gc_prepare_heap(void)
3906{
3907 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3908}
3909
3910size_t
3911rb_gc_heap_id_for_size(size_t size)
3912{
3913 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3914}
3915
3916bool
3917rb_gc_size_allocatable_p(size_t size)
3918{
3919 return rb_gc_impl_size_allocatable_p(size);
3920}
3921
3922static enum rb_id_table_iterator_result
3923update_id_table(VALUE *value, void *data, int existing)
3924{
3925 void *objspace = (void *)data;
3926
3927 if (gc_object_moved_p_internal(objspace, (VALUE)*value)) {
3928 *value = gc_location_internal(objspace, (VALUE)*value);
3929 }
3930
3931 return ID_TABLE_CONTINUE;
3932}
3933
3934static void
3935update_m_tbl(void *objspace, struct rb_id_table *tbl)
3936{
3937 if (tbl) {
3938 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3939 }
3940}
3941
3942static enum rb_id_table_iterator_result
3943update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3944{
3945 struct rb_cvar_class_tbl_entry *entry;
3946
3947 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3948
3949 if (entry->cref) {
3950 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3951 }
3952
3953 entry->class_value = gc_location_internal(objspace, entry->class_value);
3954
3955 return ID_TABLE_CONTINUE;
3956}
3957
3958static void
3959update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3960{
3961 if (!tbl) return;
3962 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3963}
3964
3965static enum rb_id_table_iterator_result
3966update_const_tbl_i(VALUE value, void *objspace)
3967{
3968 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3969
3970 if (gc_object_moved_p_internal(objspace, ce->value)) {
3971 ce->value = gc_location_internal(objspace, ce->value);
3972 }
3973
3974 if (gc_object_moved_p_internal(objspace, ce->file)) {
3975 ce->file = gc_location_internal(objspace, ce->file);
3976 }
3977
3978 return ID_TABLE_CONTINUE;
3979}
3980
3981static void
3982update_const_tbl(void *objspace, struct rb_id_table *tbl)
3983{
3984 if (!tbl) return;
3985 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3986}
3987
3988static void
3989update_subclasses(void *objspace, rb_classext_t *ext)
3990{
3991 rb_subclass_entry_t *entry = RCLASSEXT_SUBCLASSES(ext);
3992 if (!entry) return;
3993 while (entry) {
3994 if (entry->klass)
3995 UPDATE_IF_MOVED(objspace, entry->klass);
3996 entry = entry->next;
3997 }
3998}
3999
4000static void
4001update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
4002{
4003 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
4004 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
4005 for (size_t i = 0; i < array_size; i++) {
4006 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
4007 }
4008 }
4009}
4010
4011static void
4012update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
4013{
4014 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
4015 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
4016 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
4017 if (is_iclass) {
4018 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
4019 }
4020}
4021
4022static void
4023update_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
4024{
4025 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
4026 rb_objspace_t *objspace = args->objspace;
4027
4028 if (RCLASSEXT_SUPER(ext)) {
4029 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
4030 }
4031
4032 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
4033
4034 UPDATE_IF_MOVED(objspace, ext->fields_obj);
4035 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
4036 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
4037 }
4038 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
4039 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
4040 update_superclasses(objspace, ext);
4041 update_subclasses(objspace, ext);
4042
4043 update_classext_values(objspace, ext, false);
4044}
4045
4046static void
4047update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
4048{
4049 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
4050 rb_objspace_t *objspace = args->objspace;
4051
4052 if (RCLASSEXT_SUPER(ext)) {
4053 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
4054 }
4055 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
4056 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
4057 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
4058 update_subclasses(objspace, ext);
4059
4060 update_classext_values(objspace, ext, true);
4061}
4062
4063 struct global_vm_table_foreach_data {
4064 vm_table_foreach_callback_func callback;
4065 vm_table_update_callback_func update_callback;
4066 void *data;
4067 bool weak_only;
4068};
4069
4070static int
4071vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
4072{
4073 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4074
4075 int ret = iter_data->callback((VALUE)key, iter_data->data);
4076
4077 if (!iter_data->weak_only) {
4078 if (ret != ST_CONTINUE) return ret;
4079
4080 ret = iter_data->callback((VALUE)value, iter_data->data);
4081 }
4082
4083 return ret;
4084}
4085
4086static int
4087vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4088{
4089 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4090
4091 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
4092
4093 if (!iter_data->weak_only) {
4094 if (ret != ST_CONTINUE) return ret;
4095
4096 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
4097 }
4098
4099 return ret;
4100}
4101
4102static int
4103vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
4104{
4105 VALUE sym = *sym_ptr;
4106 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4107
4108 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
4109
4110 int ret = iter_data->callback(sym, iter_data->data);
4111
4112 if (ret == ST_REPLACE) {
4113 ret = iter_data->update_callback(sym_ptr, iter_data->data);
4114 }
4115
4116 return ret;
4117}
4118
4119struct st_table *rb_generic_fields_tbl_get(void);
4120
4121static int
4122vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
4123{
4124 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4125
4126 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
4127 int ret = iter_data->callback((VALUE)key, iter_data->data);
4128 if (ret != ST_CONTINUE) return ret;
4129 }
4130
4131 return iter_data->callback((VALUE)value, iter_data->data);
4132}
4133
4134static int
4135vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4136{
4137 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4138
4139 iter_data->update_callback((VALUE *)value, iter_data->data);
4140
4141 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
4142 iter_data->update_callback((VALUE *)key, iter_data->data);
4143 }
4144
4145 return ST_CONTINUE;
4146}
4147
4148static int
4149vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
4150{
4151 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4152
4153 int ret = iter_data->callback((VALUE)key, iter_data->data);
4154
4155 VALUE new_value = (VALUE)value;
4156 VALUE new_key = (VALUE)key;
4157
4158 switch (ret) {
4159 case ST_CONTINUE:
4160 break;
4161
4162 case ST_DELETE:
4163 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
4164 return ST_DELETE;
4165
4166 case ST_REPLACE: {
4167 ret = iter_data->update_callback(&new_key, iter_data->data);
4168 if (key != new_key) {
4169 ret = ST_DELETE;
4170 }
4171 break;
4172 }
4173
4174 default:
4175 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
4176 }
4177
4178 if (!iter_data->weak_only) {
4179 int ivar_ret = iter_data->callback(new_value, iter_data->data);
4180 switch (ivar_ret) {
4181 case ST_CONTINUE:
4182 break;
4183
4184 case ST_REPLACE:
4185 iter_data->update_callback(&new_value, iter_data->data);
4186 break;
4187
4188 default:
4189 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
4190 }
4191 }
4192
4193 if (key != new_key || value != new_value) {
4194 DURING_GC_COULD_MALLOC_REGION_START();
4195 {
4196 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
4197 }
4198 DURING_GC_COULD_MALLOC_REGION_END();
4199 }
4200
4201 return ret;
4202}
4203
4204static int
4205vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
4206{
4207 // int retval = vm_weak_table_foreach_weak_key(key, value, data, error);
4208 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4209 int retval = iter_data->callback(*str, iter_data->data);
4210
4211 if (retval == ST_REPLACE) {
4212 retval = iter_data->update_callback(str, iter_data->data);
4213 }
4214
4215 if (retval == ST_DELETE) {
4216 FL_UNSET(*str, RSTRING_FSTR);
4217 }
4218
4219 return retval;
4220}
4221
4222void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
4223void
4224rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4225 vm_table_update_callback_func update_callback,
4226 void *data,
4227 bool weak_only,
4228 enum rb_gc_vm_weak_tables table)
4229{
4230 rb_vm_t *vm = GET_VM();
4231
4232 struct global_vm_table_foreach_data foreach_data = {
4233 .callback = callback,
4234 .update_callback = update_callback,
4235 .data = data,
4236 .weak_only = weak_only,
4237 };
4238
4239 switch (table) {
4240 case RB_GC_VM_CI_TABLE: {
4241 st_foreach_with_replace(
4242 &vm->ci_table,
4243 vm_weak_table_foreach_weak_key,
4244 vm_weak_table_foreach_update_weak_key,
4245 (st_data_t)&foreach_data
4246 );
4247 break;
4248 }
4249 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4250 st_foreach_with_replace(
4251 &vm->overloaded_cme_table,
4252 vm_weak_table_foreach_weak_key,
4253 vm_weak_table_foreach_update_weak_key,
4254 (st_data_t)&foreach_data
4255 );
4256 break;
4257 }
4258 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4259 rb_sym_global_symbol_table_foreach_weak_reference(
4260 vm_weak_table_sym_set_foreach,
4261 &foreach_data
4262 );
4263 break;
4264 }
4265 case RB_GC_VM_ID2REF_TABLE: {
4266 if (id2ref_tbl) {
4267 st_foreach_with_replace(
4268 id2ref_tbl,
4269 vm_weak_table_id2ref_foreach,
4270 vm_weak_table_id2ref_foreach_update,
4271 (st_data_t)&foreach_data
4272 );
4273 }
4274 break;
4275 }
4276 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4277 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4278 if (generic_fields_tbl) {
4279 st_foreach(
4280 generic_fields_tbl,
4281 vm_weak_table_gen_fields_foreach,
4282 (st_data_t)&foreach_data
4283 );
4284 }
4285 break;
4286 }
4287 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4288 rb_fstring_foreach_with_replace(
4289 vm_weak_table_frozen_strings_foreach,
4290 &foreach_data
4291 );
4292 break;
4293 }
4294 case RB_GC_VM_WEAK_TABLE_COUNT:
4295 rb_bug("Unreachable");
4296 default:
4297 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4298 }
4299}
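/* Illustrative sketch (not part of gc.c): the callback protocol that
 * rb_gc_vm_weak_table_foreach() expects. The foreach callback returns
 * ST_REPLACE when an entry's slot needs rewriting (e.g. the object moved
 * during compaction), and the update callback then receives a pointer to
 * that slot. example_moved_p() and example_new_location() are hypothetical
 * stand-ins for a GC implementation's own primitives. */
#if 0
static int
example_weak_entry_check(VALUE obj, void *data)
{
    /* Ask for a slot update when the object has moved. */
    return example_moved_p(obj) ? ST_REPLACE : ST_CONTINUE;
}

static int
example_weak_entry_update(VALUE *slot, void *data)
{
    /* Rewrite the slot with the object's new address. */
    *slot = example_new_location(*slot);
    return ST_CONTINUE;
}

/* rb_gc_vm_weak_table_foreach(example_weak_entry_check,
 *                             example_weak_entry_update,
 *                             NULL, false, RB_GC_VM_CI_TABLE); */
#endif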
4300
4301void
4302rb_gc_update_vm_references(void *objspace)
4303{
4304 rb_execution_context_t *ec = GET_EC();
4305 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4306
4307 rb_vm_update_references(vm);
4308 rb_gc_update_global_tbl();
4309 rb_sym_global_symbols_mark_and_move();
4310
4311#if USE_YJIT
4312 void rb_yjit_root_update_references(void); // in Rust
4313
4314 if (rb_yjit_enabled_p) {
4315 rb_yjit_root_update_references();
4316 }
4317#endif
4318
4319#if USE_ZJIT
4320 void rb_zjit_root_update_references(void); // in Rust
4321
4322 if (rb_zjit_enabled_p) {
4323 rb_zjit_root_update_references();
4324 }
4325#endif
4326}
4327
4328void
4329rb_gc_update_object_references(void *objspace, VALUE obj)
4330{
4331 struct classext_foreach_args args;
4332
4333 switch (BUILTIN_TYPE(obj)) {
4334 case T_CLASS:
4335 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4336 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4337 }
4338 // Fall through to the shared T_CLASS/T_MODULE handling
4339 case T_MODULE:
4340 args.klass = obj;
4341 args.objspace = objspace;
4342 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4343 break;
4344
4345 case T_ICLASS:
4346 args.objspace = objspace;
4347 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4348 break;
4349
4350 case T_IMEMO:
4351 rb_imemo_mark_and_move(obj, true);
4352 return;
4353
4354 case T_NIL:
4355 case T_FIXNUM:
4356 case T_NODE:
4357 case T_MOVED:
4358 case T_NONE:
4359 /* These can't move */
4360 return;
4361
4362 case T_ARRAY:
4363 gc_ref_update_array(objspace, obj);
4364 break;
4365
4366 case T_HASH:
4367 gc_ref_update_hash(objspace, obj);
4368 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4369 break;
4370
4371 case T_STRING:
4372 {
4373 if (STR_SHARED_P(obj)) {
4374 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4375 }
4376
4377 /* If, after the move, the string is not embedded but can fit in the
4378 * slot it's been placed in, then re-embed it. */
4379 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4380 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4381 rb_str_make_embedded(obj);
4382 }
4383 }
4384
4385 break;
4386 }
4387 case T_DATA:
4388 /* Call the compaction callback, if it exists */
4389 {
4390 bool typed_data = RTYPEDDATA_P(obj);
4391 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4392
4393 if (typed_data) {
4394 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4395 }
4396
4397 if (ptr) {
4398 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4399 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4400
4401 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4402 VALUE *ref = (VALUE *)((char *)ptr + offset);
4403 *ref = gc_location_internal(objspace, *ref);
4404 }
4405 }
4406 else if (typed_data) {
4407 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4408 if (compact_func) (*compact_func)(ptr);
4409 }
4410 }
4411 }
4412 break;
4413
4414 case T_OBJECT:
4415 gc_ref_update_object(objspace, obj);
4416 break;
4417
4418 case T_FILE:
4419 if (RFILE(obj)->fptr) {
4420 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4421 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4422 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4423 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4424 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4425 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4426 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4427 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4428 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4429 }
4430 break;
4431 case T_REGEXP:
4432 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4433 break;
4434
4435 case T_SYMBOL:
4436 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4437 break;
4438
4439 case T_FLOAT:
4440 case T_BIGNUM:
4441 break;
4442
4443 case T_MATCH:
4444 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4445
4446 if (RMATCH(obj)->str) {
4447 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4448 }
4449 break;
4450
4451 case T_RATIONAL:
4452 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4453 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4454 break;
4455
4456 case T_COMPLEX:
4457 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4458 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4459
4460 break;
4461
4462 case T_STRUCT:
4463 {
4464 long i, len = RSTRUCT_LEN(obj);
4465 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4466
4467 for (i = 0; i < len; i++) {
4468 UPDATE_IF_MOVED(objspace, ptr[i]);
4469 }
4470
4471 if (RSTRUCT_EMBED_LEN(obj)) {
4472 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4473 UPDATE_IF_MOVED(objspace, ptr[len]);
4474 }
4475 }
4476 else {
4477 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4478 }
4479 }
4480 break;
4481 default:
4482 rb_bug("unreachable");
4483 break;
4484 }
4485
4486 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4487}
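/* Illustrative sketch (not part of gc.c): a hypothetical rb_data_type_t
 * whose dcompact callback is invoked by the T_DATA branch above. dmark
 * marks the held reference as movable; dcompact then rewrites it with the
 * public rb_gc_location(). */
#if 0
struct example_wrapper { VALUE held; };

static void
example_dmark(void *ptr)
{
    struct example_wrapper *w = ptr;
    rb_gc_mark_movable(w->held); /* allow the GC to move `held` */
}

static void
example_dcompact(void *ptr)
{
    struct example_wrapper *w = ptr;
    w->held = rb_gc_location(w->held); /* fetch the new address */
}

static const rb_data_type_t example_type = {
    "example_wrapper",
    { example_dmark, RUBY_TYPED_DEFAULT_FREE, NULL, example_dcompact },
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
#endif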
4488
4489VALUE
4490rb_gc_start(void)
4491{
4492 rb_gc();
4493 return Qnil;
4494}
4495
4496void
4497rb_gc(void)
4498{
4499 unless_objspace(objspace) { return; }
4500
4501 rb_gc_impl_start(objspace, true, true, true, false);
4502}
4503
4504int
4505rb_during_gc(void)
4506{
4507 unless_objspace(objspace) { return FALSE; }
4508
4509 return rb_gc_impl_during_gc_p(objspace);
4510}
4511
4512size_t
4513rb_gc_count(void)
4514{
4515 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4516}
4517
4518static VALUE
4519gc_count(rb_execution_context_t *ec, VALUE self)
4520{
4521 return SIZET2NUM(rb_gc_count());
4522}
4523
4524VALUE
4525rb_gc_latest_gc_info(VALUE key)
4526{
4527 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4528 rb_raise(rb_eTypeError, "non-hash or symbol given");
4529 }
4530
4531 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4532
4533 if (val == Qundef) {
4534 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4535 }
4536
4537 return val;
4538}
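/* Illustrative sketch (not part of gc.c): rb_gc_latest_gc_info() accepts
 * either a Symbol (that one entry is returned) or a Hash (all entries are
 * filled in). The :major_by key is shown only as an assumed example; the
 * available keys depend on the GC implementation. */
#if 0
VALUE major_by = rb_gc_latest_gc_info(ID2SYM(rb_intern("major_by")));
VALUE all_info = rb_gc_latest_gc_info(rb_hash_new());
#endif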
4539
4540static VALUE
4541gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4542{
4543 if (NIL_P(arg)) {
4544 arg = rb_hash_new();
4545 }
4546 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4547 rb_raise(rb_eTypeError, "non-hash or symbol given");
4548 }
4549
4550 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4551
4552 if (ret == Qundef) {
4553 GC_ASSERT(SYMBOL_P(arg));
4554
4555 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4556 }
4557
4558 return ret;
4559}
4560
4561size_t
4562rb_gc_stat(VALUE arg)
4563{
4564 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4565 rb_raise(rb_eTypeError, "non-hash or symbol given");
4566 }
4567
4568 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4569
4570 if (ret == Qundef) {
4571 GC_ASSERT(SYMBOL_P(arg));
4572
4573 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4574 }
4575
4576 if (SYMBOL_P(arg)) {
4577 return NUM2SIZET(ret);
4578 }
4579 else {
4580 return 0;
4581 }
4582}
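/* Illustrative sketch (not part of gc.c): as the code above shows,
 * rb_gc_stat() returns the statistic directly for a Symbol key and returns
 * 0 after filling the table when given a Hash. :count is assumed to be
 * provided by the default GC; other keys vary by implementation. */
#if 0
size_t gc_runs = rb_gc_stat(ID2SYM(rb_intern("count")));
#endif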
4583
4584static VALUE
4585gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4586{
4587 if (NIL_P(arg)) {
4588 arg = rb_hash_new();
4589 }
4590
4591 if (NIL_P(heap_name)) {
4592 if (!RB_TYPE_P(arg, T_HASH)) {
4593 rb_raise(rb_eTypeError, "non-hash given");
4594 }
4595 }
4596 else if (FIXNUM_P(heap_name)) {
4597 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4598 rb_raise(rb_eTypeError, "non-hash or symbol given");
4599 }
4600 }
4601 else {
4602 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4603 }
4604
4605 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4606
4607 if (ret == Qundef) {
4608 GC_ASSERT(SYMBOL_P(arg));
4609
4610 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4611 }
4612
4613 return ret;
4614}
4615
4616static VALUE
4617gc_config_get(rb_execution_context_t *ec, VALUE self)
4618{
4619 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4620 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4621
4622 return cfg_hash;
4623}
4624
4625static VALUE
4626gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4627{
4628 void *objspace = rb_gc_get_objspace();
4629
4630 rb_gc_impl_config_set(objspace, hash);
4631
4632 return Qnil;
4633}
4634
4635static VALUE
4636gc_stress_get(rb_execution_context_t *ec, VALUE self)
4637{
4638 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4639}
4640
4641static VALUE
4642gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4643{
4644 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4645
4646 return flag;
4647}
4648
4649void
4650rb_gc_initial_stress_set(VALUE flag)
4651{
4652 initial_stress = flag;
4653}
4654
4655size_t *
4656rb_gc_heap_sizes(void)
4657{
4658 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4659}
4660
4661VALUE
4662rb_gc_enable(void)
4663{
4664 return rb_objspace_gc_enable(rb_gc_get_objspace());
4665}
4666
4667VALUE
4668rb_objspace_gc_enable(void *objspace)
4669{
4670 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4671 rb_gc_impl_gc_enable(objspace);
4672 return RBOOL(disabled);
4673}
4674
4675static VALUE
4676gc_enable(rb_execution_context_t *ec, VALUE _)
4677{
4678 return rb_gc_enable();
4679}
4680
4681static VALUE
4682gc_disable_no_rest(void *objspace)
4683{
4684 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4685 rb_gc_impl_gc_disable(objspace, false);
4686 return RBOOL(disabled);
4687}
4688
4689VALUE
4690rb_gc_disable_no_rest(void)
4691{
4692 return gc_disable_no_rest(rb_gc_get_objspace());
4693}
4694
4695VALUE
4696rb_gc_disable(void)
4697{
4698 return rb_objspace_gc_disable(rb_gc_get_objspace());
4699}
4700
4701VALUE
4702rb_objspace_gc_disable(void *objspace)
4703{
4704 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4705 rb_gc_impl_gc_disable(objspace, true);
4706 return RBOOL(disabled);
4707}
4708
4709static VALUE
4710gc_disable(rb_execution_context_t *ec, VALUE _)
4711{
4712 return rb_gc_disable();
4713}
4714
4715// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4716void
4717ruby_gc_set_params(void)
4718{
4719 rb_gc_impl_set_params(rb_gc_get_objspace());
4720}
4721
4722void
4723rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4724{
4725 RB_VM_LOCKING() {
4726 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4727
4728 if (!RB_SPECIAL_CONST_P(obj)) {
4729 rb_vm_t *vm = GET_VM();
4730 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4731 struct gc_mark_func_data_struct mfd = {
4732 .mark_func = func,
4733 .data = data,
4734 };
4735
4736 vm->gc.mark_func_data = &mfd;
4737 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4738 vm->gc.mark_func_data = prev_mfd;
4739 }
4740 }
4741}
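/* Illustrative sketch (not part of gc.c): a hypothetical callback for
 * rb_objspace_reachable_objects_from(), which invokes `func` once for
 * each object directly reachable from `obj`. */
#if 0
static void
example_dump_reachable(VALUE child, void *data)
{
    rb_obj_info_dump(child); /* defined later in this file */
}

/* rb_objspace_reachable_objects_from(obj, example_dump_reachable, NULL); */
#endif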
4742
4743 struct root_objects_data {
4744 const char *category;
4745 void (*func)(const char *category, VALUE, void *);
4746 void *data;
4747};
4748
4749static void
4750root_objects_from(VALUE obj, void *ptr)
4751{
4752 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4753 (*data->func)(data->category, obj, data->data);
4754}
4755
4756void
4757rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4758{
4759 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4760
4761 rb_vm_t *vm = GET_VM();
4762
4763 struct root_objects_data data = {
4764 .func = func,
4765 .data = passing_data,
4766 };
4767
4768 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4769 struct gc_mark_func_data_struct mfd = {
4770 .mark_func = root_objects_from,
4771 .data = &data,
4772 };
4773
4774 vm->gc.mark_func_data = &mfd;
4775 rb_gc_save_machine_context();
4776 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4777 vm->gc.mark_func_data = prev_mfd;
4778}
4779
4780/*
4781 ------------------------------ DEBUG ------------------------------
4782*/
4783
4784static const char *
4785type_name(int type, VALUE obj)
4786{
4787 switch (type) {
4788#define TYPE_NAME(t) case (t): return #t;
4789 TYPE_NAME(T_NONE);
4790 TYPE_NAME(T_OBJECT);
4791 TYPE_NAME(T_CLASS);
4792 TYPE_NAME(T_MODULE);
4793 TYPE_NAME(T_FLOAT);
4794 TYPE_NAME(T_STRING);
4795 TYPE_NAME(T_REGEXP);
4796 TYPE_NAME(T_ARRAY);
4797 TYPE_NAME(T_HASH);
4798 TYPE_NAME(T_STRUCT);
4799 TYPE_NAME(T_BIGNUM);
4800 TYPE_NAME(T_FILE);
4801 TYPE_NAME(T_MATCH);
4802 TYPE_NAME(T_COMPLEX);
4803 TYPE_NAME(T_RATIONAL);
4804 TYPE_NAME(T_NIL);
4805 TYPE_NAME(T_TRUE);
4806 TYPE_NAME(T_FALSE);
4807 TYPE_NAME(T_SYMBOL);
4808 TYPE_NAME(T_FIXNUM);
4809 TYPE_NAME(T_UNDEF);
4810 TYPE_NAME(T_IMEMO);
4811 TYPE_NAME(T_ICLASS);
4812 TYPE_NAME(T_MOVED);
4813 TYPE_NAME(T_ZOMBIE);
4814 case T_DATA:
4815 if (obj && rb_objspace_data_type_name(obj)) {
4816 return rb_objspace_data_type_name(obj);
4817 }
4818 return "T_DATA";
4819#undef TYPE_NAME
4820 }
4821 return "unknown";
4822}
4823
4824static const char *
4825obj_type_name(VALUE obj)
4826{
4827 return type_name(TYPE(obj), obj);
4828}
4829
4830const char *
4831rb_method_type_name(rb_method_type_t type)
4832{
4833 switch (type) {
4834 case VM_METHOD_TYPE_ISEQ: return "iseq";
4835 case VM_METHOD_TYPE_ATTRSET: return "attrset";
4836 case VM_METHOD_TYPE_IVAR: return "ivar";
4837 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4838 case VM_METHOD_TYPE_ALIAS: return "alias";
4839 case VM_METHOD_TYPE_REFINED: return "refined";
4840 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4841 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4842 case VM_METHOD_TYPE_MISSING: return "missing";
4843 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4844 case VM_METHOD_TYPE_UNDEF: return "undef";
4845 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4846 }
4847 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4848}
4849
4850static void
4851rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4852{
4853 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4854 VALUE path = rb_iseq_path(iseq);
4855 int n = ISEQ_BODY(iseq)->location.first_lineno;
4856 snprintf(buff, buff_size, " %s@%s:%d",
4857 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4858 RSTRING_PTR(path), n);
4859 }
4860}
4861
4862static int
4863str_len_no_raise(VALUE str)
4864{
4865 long len = RSTRING_LEN(str);
4866 if (len < 0) return 0;
4867 if (len > INT_MAX) return INT_MAX;
4868 return (int)len;
4869}
4870
4871#define BUFF_ARGS buff + pos, buff_size - pos
4872#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4873#define APPEND_S(s) do { \
4874 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4875 goto end; \
4876 } \
4877 else { \
4878 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4879 } \
4880 } while (0)
4881#define C(c, s) ((c) != 0 ? (s) : " ")
4882
4883static size_t
4884rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4885{
4886 size_t pos = 0;
4887
4888 if (SPECIAL_CONST_P(obj)) {
4889 APPEND_F("%s", obj_type_name(obj));
4890
4891 if (FIXNUM_P(obj)) {
4892 APPEND_F(" %ld", FIX2LONG(obj));
4893 }
4894 else if (SYMBOL_P(obj)) {
4895 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4896 }
4897 }
4898 else {
4899 // const int age = RVALUE_AGE_GET(obj);
4900
4901 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4902 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4903 // TODO: fixme
4904 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4905 // (void *)obj, age,
4906 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4907 // C(RVALUE_MARK_BITMAP(obj), "M"),
4908 // C(RVALUE_PIN_BITMAP(obj), "P"),
4909 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4910 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4911 // C(rb_objspace_garbage_object_p(obj), "G"),
4912 // obj_type_name(obj));
4913 }
4914 else {
4915 /* fake */
4916 // APPEND_F("%p [%dXXXX] %s",
4917 // (void *)obj, age,
4918 // obj_type_name(obj));
4919 }
4920
4921 if (internal_object_p(obj)) {
4922 /* ignore */
4923 }
4924 else if (RBASIC(obj)->klass == 0) {
4925 APPEND_S("(temporary internal)");
4926 }
4927 else if (RTEST(RBASIC(obj)->klass)) {
4928 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4929 if (!NIL_P(class_path)) {
4930 APPEND_F("%s ", RSTRING_PTR(class_path));
4931 }
4932 }
4933 }
4934 end:
4935
4936 return pos;
4937}
4938
4939const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4940
4941static size_t
4942 rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4943{
4944 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4945 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4946
4947 switch (type) {
4948 case T_NODE:
4949 UNEXPECTED_NODE(rb_raw_obj_info);
4950 break;
4951 case T_ARRAY:
4952 if (ARY_SHARED_P(obj)) {
4953 APPEND_S("shared -> ");
4954 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4955 }
4956 else {
4957 APPEND_F("[%s%s%s] ",
4958 C(ARY_EMBED_P(obj), "E"),
4959 C(ARY_SHARED_P(obj), "S"),
4960 C(ARY_SHARED_ROOT_P(obj), "R"));
4961
4962 if (ARY_EMBED_P(obj)) {
4963 APPEND_F("len: %ld (embed)",
4964 RARRAY_LEN(obj));
4965 }
4966 else {
4967 APPEND_F("len: %ld, capa:%ld ptr:%p",
4968 RARRAY_LEN(obj),
4969 RARRAY(obj)->as.heap.aux.capa,
4970 (void *)RARRAY_CONST_PTR(obj));
4971 }
4972 }
4973 break;
4974 case T_STRING: {
4975 APPEND_F("[%s%s] ",
4976 C(FL_TEST(obj, RSTRING_FSTR), "F"),
4977 C(RB_OBJ_FROZEN(obj), "R"));
4978
4979 if (STR_SHARED_P(obj)) {
4980 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4981 }
4982 else {
4983 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4984
4985 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4986 }
4987 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4988 break;
4989 }
4990 case T_SYMBOL: {
4991 VALUE fstr = RSYMBOL(obj)->fstr;
4992 ID id = RSYMBOL(obj)->id;
4993 if (RB_TYPE_P(fstr, T_STRING)) {
4994 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4995 }
4996 else {
4997 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4998 }
4999 break;
5000 }
5001 case T_MOVED: {
5002 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
5003 break;
5004 }
5005 case T_HASH: {
5006 APPEND_F("[%c] %"PRIdSIZE,
5007 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
5008 RHASH_SIZE(obj));
5009 break;
5010 }
5011 case T_CLASS:
5012 case T_MODULE:
5013 {
5014 VALUE class_path = rb_class_path_cached(obj);
5015 if (!NIL_P(class_path)) {
5016 APPEND_F("%s", RSTRING_PTR(class_path));
5017 }
5018 else {
5019 APPEND_S("(anon)");
5020 }
5021 break;
5022 }
5023 case T_ICLASS:
5024 {
5025 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
5026 if (!NIL_P(class_path)) {
5027 APPEND_F("src:%s", RSTRING_PTR(class_path));
5028 }
5029 break;
5030 }
5031 case T_OBJECT:
5032 {
5033 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
5034 if (rb_shape_obj_too_complex_p(obj)) {
5035 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
5036 APPEND_F("(too_complex) len:%zu", hash_len);
5037 }
5038 else {
5039 APPEND_F("(embed) len:%d capa:%d", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj));
5040 }
5041 }
5042 else {
5043 APPEND_F("len:%d capa:%d ptr:%p", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
5044 }
5045 }
5046 break;
5047 case T_DATA: {
5048 const struct rb_block *block;
5049 const rb_iseq_t *iseq;
5050 if (rb_obj_is_proc(obj) &&
5051 (block = vm_proc_block(obj)) != NULL &&
5052 (vm_block_type(block) == block_type_iseq) &&
5053 (iseq = vm_block_iseq(block)) != NULL) {
5054 rb_raw_iseq_info(BUFF_ARGS, iseq);
5055 }
5056 else if (rb_ractor_p(obj)) {
5057 rb_ractor_t *r = (void *)DATA_PTR(obj);
5058 if (r) {
5059 APPEND_F("r:%d", r->pub.id);
5060 }
5061 }
5062 break;
5063 }
5064 case T_IMEMO: {
5065 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
5066
5067 switch (imemo_type(obj)) {
5068 case imemo_ment:
5069 {
5070 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
5071
5072 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
5073 rb_id2name(me->called_id),
5074 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
5075 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
5076 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
5077 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
5078 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
5079 me->def ? rb_method_type_name(me->def->type) : "NULL",
5080 me->def ? me->def->aliased : -1,
5081 (void *)me->owner, // obj_info(me->owner),
5082 (void *)me->defined_class); //obj_info(me->defined_class)));
5083
5084 if (me->def) {
5085 switch (me->def->type) {
5086 case VM_METHOD_TYPE_ISEQ:
5087 APPEND_S(" (iseq:");
5088 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
5089 APPEND_S(")");
5090 break;
5091 default:
5092 break;
5093 }
5094 }
5095
5096 break;
5097 }
5098 case imemo_iseq: {
5099 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
5100 rb_raw_iseq_info(BUFF_ARGS, iseq);
5101 break;
5102 }
5103 case imemo_callinfo:
5104 {
5105 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
5106 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
5107 rb_id2name(vm_ci_mid(ci)),
5108 vm_ci_flag(ci),
5109 vm_ci_argc(ci),
5110 vm_ci_kwarg(ci) ? "available" : "NULL");
5111 break;
5112 }
5113 case imemo_callcache:
5114 {
5115 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
5116 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
5117 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
5118
5119 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
5120 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
5121 cme ? rb_id2name(cme->called_id) : "<NULL>",
5122 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
5123 (void *)cme,
5124 (void *)(uintptr_t)vm_cc_call(cc));
5125 break;
5126 }
5127 default:
5128 break;
5129 }
5130 }
5131 default:
5132 break;
5133 }
5134 }
5135 end:
5136
5137 return pos;
5138}
5139
5140#undef C
5141
5142#ifdef RUBY_ASAN_ENABLED
5143void
5144rb_asan_poison_object(VALUE obj)
5145{
5146 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5147 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
5148}
5149
5150void
5151rb_asan_unpoison_object(VALUE obj, bool newobj_p)
5152{
5153 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5154 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
5155}
5156
5157void *
5158rb_asan_poisoned_object_p(VALUE obj)
5159{
5160 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5161 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
5162}
5163#endif
5164
5165static void
5166raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5167{
5168 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
5169 pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
5170 if (pos >= buff_size) {} // truncated
5171}
5172
5173const char *
5174rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5175{
5176 void *objspace = rb_gc_get_objspace();
5177
5178 if (SPECIAL_CONST_P(obj)) {
5179 raw_obj_info(buff, buff_size, obj);
5180 }
5181 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
5182 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
5183 }
5184#if 0 // maybe no need to check it?
5185 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
5186 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
5187 }
5188#endif
5189 else {
5190 asan_unpoisoning_object(obj) {
5191 raw_obj_info(buff, buff_size, obj);
5192 }
5193 }
5194 return buff;
5195}
5196
5197#undef APPEND_S
5198#undef APPEND_F
5199#undef BUFF_ARGS
5200
5201 /* Increments *var atomically and resets *var to 0 once maxval is
5202 * reached. Returns the old *var value, wrapped to the range (0...maxval). */
5203static rb_atomic_t
5204atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5205{
5206 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5207 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5208 const rb_atomic_t newval = oldval + 1;
5209 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5210 oldval %= maxval;
5211 }
5212 return oldval;
5213}
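/* Worked example for atomic_inc_wraparound(): with maxval == 10 and *var
 * starting at 0, successive calls return 0, 1, ..., 9. The call that
 * returns 9 leaves *var at 10 after the fetch-add, and the CAS resets it
 * to 10 % 10 == 0, so the next call returns 0 again. Even if the CAS
 * loses a race and fails, the modulo on the return value keeps every
 * result within 0...maxval. */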
5214
5215static const char *
5216obj_info(VALUE obj)
5217{
5218 if (RGENGC_OBJ_INFO) {
5219 static struct {
5220 rb_atomic_t index;
5221 char buffers[10][0x100];
5222 } info = {0};
5223
5224 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5225 char *const buff = info.buffers[index];
5226 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5227 }
5228 return obj_type_name(obj);
5229}
5230
5231/*
5232 ------------------------ Extended allocator ------------------------
5233*/
5234
5235 struct gc_raise_tag {
5236 VALUE exc;
5237 const char *fmt;
5238 va_list *ap;
5239};
5240
5241static void *
5242gc_vraise(void *ptr)
5243{
5244 struct gc_raise_tag *argv = ptr;
5245 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5246 UNREACHABLE_RETURN(NULL);
5247}
5248
5249static void
5250gc_raise(VALUE exc, const char *fmt, ...)
5251{
5252 va_list ap;
5253 va_start(ap, fmt);
5254 struct gc_raise_tag argv = {
5255 exc, fmt, &ap,
5256 };
5257
5258 if (ruby_native_thread_p()) {
5259 rb_thread_call_with_gvl(gc_vraise, &argv);
5260 UNREACHABLE;
5261 }
5262 else {
5263 /* Not in a ruby thread */
5264 fprintf(stderr, "%s", "[FATAL] ");
5265 vfprintf(stderr, fmt, ap);
5266 }
5267
5268 va_end(ap);
5269 abort();
5270}
5271
5272NORETURN(static void negative_size_allocation_error(const char *));
5273static void
5274negative_size_allocation_error(const char *msg)
5275{
5276 gc_raise(rb_eNoMemError, "%s", msg);
5277}
5278
5279static void *
5280ruby_memerror_body(void *dummy)
5281{
5282 rb_memerror();
5283 return 0;
5284}
5285
5286NORETURN(static void ruby_memerror(void));
5287 RBIMPL_ATTR_MAYBE_UNUSED()
5288 static void
5289ruby_memerror(void)
5290{
5291 if (ruby_thread_has_gvl_p()) {
5292 rb_memerror();
5293 }
5294 else {
5295 if (ruby_native_thread_p()) {
5296 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5297 }
5298 else {
5299 /* no ruby thread */
5300 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5301 }
5302 }
5303
5304 /* There has been discussion about whether we should exit here; */
5305 /* we may revisit this decision later. */
5306 exit(EXIT_FAILURE);
5307}
5308
5309void
5310rb_memerror(void)
5311{
5312 /* the `GET_VM()->special_exceptions` below assumes that
5313 * the VM is reachable from the current thread. We should
5314 * definitely make sure of that. */
5315 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5316
5317 rb_execution_context_t *ec = GET_EC();
5318 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5319
5320 if (!exc ||
5321 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5322 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5323 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5324 exit(EXIT_FAILURE);
5325 }
5326 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5327 rb_ec_raised_clear(ec);
5328 }
5329 else {
5330 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5331 exc = ruby_vm_special_exception_copy(exc);
5332 }
5333 ec->errinfo = exc;
5334 EC_JUMP_TAG(ec, TAG_RAISE);
5335}
5336
5337bool
5338rb_memerror_reentered(void)
5339{
5340 rb_execution_context_t *ec = GET_EC();
5341 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5342}
5343
5344static void *
5345handle_malloc_failure(void *ptr)
5346{
5347 if (LIKELY(ptr)) {
5348 return ptr;
5349 }
5350 else {
5351 ruby_memerror();
5352 UNREACHABLE_RETURN(ptr);
5353 }
5354}
5355
5356static void *ruby_xmalloc_body(size_t size);
5357
5358void *
5359ruby_xmalloc(size_t size)
5360{
5361 return handle_malloc_failure(ruby_xmalloc_body(size));
5362}
5363
5364static bool
5365malloc_gc_allowed(void)
5366{
5367 rb_ractor_t *r = rb_current_ractor_raw(false);
5368
5369 return r == NULL || !r->malloc_gc_disabled;
5370}
5371
5372static void *
5373ruby_xmalloc_body(size_t size)
5374{
5375 if ((ssize_t)size < 0) {
5376 negative_size_allocation_error("too large allocation size");
5377 }
5378
5379 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5380}
5381
5382void
5383ruby_malloc_size_overflow(size_t count, size_t elsize)
5384{
5385 rb_raise(rb_eArgError,
5386 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5387 count, elsize);
5388}
5389
5390void
5391ruby_malloc_add_size_overflow(size_t x, size_t y)
5392{
5393 rb_raise(rb_eArgError,
5394 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5395 x, y);
5396}
5397
5398static void *ruby_xmalloc2_body(size_t n, size_t size);
5399
5400void *
5401ruby_xmalloc2(size_t n, size_t size)
5402{
5403 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5404}
5405
5406static void *
5407ruby_xmalloc2_body(size_t n, size_t size)
5408{
5409 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5410}
5411
5412static void *ruby_xcalloc_body(size_t n, size_t size);
5413
5414void *
5415ruby_xcalloc(size_t n, size_t size)
5416{
5417 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5418}
5419
5420static void *
5421ruby_xcalloc_body(size_t n, size_t size)
5422{
5423 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5424}
5425
5426static void *ruby_xrealloc_sized_body(void *ptr, size_t new_size, size_t old_size);
5427
5428#ifdef ruby_xrealloc_sized
5429#undef ruby_xrealloc_sized
5430#endif
5431void *
5432ruby_xrealloc_sized(void *ptr, size_t new_size, size_t old_size)
5433{
5434 return handle_malloc_failure(ruby_xrealloc_sized_body(ptr, new_size, old_size));
5435}
5436
5437static void *
5438ruby_xrealloc_sized_body(void *ptr, size_t new_size, size_t old_size)
5439{
5440 if ((ssize_t)new_size < 0) {
5441 negative_size_allocation_error("too large allocation size");
5442 }
5443
5444 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5445}
5446
5447void *
5448ruby_xrealloc(void *ptr, size_t new_size)
5449{
5450 return ruby_xrealloc_sized(ptr, new_size, 0);
5451}
5452
5453static void *ruby_xrealloc2_sized_body(void *ptr, size_t n, size_t size, size_t old_n);
5454
5455#ifdef ruby_xrealloc2_sized
5456#undef ruby_xrealloc2_sized
5457#endif
5458void *
5459ruby_xrealloc2_sized(void *ptr, size_t n, size_t size, size_t old_n)
5460{
5461 return handle_malloc_failure(ruby_xrealloc2_sized_body(ptr, n, size, old_n));
5462}
5463
5464static void *
5465ruby_xrealloc2_sized_body(void *ptr, size_t n, size_t size, size_t old_n)
5466{
5467 size_t len = xmalloc2_size(n, size);
5468 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5469}
5470
5471void *
5472ruby_xrealloc2(void *ptr, size_t n, size_t size)
5473{
5474 return ruby_xrealloc2_sized(ptr, n, size, 0);
5475}
5476
5477#ifdef ruby_xfree_sized
5478#undef ruby_xfree_sized
5479#endif
5480void
5481ruby_xfree_sized(void *x, size_t size)
5482{
5483 if (LIKELY(x)) {
5484 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5485 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5486 * that case. */
5487 if (LIKELY(GET_VM())) {
5488 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5489 }
5490 else {
5491 ruby_mimfree(x);
5492 }
5493 }
5494}
5495
5496void
5497ruby_xfree(void *x)
5498{
5499 ruby_xfree_sized(x, 0);
5500}
5501
5502void *
5503rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5504{
5505 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5506 return ruby_xmalloc(w);
5507}
5508
5509void *
5510rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5511{
5512 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5513 return ruby_xcalloc(w, 1);
5514}
5515
5516void *
5517rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5518{
5519 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5520 return ruby_xrealloc((void *)p, w);
5521}
5522
5523void *
5524rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5525{
5526 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5527 return ruby_xmalloc(u);
5528}
5529
5530void *
5531rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5532{
5533 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5534 return ruby_xcalloc(u, 1);
5535}
5536
5537 /* Mimics ruby_xmalloc, but does not require rb_objspace.
5538 * Returns a pointer suitable for ruby_xfree.
5539 */
5540void *
5541ruby_mimmalloc(size_t size)
5542{
5543 void *mem;
5544#if CALC_EXACT_MALLOC_SIZE
5545 size += sizeof(struct malloc_obj_info);
5546#endif
5547 mem = malloc(size);
5548#if CALC_EXACT_MALLOC_SIZE
5549 if (!mem) {
5550 return NULL;
5551 }
5552 else
5553 /* set 0 for consistency of allocated_size/allocations */
5554 {
5555 struct malloc_obj_info *info = mem;
5556 info->size = 0;
5557 mem = info + 1;
5558 }
5559#endif
5560 return mem;
5561}
5562
5563void *
5564ruby_mimcalloc(size_t num, size_t size)
5565{
5566 void *mem;
5567#if CALC_EXACT_MALLOC_SIZE
5568 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5569 if (UNLIKELY(t.overflowed)) {
5570 return NULL;
5571 }
5572 size = t.result + sizeof(struct malloc_obj_info);
5573 mem = calloc1(size);
5574 if (!mem) {
5575 return NULL;
5576 }
5577 else
5578 /* set 0 for consistency of allocated_size/allocations */
5579 {
5580 struct malloc_obj_info *info = mem;
5581 info->size = 0;
5582 mem = info + 1;
5583 }
5584#else
5585 mem = calloc(num, size);
5586#endif
5587 return mem;
5588}
5589
5590void
5591ruby_mimfree(void *ptr)
5592{
5593#if CALC_EXACT_MALLOC_SIZE
5594 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5595 ptr = info;
5596#endif
5597 free(ptr);
5598}
5599
5600void
5601rb_gc_adjust_memory_usage(ssize_t diff)
5602{
5603 unless_objspace(objspace) { return; }
5604
5605 rb_gc_impl_adjust_memory_usage(objspace, diff);
5606}
5607
5608const char *
5609rb_obj_info(VALUE obj)
5610{
5611 return obj_info(obj);
5612}
5613
5614void
5615rb_obj_info_dump(VALUE obj)
5616{
5617 char buff[0x100];
5618 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5619}
5620
5621void
5622rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5623{
5624 char buff[0x100];
5625 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5626}
5627
5628void
5629rb_gc_before_fork(void)
5630{
5631 rb_gc_impl_before_fork(rb_gc_get_objspace());
5632}
5633
5634void
5635rb_gc_after_fork(rb_pid_t pid)
5636{
5637 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5638}
5639
5640bool
5641rb_gc_obj_shareable_p(VALUE obj)
5642{
5643 return RB_OBJ_SHAREABLE_P(obj);
5644}
5645
5646void
5647rb_gc_rp(VALUE obj)
5648{
5649 rp(obj);
5650}
5651
5652 struct check_shareable_data {
5653 VALUE parent;
5654 long err_count;
5655};
5656
5657static void
5658check_shareable_i(const VALUE child, void *ptr)
5659{
5660 struct check_shareable_data *data = (struct check_shareable_data *)ptr;
5661
5662 if (!rb_gc_obj_shareable_p(child)) {
5663 fprintf(stderr, "(a) ");
5664 rb_gc_rp(data->parent);
5665 fprintf(stderr, "(b) ");
5666 rb_gc_rp(child);
5667 fprintf(stderr, "check_shareable_i: shareable (a) -> unshareable (b)\n");
5668
5669 data->err_count++;
5670 rb_bug("!! shareable constraint violated !!");
5671 }
5672}
5673
5674static bool gc_checking_shareable = false;
5675
5676static void
5677gc_verify_shareable(void *objspace, VALUE obj, void *data)
5678{
5679 // While gc_checking_shareable is true, other Ractors should not
5680 // run the GC, because the flag is global rather than Ractor-local.
5681 // TODO: remove the VM locking once the flag becomes Ractor-local
5682
5683 unsigned int lev = RB_GC_VM_LOCK();
5684 {
5685 gc_checking_shareable = true;
5686 rb_objspace_reachable_objects_from(obj, check_shareable_i, (void *)data);
5687 gc_checking_shareable = false;
5688 }
5689 RB_GC_VM_UNLOCK(lev);
5690}
5691
5692// TODO: only one level (non-recursive)
5693void
5694rb_gc_verify_shareable(VALUE obj)
5695{
5696 rb_objspace_t *objspace = rb_gc_get_objspace();
5697 struct check_shareable_data data = {
5698 .parent = obj,
5699 .err_count = 0,
5700 };
5701 gc_verify_shareable(objspace, obj, &data);
5702
5703 if (data.err_count > 0) {
5704 rb_bug("rb_gc_verify_shareable");
5705 }
5706}
5707
5708bool
5709rb_gc_checking_shareable(void)
5710{
5711 return gc_checking_shareable;
5712}
5713
5714/*
5715 * Document-module: ObjectSpace
5716 *
5717 * The ObjectSpace module contains a number of routines
5718 * that interact with the garbage collection facility and allow you to
5719 * traverse all living objects with an iterator.
5720 *
5721 * ObjectSpace also provides support for object finalizers, procs that will be
5722 * called after a specific object was destroyed by garbage collection. See
5723 * the documentation for +ObjectSpace.define_finalizer+ for important
5724 * information on how to use this method correctly.
5725 *
5726 * a = "A"
5727 * b = "B"
5728 *
5729 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5730 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5731 *
5732 * a = nil
5733 * b = nil
5734 *
5735 * _produces:_
5736 *
5737 * Finalizer two on 537763470
5738 * Finalizer one on 537763480
5739 */
5740
5741/* Document-class: GC::Profiler
5742 *
5743 * The GC profiler provides access to information on GC runs including time,
5744 * length and object space size.
5745 *
5746 * Example:
5747 *
5748 * GC::Profiler.enable
5749 *
5750 * require 'rdoc/rdoc'
5751 *
5752 * GC::Profiler.report
5753 *
5754 * GC::Profiler.disable
5755 *
5756 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5757 */
5758
5759#include "gc.rbinc"
5760
5761void
5762Init_GC(void)
5763{
5764#undef rb_intern
5765 rb_gc_register_address(&id2ref_value);
5766
5767 malloc_offset = gc_compute_malloc_offset();
5768
5769 rb_mGC = rb_define_module("GC");
5770
5771 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5772
5773 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5774
5775 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5776 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5777
5778 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5779
5780 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5781
5782 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5783 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5784
5785 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5786
5787 rb_gc_impl_init();
5788}
5789
5790// Set a name for the anonymous virtual memory area. `addr` is the starting
5791// address of the area and `size` is its length in bytes. `name` is a
5792// NUL-terminated human-readable string.
5793//
5794// This function is usually called after calling `mmap()`. The human-readable
5795// annotation helps developers identify the call site of `mmap()` that created
5796// the memory mapping.
5797//
5798// This function currently only works on Linux 5.17 or higher. After calling
5799// this function, we can see annotations in the form of "[anon:...]" in
5800// `/proc/self/maps`, where `...` is the content of `name`. This function has
5801// no effect when called on other platforms.
5802void
5803ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5804{
5805#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5806 // The name length cannot exceed 80 (including the '\0').
5807 RUBY_ASSERT(strlen(name) < 80);
5808 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5809 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5810 // reasons.
5811 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5812 // 2. addr is an invalid address.
5813 // 3. The string pointed by name is too long.
5814 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5815 // happen if we run the compiled binary on an old kernel. In theory, all
5816 // other errors should result in a failure. But since EINVAL cannot tell
5817 // the first error from others, and this function is mainly used for
5818 // debugging, we silently ignore the error.
5819 errno = 0;
5820#endif
5821}
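/* Illustrative sketch (not part of gc.c): how a call site might pair
 * mmap() with ruby_annotate_mmap(). The region name is hypothetical; on
 * Linux 5.17+ it would appear as "[anon:Ruby:example_region]" in
 * /proc/self/maps. */
#if 0
#include <sys/mman.h>

static void *
example_mmap_annotated(size_t size)
{
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem != MAP_FAILED) {
        ruby_annotate_mmap(mem, size, "Ruby:example_region");
    }
    return mem;
}
#endif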
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:199
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are VALUE.
Definition atomic.h:406
#define RUBY_ATOMIC_SIZE_FETCH_ADD(var, val)
Identical to RUBY_ATOMIC_FETCH_ADD, except it expects its arguments to be size_t.
Definition atomic.h:235
#define RUBY_ATOMIC_CAS(var, oldval, newval)
Atomic compare-and-swap.
Definition atomic.h:165
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_FETCH_ADD(var, val)
Atomically replaces the value pointed by var with the result of addition of val to the old value of v...
Definition atomic.h:118
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_module_function(klass, mid, func, arity)
Defines klass#mid and makes it a module function.
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
Definition event.h:93
static bool RB_OBJ_FROZEN(VALUE obj)
Checks if an object is frozen.
Definition fl_type.h:711
@ RUBY_FL_WB_PROTECTED
Definition fl_type.h:186
VALUE rb_define_module(const char *name)
Defines a top-level module.
Definition class.c:1636
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
Definition class.c:3180
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define TYPE(_)
Old name of rb_type.
Definition value_type.h:108
#define FL_SINGLETON
Old name of RUBY_FL_SINGLETON.
Definition fl_type.h:58
#define T_FILE
Old name of RUBY_T_FILE.
Definition value_type.h:62
#define FL_UNSET_RAW
Old name of RB_FL_UNSET_RAW.
Definition fl_type.h:130
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:400
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define T_MASK
Old name of RUBY_T_MASK.
Definition value_type.h:68
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_NIL
Old name of RUBY_T_NIL.
Definition value_type.h:72
#define UNREACHABLE
Old name of RBIMPL_UNREACHABLE.
Definition assume.h:28
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:131
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
Definition value_type.h:63
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define T_DATA
Old name of RUBY_T_DATA.
Definition value_type.h:60
#define FIXNUM_FLAG
Old name of RUBY_FIXNUM_FLAG.
#define LL2NUM
Old name of RB_LL2NUM.
Definition long_long.h:30
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define T_NODE
Old name of RUBY_T_NODE.
Definition value_type.h:73
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define ASSUME
Old name of RBIMPL_ASSUME.
Definition assume.h:27
#define T_TRUE
Old name of RUBY_T_TRUE.
Definition value_type.h:81
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_ABLE
Old name of RB_FL_ABLE.
Definition fl_type.h:118
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:128
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define T_FALSE
Old name of RUBY_T_FALSE.
Definition value_type.h:61
#define ULL2NUM
Old name of RB_ULL2NUM.
Definition long_long.h:31
#define T_UNDEF
Old name of RUBY_T_UNDEF.
Definition value_type.h:82
#define FLONUM_P
Old name of RB_FLONUM_P.
#define Qtrue
Old name of RUBY_Qtrue.
#define T_ZOMBIE
Old name of RUBY_T_ZOMBIE.
Definition value_type.h:83
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define NUM2ULL
Old name of RB_NUM2ULL.
Definition long_long.h:35
#define FL_WB_PROTECTED
Old name of RUBY_FL_WB_PROTECTED.
Definition fl_type.h:59
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define T_MATCH
Old name of RUBY_T_MATCH.
Definition value_type.h:69
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define T_MOVED
Old name of RUBY_T_MOVED.
Definition value_type.h:71
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define xcalloc
Old name of ruby_xcalloc.
Definition xmalloc.h:55
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:129
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define NUM2SIZET
Old name of RB_NUM2SIZE.
Definition size_t.h:61
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
size_t ruby_stack_length(VALUE **p)
Queries what Ruby thinks is the machine stack.
Definition gc.c:2776
int ruby_stack_check(void)
Checks for stack overflow.
Definition gc.c:2816
void rb_category_warn(rb_warning_category_t category, const char *fmt,...)
Identical to rb_category_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:477
VALUE rb_eNoMemError
NoMemoryError exception.
Definition error.c:1438
VALUE rb_eRangeError
RangeError exception.
Definition error.c:1431
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
Definition error.h:476
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1427
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:467
@ RB_WARN_CATEGORY_DEPRECATED
Warning is for deprecated features.
Definition error.h:48
VALUE rb_mKernel
Kernel module.
Definition object.c:60
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_mGC
GC module.
Definition gc.c:429
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:229
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:59
VALUE rb_class_real(VALUE klass)
Finds a "real" class.
Definition object.c:220
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:888
VALUE rb_to_int(VALUE val)
Identical to rb_check_to_int(), except it raises in case of conversion mismatch.
Definition object.c:3327
#define RB_POSFIXABLE(_)
Checks if the passed value is in range of fixnum, assuming it is a positive number.
Definition fixnum.h:43
int rb_enc_str_coderange(VALUE str)
Scans the passed string to collect its code range.
Definition string.c:930
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1120
#define RGENGC_WB_PROTECTED_OBJECT
This is a compile-time flag to enable/disable write barrier for struct RObject.
Definition gc.h:490
Defines RBIMPL_HAS_BUILTIN.
void rb_ary_free(VALUE ary)
Destroys the given array for no reason.
#define RETURN_ENUMERATOR(obj, argc, argv)
Identical to RETURN_SIZED_ENUMERATOR(), except its size is unknown.
Definition enumerator.h:242
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:284
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:988
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:122
void rb_str_free(VALUE str)
Destroys the given string for no reason.
Definition string.c:1735
size_t rb_str_capacity(VALUE str)
Queries the capacity of the given string.
Definition string.c:984
VALUE rb_class_path_cached(VALUE mod)
Just another name of rb_mod_name.
Definition variable.c:389
void rb_free_generic_ivar(VALUE obj)
Frees the list of instance variables.
Definition variable.c:1305
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition vm_method.c:1731
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:689
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
Definition vm_method.c:1740
int rb_obj_respond_to(VALUE obj, ID mid, int private_p)
Identical to rb_respond_to(), except it additionally takes the visibility parameter.
Definition vm_method.c:3458
VALUE rb_sym2str(VALUE symbol)
Obtains a frozen string representation of a symbol (not including the leading colon).
Definition symbol.c:1024
int rb_io_fptr_finalize(rb_io_t *fptr)
Destroys the given IO.
Definition io.c:5700
int len
Length of the buffer.
Definition io.h:8
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
#define RB_OBJ_SHAREABLE_P(obj)
Queries if the passed object has previously been classified as shareable.
Definition ractor.h:235
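A minimal sketch of guarding a cross-Ractor handoff (obj is assumed; raising TypeError is just a placeholder for whatever error fits the caller):

    if (!rb_ractor_shareable_p(obj)) {
        /* e.g. not deeply frozen; refuse rather than share unsafely */
        rb_raise(rb_eTypeError, "cannot share object across Ractors");
    }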
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:2063
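A sketch, assuming the surrounding code runs on a Ruby thread that has released the GVL (e.g. inside rb_thread_call_without_gvl()); the callback name is made up:

    static void *
    report_progress(void *msg)
    {
        rb_warn("%s", (const char *)msg);  /* Ruby API: requires the GVL */
        return NULL;
    }

    /* ... inside the GVL-released region: */
    rb_thread_call_with_gvl(report_progress, (void *)"halfway done");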
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1375
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]]
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
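The canonical pattern (consume_cstr() is a hypothetical consumer):

    VALUE str = rb_str_new_cstr("temporary");
    const char *p = RSTRING_PTR(str);
    consume_cstr(p);   /* str itself is not referenced past this point... */
    RB_GC_GUARD(str);  /* ...so force it to stay live until here */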
VALUE type(ANYARGS)
ANYARGS-ed function type.
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
#define RARRAY(obj)
Convenient casting macro.
Definition rarray.h:44
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
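A sketch of iterating an array from C (ary is assumed to be a T_ARRAY):

    long i, len = RARRAY_LEN(ary);
    const VALUE *elts = RARRAY_CONST_PTR(ary);
    for (i = 0; i < len; i++) {
        rb_p(elts[i]);          /* inspect each element */
    }
    RB_GC_GUARD(ary);           /* elts borrows from ary; keep ary live */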
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:166
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS(obj)
Convenient casting macro.
Definition rclass.h:38
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
This is the primitive way to wrap an existing C struct into RData.
Definition gc.c:1128
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:67
VALUE rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Identical to rb_data_object_wrap(), except it allocates a new data region internally instead of taking an existing one.
Definition gc.c:1145
#define RDATA(obj)
Convenient casting macro.
Definition rdata.h:59
#define RUBY_DEFAULT_FREE
This is a value you can set to RData::dfree.
Definition rdata.h:78
void(* RUBY_DATA_FUNC)(void *)
This is the type of callbacks registered to RData.
Definition rdata.h:104
#define RUBY_NEVER_FREE
This is a value you can set to RData::dfree.
Definition rdata.h:85
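A sketch of the classic RData flow (struct point and both functions are hypothetical):

    struct point { double x, y; };

    static VALUE
    point_alloc(VALUE klass)
    {
        /* zero-filled region; RUBY_DEFAULT_FREE means "free with ruby_xfree()" */
        return rb_data_object_zalloc(klass, sizeof(struct point),
                                     NULL /* dmark */, RUBY_DEFAULT_FREE);
    }

    static VALUE
    point_x(VALUE self)
    {
        struct point *p = DATA_PTR(self);
        return rb_float_new(p->x);
    }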
#define RFILE(obj)
Convenient casting macro.
Definition rfile.h:50
#define RHASH_SIZE(h)
Queries the size of the hash.
Definition rhash.h:69
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define RMATCH(obj)
Convenient casting macro.
Definition rmatch.h:37
#define ROBJECT(obj)
Convenient casting macro.
Definition robject.h:43
static VALUE * ROBJECT_FIELDS(VALUE obj)
Queries the instance variables.
Definition robject.h:128
#define RREGEXP(obj)
Convenient casting macro.
Definition rregexp.h:37
#define RREGEXP_PTR(obj)
Convenient accessor macro.
Definition rregexp.h:45
#define RSTRING(obj)
Convenient casting macro.
Definition rstring.h:41
static long RSTRUCT_LEN(VALUE st)
Returns the number of struct members.
Definition rstruct.h:82
static bool RTYPEDDATA_P(VALUE obj)
Checks whether the passed object is an RTypedData (true) rather than a plain RData (false).
Definition rtypeddata.h:669
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:81
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
This is the primitive way to wrap an existing C struct into RTypedData.
Definition gc.c:1175
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
Identical to rb_data_typed_object_wrap(), except it allocates a new data region internally instead of taking an existing one.
Definition gc.c:1185
#define RUBY_TYPED_FREE_IMMEDIATELY
Flag signalling that the dfree callback is safe to run during GC, so the struct is freed immediately at sweep time instead of being deferred.
Definition rtypeddata.h:122
static const rb_data_type_t * RTYPEDDATA_TYPE(VALUE obj)
Queries for the type of given object.
Definition rtypeddata.h:687
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:531
#define RTYPEDDATA(obj)
Convenient casting macro.
Definition rtypeddata.h:96
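A sketch of the preferred typed counterpart (struct holder and its functions are hypothetical; rb_gc_mark_movable() and ruby_xcalloc() are real API):

    struct holder { VALUE inner; };

    static void
    holder_mark(void *p)
    {
        rb_gc_mark_movable(((struct holder *)p)->inner);
    }

    static const rb_data_type_t holder_type = {
        .wrap_struct_name = "holder",
        .function = {
            .dmark = holder_mark,
            .dfree = RUBY_TYPED_DEFAULT_FREE,  /* plain ruby_xfree() */
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY,
    };

    static VALUE
    holder_new(VALUE klass, VALUE inner)
    {
        struct holder *h = ruby_xcalloc(1, sizeof(*h));
        h->inner = inner;
        return TypedData_Wrap_Struct(klass, &holder_type, h);
    }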
const char * rb_obj_classname(VALUE obj)
Queries the name of the class of the passed object.
Definition variable.c:515
void rb_p(VALUE obj)
Inspects an object.
Definition io.c:9063
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a thread managed by Ruby.
Definition thread.c:5815
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
This is an old name of RB_TEST.
Defines old _.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
Ruby's array.
Definition rarray.h:128
Ruby object's base components.
Definition rbasic.h:69
Definition rdata.h:120
RUBY_DATA_FUNC dfree
This function is called when the object is no longer used.
Definition rdata.h:143
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rdata.h:134
void * data
Pointer to the actual C level struct that you want to wrap.
Definition rdata.h:149
Definition hash.h:53
Ruby's ordinal objects.
Definition robject.h:85
Ruby's String.
Definition rstring.h:196
"Typed" user data.
Definition rtypeddata.h:384
void * data
Pointer to the actual C level struct that you want to wrap.
Definition rtypeddata.h:404
VALUE fields_obj
Direct reference to the slots that hold instance variables, if any.
Definition rtypeddata.h:390
Definition method.h:63
Definition constant.h:33
CREF (Class REFerence)
Definition method.h:45
Definition class.h:37
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:229
RUBY_DATA_FUNC dfree
This function is called when the object is no longer used.
Definition rtypeddata.h:259
RUBY_DATA_FUNC dcompact
This function is called when the object is relocated.
Definition rtypeddata.h:280
struct rb_data_type_struct::function (anonymous member struct)
Function pointers.
const char * wrap_struct_name
Name of structs of this kind.
Definition rtypeddata.h:236
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rtypeddata.h:250
VALUE flags
Type-specific behavioural characteristics.
Definition rtypeddata.h:343
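Since holder_mark in the sketch above used rb_gc_mark_movable(), the type also needs a dcompact hook; a sketch (rb_gc_location() is real API):

    static void
    holder_compact(void *p)
    {
        struct holder *h = p;
        h->inner = rb_gc_location(h->inner);  /* fetch post-compaction address */
    }
    /* wire it up with .dcompact = holder_compact in holder_type.function */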
Definition gc_impl.h:15
Ruby's IO, metadata and buffers.
Definition io.h:295
Represents a match.
Definition rmatch.h:71
struct rmatch_offset * char_offset
Capture group offsets, as a C array.
Definition rmatch.h:79
int char_offset_num_allocated
Number of rmatch_offset that ::rmatch::char_offset holds.
Definition rmatch.h:82
struct re_registers regs
"Registers" of a match.
Definition rmatch.h:76
Definition method.h:55
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Internal header for Class.
Definition class.h:30
Represents the region of a capture group.
Definition rmatch.h:65
Definition st.h:79
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
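A sketch of a GVL-independent critical section (the counter is hypothetical; these locks work even on threads that do not hold the GVL):

    #include "ruby/thread_native.h"

    static rb_nativethread_lock_t counter_lock;  /* rb_native_mutex_initialize() once at startup */
    static long counter;

    static void
    counter_increment(void)
    {
        rb_native_mutex_lock(&counter_lock);
        counter++;
        rb_native_mutex_unlock(&counter_lock);
    }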
intptr_t SIGNED_VALUE
A signed integer type that has the same width as VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376
ruby_value_type
C-level type of an object.
Definition value_type.h:113
@ RUBY_T_MASK
Bitmask of ruby_value_type.
Definition value_type.h:145
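A small sketch of a type check before using type-specific accessors (obj is assumed):

    if (RB_TYPE_P(obj, RUBY_T_ARRAY)) {
        long len = RARRAY_LEN(obj);   /* safe: obj is known to be an Array */
        (void)len;
    }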