Ruby 4.1.0dev (2026-05-17 revision 71749b882622c03028dfc55f40e5c3cceb24eaaf)
gc.c (71749b882622c03028dfc55f40e5c3cceb24eaaf)
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#include "ruby/internal/config.h"
15#ifdef _WIN32
16# include "ruby/ruby.h"
17#endif
18
19#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
20# include "wasm/setjmp.h"
21# include "wasm/machine.h"
22#else
23# include <setjmp.h>
24#endif
25#include <stdarg.h>
26#include <stdio.h>
27
28/* MALLOC_HEADERS_BEGIN */
29#ifndef HAVE_MALLOC_USABLE_SIZE
30# ifdef _WIN32
31# define HAVE_MALLOC_USABLE_SIZE
32# define malloc_usable_size(a) _msize(a)
33# elif defined HAVE_MALLOC_SIZE
34# define HAVE_MALLOC_USABLE_SIZE
35# define malloc_usable_size(a) malloc_size(a)
36# endif
37#endif
38
39#ifdef HAVE_MALLOC_USABLE_SIZE
40# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
41/* Alternative malloc header is included in ruby/missing.h */
42# elif defined(HAVE_MALLOC_H)
43# include <malloc.h>
44# elif defined(HAVE_MALLOC_NP_H)
45# include <malloc_np.h>
46# elif defined(HAVE_MALLOC_MALLOC_H)
47# include <malloc/malloc.h>
48# endif
49#endif
50
51/* MALLOC_HEADERS_END */
52
53#ifdef HAVE_SYS_TIME_H
54# include <sys/time.h>
55#endif
56
57#ifdef HAVE_SYS_RESOURCE_H
58# include <sys/resource.h>
59#endif
60
61#if defined _WIN32 || defined __CYGWIN__
62# include <windows.h>
63#elif defined(HAVE_POSIX_MEMALIGN)
64#elif defined(HAVE_MEMALIGN)
65# include <malloc.h>
66#endif
67
68#include <sys/types.h>
69
70#ifdef __EMSCRIPTEN__
71#include <emscripten.h>
72#endif
73
74/* For ruby_annotate_mmap */
75#ifdef HAVE_SYS_PRCTL_H
76#include <sys/prctl.h>
77#endif
78
79#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
80
81#include "constant.h"
82#include "darray.h"
83#include "debug_counter.h"
84#include "eval_intern.h"
85#include "gc/gc.h"
86#include "id_table.h"
87#include "internal.h"
88#include "internal/class.h"
89#include "internal/compile.h"
90#include "internal/complex.h"
91#include "internal/concurrent_set.h"
92#include "internal/cont.h"
93#include "internal/error.h"
94#include "internal/eval.h"
95#include "internal/gc.h"
96#include "internal/hash.h"
97#include "internal/imemo.h"
98#include "internal/io.h"
99#include "internal/numeric.h"
100#include "internal/object.h"
101#include "internal/proc.h"
102#include "internal/rational.h"
103#include "internal/re.h"
104#include "internal/sanitizers.h"
105#include "internal/struct.h"
106#include "internal/symbol.h"
107#include "internal/thread.h"
108#include "internal/variable.h"
109#include "internal/warnings.h"
110#include "probes.h"
111#include "regint.h"
112#include "ruby/debug.h"
113#include "ruby/io.h"
114#include "ruby/re.h"
115#include "ruby/st.h"
116#include "ruby/thread.h"
117#include "ruby/util.h"
118#include "ruby/vm.h"
119#include "ruby_assert.h"
120#include "ruby_atomic.h"
121#include "symbol.h"
122#include "variable.h"
123#include "vm_core.h"
124#include "vm_sync.h"
125#include "vm_callinfo.h"
126#include "ractor_core.h"
127#include "yjit.h"
128#include "zjit.h"
129
130#include "builtin.h"
131#include "shape.h"
132
133// TODO: Don't export this function in modular GC; instead, MMTk should figure out
134// how to combine GC thread backtrace with mutator thread backtrace.
135void
136rb_gc_print_backtrace(void)
137{
138 rb_print_backtrace(stderr);
139}
140
141unsigned int
142rb_gc_vm_lock(const char *file, int line)
143{
144 unsigned int lev = 0;
145 rb_vm_lock_enter(&lev, file, line);
146 return lev;
147}
148
149void
150rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
151{
152 rb_vm_lock_leave(&lev, file, line);
153}
154
155unsigned int
156rb_gc_cr_lock(const char *file, int line)
157{
158 unsigned int lev;
159 rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
160 return lev;
161}
162
163void
164rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
165{
166 rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
167}
168
169unsigned int
170rb_gc_vm_lock_no_barrier(const char *file, int line)
171{
172 unsigned int lev = 0;
173 rb_vm_lock_enter_nb(&lev, file, line);
174 return lev;
175}
176
177void
178rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
179{
180 rb_vm_lock_leave_nb(&lev, file, line);
181}
182
183void
184rb_gc_vm_barrier(void)
185{
186 rb_vm_barrier();
187}
188
189void *
190rb_gc_get_ractor_newobj_cache(void)
191{
192 return GET_RACTOR()->newobj_cache;
193}
194
195void
196rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
197{
198 rb_native_mutex_initialize(&context->lock);
199 context->ec = GET_EC();
200}
201
202bool
203rb_gc_event_hook_required_p(rb_event_flag_t event)
204{
205 return ruby_vm_event_flags & event;
206}
207
208void
209rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
210{
211 if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
212
213 rb_execution_context_t *ec = rb_gc_get_ec();
214 if (!ec->cfp) return;
215
216#if USE_MODULAR_GC
217 bool gc_thread_p = false;
218 if (!GET_EC()) {
219 gc_thread_p = true;
220
221# ifdef RB_THREAD_LOCAL_SPECIFIER
222 rb_current_ec_set(ec);
223# else
224 native_tls_set(ruby_current_ec_key, ec);
225# endif
226 }
227#endif
228
229 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
230
231#if USE_MODULAR_GC
232 if (gc_thread_p) {
233# ifdef RB_THREAD_LOCAL_SPECIFIER
234 rb_current_ec_set(NULL);
235# else
236 native_tls_set(ruby_current_ec_key, NULL);
237# endif
238 }
239#endif
240}
241
242void *
243rb_gc_get_objspace(void)
244{
245 return GET_VM()->gc.objspace;
246}
247
248void
249rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
250{
251 rb_ractor_t *r = NULL;
252 if (RB_LIKELY(ruby_single_main_ractor)) {
253 GC_ASSERT(
254 ccan_list_empty(&GET_VM()->ractor.set) ||
255 (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
256 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
257 );
258
259 func(ruby_single_main_ractor->newobj_cache, data);
260 }
261 else {
262 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
263 func(r->newobj_cache, data);
264 }
265 }
266}
267
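/* Runs every finalizer registered for an object: `callback(i, data)` yields the
 * i-th finalizer, which is then invoked with the object's ID. The saved
 * errinfo/cfp/sp are restored before each call, and EC_PUSH_TAG/EC_EXEC_TAG
 * catch exceptions so that a failing finalizer is warned about and skipped
 * while the remaining finalizers still run. */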
268void
269rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
270{
271 volatile struct {
272 VALUE errinfo;
273 VALUE final;
 274 rb_control_frame_t *cfp;
 275 VALUE *sp;
276 long finished;
277 } saved;
278
279 rb_execution_context_t * volatile ec = GET_EC();
280#define RESTORE_FINALIZER() (\
281 ec->cfp = saved.cfp, \
282 ec->cfp->sp = saved.sp, \
283 ec->errinfo = saved.errinfo)
284
285 saved.errinfo = ec->errinfo;
286 saved.cfp = ec->cfp;
287 saved.sp = ec->cfp->sp;
288 saved.finished = 0;
289 saved.final = Qundef;
290
291 ASSERT_vm_unlocking();
292 rb_ractor_ignore_belonging(true);
293 EC_PUSH_TAG(ec);
294 enum ruby_tag_type state = EC_EXEC_TAG();
295 if (state != TAG_NONE) {
296 ++saved.finished; /* skip failed finalizer */
297
298 VALUE failed_final = saved.final;
299 saved.final = Qundef;
300 if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
301 rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
302 rb_ec_error_print(ec, ec->errinfo);
303 }
304 }
305
306 for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
307 saved.final = callback(i, data);
308 rb_check_funcall(saved.final, idCall, 1, &objid);
309 }
310 EC_POP_TAG();
311 rb_ractor_ignore_belonging(false);
312#undef RESTORE_FINALIZER
313}
314
315void
316rb_gc_set_pending_interrupt(void)
317{
318 rb_execution_context_t *ec = GET_EC();
319 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
320}
321
322void
323rb_gc_unset_pending_interrupt(void)
324{
325 rb_execution_context_t *ec = GET_EC();
326 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
327}
328
329bool
330rb_gc_multi_ractor_p(void)
331{
332 return rb_multi_ractor_p();
333}
334
335bool
336rb_gc_shutdown_call_finalizer_p(VALUE obj)
337{
338 switch (BUILTIN_TYPE(obj)) {
339 case T_DATA:
340 if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
341 if (rb_obj_is_thread(obj)) return false;
342 if (rb_obj_is_mutex(obj)) return false;
343 if (rb_obj_is_fiber(obj)) return false;
344 if (rb_ractor_p(obj)) return false;
345 if (rb_obj_is_fstring_table(obj)) return false;
346 if (rb_obj_is_symbol_table(obj)) return false;
347
348 return true;
349
350 case T_FILE:
351 return true;
352
353 case T_SYMBOL:
354 return true;
355
356 case T_NONE:
357 return false;
358
359 default:
360 return ruby_free_at_exit_p();
361 }
362}
363
364void
365rb_gc_obj_changed_pool(VALUE obj, size_t heap_id)
366{
368
369 RBASIC_SET_SHAPE_ID(obj, rb_obj_shape_transition_heap(obj, heap_id));
370}
371
372void rb_vm_update_references(void *ptr);
373
374#define rb_setjmp(env) RUBY_SETJMP(env)
375#define rb_jmp_buf rb_jmpbuf_t
376#undef rb_data_object_wrap
377
378#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
379#define MAP_ANONYMOUS MAP_ANON
380#endif
381
382#define unless_objspace(objspace) \
383 void *objspace; \
384 rb_vm_t *unless_objspace_vm = GET_VM(); \
385 if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
386 else /* return; or objspace will be warned uninitialized */
387
388#define RMOVED(obj) ((struct RMoved *)(obj))
389
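/* Compaction helper: if `_thing` references an object that has been moved,
 * rewrite the reference in place with the object's new location, cast to `_type`. */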
390#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
391 if (gc_object_moved_p_internal((_objspace), (VALUE)(_thing))) { \
392 *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
393 } \
394} while (0)
395
396#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
397
398#if RUBY_MARK_FREE_DEBUG
399int ruby_gc_debug_indent = 0;
400#endif
401
402#ifndef RGENGC_OBJ_INFO
403# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
404#endif
405
406#ifndef CALC_EXACT_MALLOC_SIZE
407# define CALC_EXACT_MALLOC_SIZE 0
408#endif
409
411
412static size_t malloc_offset = 0;
413#if defined(HAVE_MALLOC_USABLE_SIZE)
414static size_t
415gc_compute_malloc_offset(void)
416{
417 // Different allocators use different metadata storage strategies which result in different
418 // ideal sizes.
 419 // For instance, malloc(64) will waste 8B with glibc but 0B with jemalloc,
 420 // while malloc(56) will waste 0B with glibc but 8B with jemalloc.
421 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
422 // waste memory.
 423 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
424 // no wasted memory.
425 size_t offset = 0;
426 for (offset = 0; offset <= 16; offset += 8) {
427 size_t allocated = (64 - offset);
428 void *test_ptr = malloc(allocated);
429 size_t wasted = malloc_usable_size(test_ptr) - allocated;
430 free(test_ptr);
431
432 if (wasted == 0) {
433 return offset;
434 }
435 }
436 return 0;
437}
438#else
439static size_t
440gc_compute_malloc_offset(void)
441{
442 // If we don't have malloc_usable_size, we use powers of 2.
443 return 0;
444}
445#endif
446
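/* Computes a grown capacity (in elements of `type_size` bytes) for a buffer:
 * roughly double the current byte size, rounded up to the next power of two,
 * then reduced by `malloc_offset` so the resulting malloc request lines up
 * with the allocator's size classes and wastes as little usable space as
 * possible. */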
447size_t
448rb_malloc_grow_capa(size_t current, size_t type_size)
449{
450 size_t current_capacity = current;
451 if (current_capacity < 4) {
452 current_capacity = 4;
453 }
454 current_capacity *= type_size;
455
456 // We double the current capacity.
457 size_t new_capacity = (current_capacity * 2);
458
459 // And round up to the next power of 2 if it's not already one.
460 if (rb_popcount64(new_capacity) != 1) {
 461 new_capacity = (size_t)1 << (64 - nlz_int64(new_capacity));
462 }
463
464 new_capacity -= malloc_offset;
465 new_capacity /= type_size;
466 if (current > new_capacity) {
467 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
468 }
469 RUBY_ASSERT(new_capacity > current);
470 return new_capacity;
471}
472
473static inline struct rbimpl_size_overflow_tag
474size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
475{
476 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
477 struct rbimpl_size_overflow_tag u = rbimpl_size_add_overflow(t.result, z);
478 return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed, u.result };
479}
480
481static inline struct rbimpl_size_overflow_tag
482size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
483{
484 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
485 struct rbimpl_size_overflow_tag u = rbimpl_size_mul_overflow(z, w);
486 struct rbimpl_size_overflow_tag v = rbimpl_size_add_overflow(t.result, u.result);
487 return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed || v.overflowed, v.result };
488}
489
490PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
491
492static inline size_t
493size_mul_or_raise(size_t x, size_t y, VALUE exc)
494{
495 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
496 if (LIKELY(!t.overflowed)) {
497 return t.result;
498 }
499 else if (rb_during_gc()) {
500 rb_memerror(); /* or...? */
501 }
502 else {
503 gc_raise(
504 exc,
505 "integer overflow: %"PRIuSIZE
506 " * %"PRIuSIZE
507 " > %"PRIuSIZE,
508 x, y, (size_t)SIZE_MAX);
509 }
510}
511
512size_t
513rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
514{
515 return size_mul_or_raise(x, y, exc);
516}
517
518static inline size_t
519size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
520{
521 struct rbimpl_size_overflow_tag t = size_mul_add_overflow(x, y, z);
522 if (LIKELY(!t.overflowed)) {
523 return t.result;
524 }
525 else if (rb_during_gc()) {
526 rb_memerror(); /* or...? */
527 }
528 else {
529 gc_raise(
530 exc,
531 "integer overflow: %"PRIuSIZE
532 " * %"PRIuSIZE
533 " + %"PRIuSIZE
534 " > %"PRIuSIZE,
535 x, y, z, (size_t)SIZE_MAX);
536 }
537}
538
539size_t
540rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
541{
542 return size_mul_add_or_raise(x, y, z, exc);
543}
544
545static inline size_t
546size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
547{
548 struct rbimpl_size_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
549 if (LIKELY(!t.overflowed)) {
550 return t.result;
551 }
552 else if (rb_during_gc()) {
553 rb_memerror(); /* or...? */
554 }
555 else {
556 gc_raise(
557 exc,
558 "integer overflow: %"PRIdSIZE
559 " * %"PRIdSIZE
560 " + %"PRIdSIZE
561 " * %"PRIdSIZE
562 " > %"PRIdSIZE,
563 x, y, z, w, (size_t)SIZE_MAX);
564 }
565}
566
567#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
568/* trick the compiler into thinking an external signal handler uses this */
569volatile VALUE rb_gc_guarded_val;
570volatile VALUE *
571rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
572{
573 rb_gc_guarded_val = val;
574
575 return ptr;
576}
577#endif
578
579static const char *obj_type_name(VALUE obj);
580static st_table *id2ref_tbl;
581#include "gc/default/default.c"
582
583#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
584# error "Modular GC requires dlopen"
585#elif USE_MODULAR_GC
586#include <dlfcn.h>
587
588typedef struct gc_function_map {
589 // Bootup
590 void *(*objspace_alloc)(void);
591 void (*objspace_init)(void *objspace_ptr);
592 void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
593 void (*set_params)(void *objspace_ptr);
594 void (*init)(void);
595 size_t *(*heap_sizes)(void *objspace_ptr);
596 // Shutdown
597 void (*shutdown_free_objects)(void *objspace_ptr);
598 void (*objspace_free)(void *objspace_ptr);
599 void (*ractor_cache_free)(void *objspace_ptr, void *cache);
600 // GC
601 void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
602 bool (*during_gc_p)(void *objspace_ptr);
603 void (*prepare_heap)(void *objspace_ptr);
604 void (*gc_enable)(void *objspace_ptr);
605 void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
606 bool (*gc_enabled_p)(void *objspace_ptr);
 607 VALUE (*config_get)(void *objspace_ptr);
608 void (*config_set)(void *objspace_ptr, VALUE hash);
609 void (*stress_set)(void *objspace_ptr, VALUE flag);
610 VALUE (*stress_get)(void *objspace_ptr);
611 struct rb_gc_vm_context *(*get_vm_context)(void *objspace_ptr);
612 // Object allocation
613 VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
614 size_t (*obj_slot_size)(VALUE obj);
615 size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
616 bool (*size_allocatable_p)(size_t size);
617 // Malloc
618 void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
619 void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
620 void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
621 void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
622 void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
623 // Marking
624 void (*mark)(void *objspace_ptr, VALUE obj);
625 void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
626 void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
627 void (*mark_maybe)(void *objspace_ptr, VALUE obj);
628 // Weak references
629 void (*declare_weak_references)(void *objspace_ptr, VALUE obj);
630 bool (*handle_weak_references_alive_p)(void *objspace_ptr, VALUE obj);
631 // Compaction
632 void (*register_pinning_obj)(void *objspace_ptr, VALUE obj);
633 bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
634 VALUE (*location)(void *objspace_ptr, VALUE value);
635 // Write barriers
636 void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
637 void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
638 void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
639 // Heap walking
640 void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
641 void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
642 // Finalizers
643 void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
644 VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
645 void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
646 void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
647 void (*shutdown_call_finalizer)(void *objspace_ptr);
648 // Forking
649 void (*before_fork)(void *objspace_ptr);
650 void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
651 // Statistics
652 void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
653 bool (*get_measure_total_time)(void *objspace_ptr);
654 unsigned long long (*get_total_time)(void *objspace_ptr);
655 size_t (*gc_count)(void *objspace_ptr);
656 VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
657 VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
658 VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
659 const char *(*active_gc_name)(void);
660 // Miscellaneous
661 struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
662 bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
663 bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
664 void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
665 void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
666
667 bool modular_gc_loaded_p;
668} rb_gc_function_map_t;
669
670static rb_gc_function_map_t rb_gc_functions;
671
672# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
673# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)
674
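/* Populates rb_gc_functions. If the RUBY_GC_LIBRARY environment variable names
 * a GC implementation, the matching librubygc.<name> shared object is dlopen'd
 * from MODULAR_GC_DIR and every rb_gc_impl_* entry point is resolved from it;
 * otherwise the table falls back to the built-in default GC functions. */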
675static void
676ruby_modular_gc_init(void)
677{
678 // Assert that the directory path ends with a /
679 RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
680
681 const char *gc_so_file = getenv(RUBY_GC_LIBRARY);
682
683 rb_gc_function_map_t gc_functions = { 0 };
684
685 char *gc_so_path = NULL;
686 void *handle = NULL;
687 if (gc_so_file) {
688 /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
689 * not load a shared object outside of the directory. */
690 for (size_t i = 0; i < strlen(gc_so_file); i++) {
691 char c = gc_so_file[i];
692 if (isalnum(c)) continue;
693 switch (c) {
694 case '-':
695 case '_':
696 break;
697 default:
698 fprintf(stderr, "Only alphanumeric, dash, and underscore is allowed in "RUBY_GC_LIBRARY"\n");
699 exit(EXIT_FAILURE);
700 }
701 }
702
703 size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
704#ifdef LOAD_RELATIVE
705 Dl_info dli;
706 size_t prefix_len = 0;
707 if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
708 const char *base = strrchr(dli.dli_fname, '/');
709 if (base) {
710 size_t tail = 0;
711# define end_with_p(lit) \
712 (prefix_len >= (tail = rb_strlen_lit(lit)) && \
713 memcmp(base - tail, lit, tail) == 0)
714
715 prefix_len = base - dli.dli_fname;
716 if (end_with_p("/bin") || end_with_p("/lib")) {
717 prefix_len -= tail;
718 }
719 prefix_len += MODULAR_GC_DIR[0] != '/';
720 gc_so_path_size += prefix_len;
721 }
722 }
723#endif
724 gc_so_path = alloca(gc_so_path_size);
725 {
726 size_t gc_so_path_idx = 0;
727#define GC_SO_PATH_APPEND(str) do { \
728 gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
729} while (0)
730#ifdef LOAD_RELATIVE
731 if (prefix_len > 0) {
732 memcpy(gc_so_path, dli.dli_fname, prefix_len);
733 gc_so_path_idx = prefix_len;
734 }
735#endif
736 GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
737 GC_SO_PATH_APPEND(gc_so_file);
738 GC_SO_PATH_APPEND(DLEXT);
739 GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
740#undef GC_SO_PATH_APPEND
741 }
742
743 handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
744 if (!handle) {
745 fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
746 exit(EXIT_FAILURE);
747 }
748
749 gc_functions.modular_gc_loaded_p = true;
750 }
751
752 unsigned int err_count = 0;
753
754# define load_modular_gc_func(name) do { \
755 if (handle) { \
756 const char *func_name = "rb_gc_impl_" #name; \
757 gc_functions.name = dlsym(handle, func_name); \
758 if (!gc_functions.name) { \
759 fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
760 err_count++; \
761 } \
762 } \
763 else { \
764 gc_functions.name = rb_gc_impl_##name; \
765 } \
766} while (0)
767
768 // Bootup
769 load_modular_gc_func(objspace_alloc);
770 load_modular_gc_func(objspace_init);
771 load_modular_gc_func(ractor_cache_alloc);
772 load_modular_gc_func(set_params);
773 load_modular_gc_func(init);
774 load_modular_gc_func(heap_sizes);
775 // Shutdown
776 load_modular_gc_func(shutdown_free_objects);
777 load_modular_gc_func(objspace_free);
778 load_modular_gc_func(ractor_cache_free);
779 // GC
780 load_modular_gc_func(start);
781 load_modular_gc_func(during_gc_p);
782 load_modular_gc_func(prepare_heap);
783 load_modular_gc_func(gc_enable);
784 load_modular_gc_func(gc_disable);
785 load_modular_gc_func(gc_enabled_p);
786 load_modular_gc_func(config_set);
787 load_modular_gc_func(config_get);
788 load_modular_gc_func(stress_set);
789 load_modular_gc_func(stress_get);
790 load_modular_gc_func(get_vm_context);
791 // Object allocation
792 load_modular_gc_func(new_obj);
793 load_modular_gc_func(obj_slot_size);
794 load_modular_gc_func(heap_id_for_size);
795 load_modular_gc_func(size_allocatable_p);
796 // Malloc
797 load_modular_gc_func(malloc);
798 load_modular_gc_func(calloc);
799 load_modular_gc_func(realloc);
800 load_modular_gc_func(free);
801 load_modular_gc_func(adjust_memory_usage);
802 // Marking
803 load_modular_gc_func(mark);
804 load_modular_gc_func(mark_and_move);
805 load_modular_gc_func(mark_and_pin);
806 load_modular_gc_func(mark_maybe);
807 // Weak references
808 load_modular_gc_func(declare_weak_references);
809 load_modular_gc_func(handle_weak_references_alive_p);
810 // Compaction
811 load_modular_gc_func(register_pinning_obj);
812 load_modular_gc_func(object_moved_p);
813 load_modular_gc_func(location);
814 // Write barriers
815 load_modular_gc_func(writebarrier);
816 load_modular_gc_func(writebarrier_unprotect);
817 load_modular_gc_func(writebarrier_remember);
818 // Heap walking
819 load_modular_gc_func(each_objects);
820 load_modular_gc_func(each_object);
821 // Finalizers
822 load_modular_gc_func(make_zombie);
823 load_modular_gc_func(define_finalizer);
824 load_modular_gc_func(undefine_finalizer);
825 load_modular_gc_func(copy_finalizer);
826 load_modular_gc_func(shutdown_call_finalizer);
827 // Forking
828 load_modular_gc_func(before_fork);
829 load_modular_gc_func(after_fork);
830 // Statistics
831 load_modular_gc_func(set_measure_total_time);
832 load_modular_gc_func(get_measure_total_time);
833 load_modular_gc_func(get_total_time);
834 load_modular_gc_func(gc_count);
835 load_modular_gc_func(latest_gc_info);
836 load_modular_gc_func(stat);
837 load_modular_gc_func(stat_heap);
838 load_modular_gc_func(active_gc_name);
839 // Miscellaneous
840 load_modular_gc_func(object_metadata);
841 load_modular_gc_func(pointer_to_heap_p);
842 load_modular_gc_func(garbage_object_p);
843 load_modular_gc_func(set_event_hook);
844 load_modular_gc_func(copy_attributes);
845
846 if (err_count > 0) {
847 fprintf(stderr, "ruby_modular_gc_init: found %u missing exports in library %s\n", err_count, gc_so_path);
848 exit(EXIT_FAILURE);
849 }
850
851# undef load_modular_gc_func
852
853 rb_gc_functions = gc_functions;
854}
855
856// Bootup
857# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
858# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
859# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
860# define rb_gc_impl_set_params rb_gc_functions.set_params
861# define rb_gc_impl_init rb_gc_functions.init
862# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
863// Shutdown
864# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
865# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
866# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
867// GC
868# define rb_gc_impl_start rb_gc_functions.start
869# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
870# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
871# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
872# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
873# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
874# define rb_gc_impl_config_get rb_gc_functions.config_get
875# define rb_gc_impl_config_set rb_gc_functions.config_set
876# define rb_gc_impl_stress_set rb_gc_functions.stress_set
877# define rb_gc_impl_stress_get rb_gc_functions.stress_get
878# define rb_gc_impl_get_vm_context rb_gc_functions.get_vm_context
879// Object allocation
880# define rb_gc_impl_new_obj rb_gc_functions.new_obj
881# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
882# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
883# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
884// Malloc
885# define rb_gc_impl_malloc rb_gc_functions.malloc
886# define rb_gc_impl_calloc rb_gc_functions.calloc
887# define rb_gc_impl_realloc rb_gc_functions.realloc
888# define rb_gc_impl_free rb_gc_functions.free
889# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
890// Marking
891# define rb_gc_impl_mark rb_gc_functions.mark
892# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
893# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
894# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
895// Weak references
896# define rb_gc_impl_declare_weak_references rb_gc_functions.declare_weak_references
897# define rb_gc_impl_handle_weak_references_alive_p rb_gc_functions.handle_weak_references_alive_p
898// Compaction
899# define rb_gc_impl_register_pinning_obj rb_gc_functions.register_pinning_obj
900# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
901# define rb_gc_impl_location rb_gc_functions.location
902// Write barriers
903# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
904# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
905# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
906// Heap walking
907# define rb_gc_impl_each_objects rb_gc_functions.each_objects
908# define rb_gc_impl_each_object rb_gc_functions.each_object
909// Finalizers
910# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
911# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
912# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
913# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
914# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
915// Forking
916# define rb_gc_impl_before_fork rb_gc_functions.before_fork
917# define rb_gc_impl_after_fork rb_gc_functions.after_fork
918// Statistics
919# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
920# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
921# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
922# define rb_gc_impl_gc_count rb_gc_functions.gc_count
923# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
924# define rb_gc_impl_stat rb_gc_functions.stat
925# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
926# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
927// Miscellaneous
928# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
929# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
930# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
931# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
932# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
933#endif
934
935#ifdef RUBY_ASAN_ENABLED
936static void
937asan_death_callback(void)
938{
939 if (GET_VM()) {
940 rb_bug_without_die("ASAN error");
941 }
942}
943#endif
944
945static VALUE initial_stress = Qfalse;
946
947void *
948rb_objspace_alloc(void)
949{
950#if USE_MODULAR_GC
951 ruby_modular_gc_init();
952#endif
953
954 void *objspace = rb_gc_impl_objspace_alloc();
955 ruby_current_vm_ptr->gc.objspace = objspace;
956 rb_gc_impl_objspace_init(objspace);
957 rb_gc_impl_stress_set(objspace, initial_stress);
958
959#ifdef RUBY_ASAN_ENABLED
960 __sanitizer_set_death_callback(asan_death_callback);
961#endif
962
963 return objspace;
964}
965
966void
967rb_objspace_free(void *objspace)
968{
969 rb_gc_impl_objspace_free(objspace);
970}
971
972size_t
973rb_gc_obj_slot_size(VALUE obj)
974{
975 return rb_gc_impl_obj_slot_size(obj);
976}
977
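/* Debug-only sanity check: when allocating from a Ruby frame, the frame's PC
 * must point inside its iseq, otherwise NEWOBJ tracing would report a bogus
 * source location. */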
978static inline void
979gc_validate_pc(VALUE obj)
980{
981#if RUBY_DEBUG
 982 // IMEMOs and objects without a class (e.g. a managed ID table) are not traceable
983 if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;
984
985 rb_execution_context_t *ec = GET_EC();
986 const rb_control_frame_t *cfp = ec->cfp;
987 if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && CFP_PC(cfp)) {
988 const VALUE *iseq_encoded = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded;
989 const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(CFP_ISEQ(cfp))->iseq_size;
990 RUBY_ASSERT(CFP_PC(cfp) >= iseq_encoded, "PC not set when allocating, breaking tracing");
991 RUBY_ASSERT(CFP_PC(cfp) <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
992 }
993#endif
994}
995
996NOINLINE(static void gc_newobj_hook(VALUE obj));
997static void
998gc_newobj_hook(VALUE obj)
999{
1000 int lev = RB_GC_VM_LOCK_NO_BARRIER();
1001 {
1002 size_t slot_size = rb_gc_obj_slot_size(obj);
1003 memset((char *)obj + sizeof(struct RBasic), 0, slot_size - sizeof(struct RBasic));
1004
1005 /* We must disable GC here because the callback could call xmalloc
1006 * which could potentially trigger a GC, and a lot of code is unsafe
1007 * to trigger a GC right after an object has been allocated because
 1008 * it performs initialization for the object and assumes that the
1009 * GC does not trigger before then. */
1010 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1011 {
1012 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
1013 }
1014 if (!gc_disabled) rb_gc_enable();
1015 }
1016 RB_GC_VM_UNLOCK_NO_BARRIER(lev);
1017}
1018
1019VALUE
1020rb_newobj(rb_execution_context_t *ec, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t size)
1021{
1022 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1023 rb_ractor_t *cr = rb_ec_ractor_ptr(ec);
1024 VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);
1025
1026#if RACTOR_CHECK_MODE
1027 void rb_ractor_setup_belonging(VALUE obj);
1028 rb_ractor_setup_belonging(obj);
1029#endif
1030
1031 RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);
1032
1033 gc_validate_pc(obj);
1034
1035 if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
1036 gc_newobj_hook(obj);
1037 }
1038
1039#if RGENGC_CHECK_MODE
1040# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
1041# define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
1042# endif
1043
1044 memset(
1045 (void *)(obj + sizeof(struct RBasic)),
1046 GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
1047 rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
1048 );
1049#endif
1050
1051 return obj;
1052}
1053
1054VALUE
1055rb_ec_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
1056{
1057 return rb_newobj(ec, klass, flags, ROOT_SHAPE_ID, true, size);
1058}
1059
1060VALUE
1061rb_newobj_of_with_shape(VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
1062{
1063 return rb_newobj(GET_EC(), klass, flags, shape_id, true, size);
1064}
1065
1066VALUE
1067rb_newobj_of(VALUE klass, VALUE flags, size_t size)
1068{
1069 return rb_newobj(GET_EC(), klass, flags, ROOT_SHAPE_ID, true, size);
1070}
1071
1072static VALUE
1073class_allocate_complex_instance(VALUE klass, uint32_t capacity)
1074{
1075 shape_id_t initial_shape_id = rb_shape_root(rb_gc_heap_id_for_size(sizeof(struct RObject)));
1076 VALUE obj = rb_newobj_of_with_shape(klass, T_OBJECT, initial_shape_id, sizeof(struct RObject));
1077 rb_obj_init_complex(obj, rb_st_init_numtable_with_size(capacity));
1078 return obj;
1079}
1080
1081VALUE
1082rb_class_allocate_instance(VALUE klass)
1083{
1084 uint32_t index_tbl_num_entries = RCLASS_MAX_IV_COUNT(klass);
1085 VALUE obj;
1086
1087 // Directly start as COMPLEX if we know we're over the limit.
1088 RUBY_ASSERT(rb_shape_tree.max_capacity > 0);
1089 if (RB_UNLIKELY(index_tbl_num_entries > rb_shape_tree.max_capacity)) {
1090 obj = class_allocate_complex_instance(klass, index_tbl_num_entries);
1091 }
1092 else {
1093 size_t size = rb_obj_embedded_size(index_tbl_num_entries);
1094 if (!rb_gc_size_allocatable_p(size)) {
1095 size = sizeof(struct RObject);
1096 }
1097
1098 // There might be a NEWOBJ tracepoint callback, and it may set fields.
1099 // So the shape must be passed to `NEWOBJ_OF`.
1100 obj = rb_newobj_of_with_shape(klass, T_OBJECT, rb_shape_root(rb_gc_heap_id_for_size(size)), size);
1101
1102 #if RUBY_DEBUG
1103 VALUE *ptr = ROBJECT_FIELDS(obj);
1104 size_t fields_count = RSHAPE_LEN(RBASIC_SHAPE_ID(obj));
1105 for (size_t i = fields_count; i < ROBJECT_FIELDS_CAPACITY(obj); i++) {
1106 ptr[i] = Qundef;
1107 }
1108 #endif
1109 }
1110
1111#if RUBY_DEBUG
1112 if (rb_obj_class(obj) != rb_class_real(klass)) {
1113 rb_bug("Expected rb_class_allocate_instance to set the class correctly");
1114 }
1115#endif
1116
1117 return obj;
1118}
1119
1120void
1121rb_gc_register_pinning_obj(VALUE obj)
1122{
1123 rb_gc_impl_register_pinning_obj(rb_gc_get_objspace(), obj);
1124}
1125
1126#define UNEXPECTED_NODE(func) \
1127 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1128 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1129
1130static inline void
1131rb_data_object_check(VALUE klass)
1132{
1133 RUBY_ASSERT(!RCLASS_SINGLETON_P(klass));
1134 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1135 rb_undef_alloc_func(klass);
1136 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1137 }
1138}
1139
1140VALUE
1141rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1142{
1144 if (klass) rb_data_object_check(klass);
1145 VALUE obj = rb_newobj(GET_EC(), klass, T_DATA, ROOT_SHAPE_ID, !dmark, sizeof(struct RTypedData));
1146
1147 rb_gc_register_pinning_obj(obj);
1148
1149 struct RData *data = (struct RData *)obj;
1150 data->dmark = dmark;
1151 data->dfree = dfree;
1152 data->data = datap;
1153
1154 return obj;
1155}
1156
1157VALUE
1158rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1159{
1160 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1161 DATA_PTR(obj) = xcalloc(1, size);
1162 return obj;
1163}
1164
1165#define RTYPEDDATA_EMBEDDED_P rbimpl_typeddata_embedded_p
1166#define RB_DATA_TYPE_EMBEDDABLE_P(type) ((type)->flags & RUBY_TYPED_EMBEDDABLE)
1167#define RTYPEDDATA_EMBEDDABLE_P(obj) RB_DATA_TYPE_EMBEDDABLE_P(RTYPEDDATA_TYPE(obj))
1168
1169static VALUE
1170typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1171{
1172 RBIMPL_NONNULL_ARG(type);
1173 if (klass) rb_data_object_check(klass);
1174 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1175 VALUE obj = rb_newobj(GET_EC(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, ROOT_SHAPE_ID, wb_protected, size);
1176
1177 rb_gc_register_pinning_obj(obj);
1178
1179 struct RTypedData *data = (struct RTypedData *)obj;
1180 data->fields_obj = 0;
1181 *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
1182 data->data = datap;
1183
1184 return obj;
1185}
1186
1187VALUE
1188rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1189{
1190 if (UNLIKELY(RB_DATA_TYPE_EMBEDDABLE_P(type))) {
1191 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1192 }
1193
1194 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1195}
1196
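/* Allocates a TypedData object together with `size` bytes of zeroed payload.
 * If the type is RUBY_TYPED_EMBEDDABLE and the payload fits in an allocatable
 * slot, the data is embedded directly after the RTypedData header; otherwise
 * it is allocated separately with xcalloc. */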
1197VALUE
1198rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1199{
1200 if (RB_DATA_TYPE_EMBEDDABLE_P(type)) {
1201 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1202 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1203 }
1204
1205 size_t embed_size = offsetof(struct RTypedData, data) + size;
1206 if (rb_gc_size_allocatable_p(embed_size)) {
1207 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1208 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1209 return obj;
1210 }
1211 }
1212
1213 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1214 DATA_PTR(obj) = xcalloc(1, size);
1215 return obj;
1216}
1217
1218static size_t
1219ruby_xmalloc_usable_size(void *ptr)
1220{
1221#ifdef HAVE_MALLOC_USABLE_SIZE
1222#if CALC_EXACT_MALLOC_SIZE
1223 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
1224 return malloc_usable_size(info) - sizeof(struct malloc_obj_info);
1225#else
1226 return malloc_usable_size(ptr);
1227#endif
1228#else
1229 return 0;
1230#endif
1231}
1232
1233static size_t
1234rb_objspace_data_type_memsize(VALUE obj)
1235{
1236 size_t size = 0;
1237 if (RTYPEDDATA_P(obj)) {
1238 const void *ptr = RTYPEDDATA_GET_DATA(obj);
1239
1240 if (ptr) {
1241 if (RTYPEDDATA_EMBEDDABLE_P(obj) && !RTYPEDDATA_EMBEDDED_P(obj)) {
1242 size += ruby_xmalloc_usable_size((void *)ptr);
1243 }
1244
1245 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1246 if (type->function.dsize) {
1247 size += type->function.dsize(ptr);
1248 }
1249 }
1250 }
1251
1252 return size;
1253}
1254
1255const char *
1256rb_objspace_data_type_name(VALUE obj)
1257{
1258 if (RTYPEDDATA_P(obj)) {
1259 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1260 }
1261 else {
1262 return 0;
1263 }
1264}
1265
1266void
1267rb_gc_declare_weak_references(VALUE obj)
1268{
1269 rb_gc_impl_declare_weak_references(rb_gc_get_objspace(), obj);
1270}
1271
1272bool
1273rb_gc_handle_weak_references_alive_p(VALUE obj)
1274{
1275 if (SPECIAL_CONST_P(obj)) return true;
1276
1277 return rb_gc_impl_handle_weak_references_alive_p(rb_gc_get_objspace(), obj);
1278}
1279
1280void
1281rb_gc_handle_weak_references(VALUE obj)
1282{
1283 switch (BUILTIN_TYPE(obj)) {
1284 case T_DATA:
1285 if (RTYPEDDATA_P(obj)) {
1286 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1287
1288 if (type->function.handle_weak_references) {
1289 (type->function.handle_weak_references)(RTYPEDDATA_GET_DATA(obj));
1290 }
1291 else {
1292 rb_bug(
1293 "rb_gc_handle_weak_references: TypedData %s does not implement handle_weak_references",
1294 RTYPEDDATA_TYPE(obj)->wrap_struct_name
1295 );
1296 }
1297 }
1298 else {
1299 rb_bug("rb_gc_handle_weak_references: unknown T_DATA");
1300 }
1301 break;
1302
1303 case T_IMEMO: {
1304 switch (imemo_type(obj)) {
1305 case imemo_callcache: {
1306 struct rb_callcache *cc = (struct rb_callcache *)obj;
1307 if (cc->klass != Qundef &&
1308 (!rb_gc_handle_weak_references_alive_p(cc->klass) ||
1309 !rb_gc_handle_weak_references_alive_p((VALUE)cc->cme_))) {
1310 vm_cc_invalidate(cc);
1311 }
1312 break;
1313 }
1314 case imemo_subclasses: {
1315 struct rb_subclasses *subs = (struct rb_subclasses *)obj;
1316 VALUE *entries = rb_imemo_subclasses_entries(obj);
1317 for (uint32_t i = 0; i < subs->count; i++) {
1318 if (entries[i] && !rb_gc_handle_weak_references_alive_p(entries[i])) {
1319 entries[i] = 0;
1320 }
1321 }
1322 break;
1323 }
1324 default:
1325 rb_bug("rb_gc_handle_weak_references: unexpected imemo type");
1326 }
1327
1328 break;
1329 }
1330 default:
1331 rb_bug("rb_gc_handle_weak_references: type not supported\n");
1332 }
1333}
1334
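/* Whether an imemo owns external memory (or a registered object ID) that must
 * be released by rb_imemo_free() when it is swept. */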
1335static inline bool
1336rb_gc_imemo_needs_cleanup_p(VALUE obj)
1337{
1338 switch (imemo_type(obj)) {
1339 case imemo_constcache:
1340 case imemo_cref:
1341 case imemo_ifunc:
1342 case imemo_memo:
1343 case imemo_svar:
1344 case imemo_callcache:
1345 case imemo_throw_data:
1346 case imemo_cvar_entry:
1347 return false;
1348
1349 case imemo_env:
1350 case imemo_ment:
1351 case imemo_iseq:
1352 case imemo_callinfo:
1353 return true;
1354
1355 case imemo_subclasses:
1356 return FL_TEST_RAW(obj, IMEMO_SUBCLASSES_HEAP);
1357
1358 case imemo_tmpbuf:
1359 return ((rb_imemo_tmpbuf_t *)obj)->ptr != NULL;
1360
1361 case imemo_fields:
1362 return FL_TEST_RAW(obj, OBJ_FIELD_HEAP) || (id2ref_tbl && rb_obj_shape_has_id(obj));
1363 }
1364 UNREACHABLE_RETURN(true);
1365}
1366
1367/*
1368 * Returns true if the object requires a full rb_gc_obj_free() call during sweep,
1369 * false if it can be freed quickly without calling destructors or cleanup.
1370 *
1371 * Objects that return false are:
1372 * - Simple embedded objects without external allocations
1373 * - Objects without finalizers
1374 * - Objects without object IDs registered in id2ref
1375 * - Objects without generic instance variables
1376 *
1377 * This is used by the GC sweep fast path to avoid function call overhead
1378 * for the majority of simple objects.
1379 */
1380bool
1381rb_gc_obj_needs_cleanup_p(VALUE obj)
1382{
1383 VALUE flags = RBASIC(obj)->flags;
1384
1385 if (flags & FL_FINALIZE) return true;
1386
1387 switch (flags & RUBY_T_MASK) {
1388 case T_IMEMO:
1389 return rb_gc_imemo_needs_cleanup_p(obj);
1390
1391 case T_DATA:
1392 case T_OBJECT:
1393 case T_STRING:
1394 case T_ARRAY:
1395 case T_HASH:
1396 case T_BIGNUM:
1397 case T_STRUCT:
1398 case T_FLOAT:
1399 case T_RATIONAL:
1400 case T_COMPLEX:
1401 case T_MATCH:
1402 break;
1403
1404 case T_FILE:
1405 case T_SYMBOL:
1406 case T_CLASS:
1407 case T_ICLASS:
1408 case T_MODULE:
1409 case T_REGEXP:
1410 return true;
1411 }
1412
1413 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1414 if (id2ref_tbl && rb_shape_has_object_id(shape_id)) return true;
1415
1416 switch (flags & RUBY_T_MASK) {
1417 case T_OBJECT:
1418 if (flags & ROBJECT_HEAP) return true;
1419 return false;
1420
1421 case T_DATA:
1422 if (flags & RUBY_TYPED_FL_IS_TYPED_DATA) {
1423 uintptr_t type = (uintptr_t)RTYPEDDATA(obj)->type;
1424 if (type & TYPED_DATA_EMBEDDED) {
1425 RUBY_DATA_FUNC dfree = ((const rb_data_type_t *)(type & TYPED_DATA_PTR_MASK))->function.dfree;
1426 if (dfree == RUBY_NEVER_FREE || dfree == RUBY_TYPED_DEFAULT_FREE) {
1427 return false;
1428 }
1429 }
1430 }
1431 return true;
1432
1433 case T_STRING:
1434 if (flags & (RSTRING_NOEMBED | RSTRING_FSTR)) return true;
1435 return rb_shape_has_fields(shape_id);
1436
1437 case T_ARRAY:
1438 if (!(flags & RARRAY_EMBED_FLAG)) return true;
1439 return rb_shape_has_fields(shape_id);
1440
1441 case T_HASH:
1442 if (flags & RHASH_ST_TABLE_FLAG) return true;
1443 return rb_shape_has_fields(shape_id);
1444
1445 case T_MATCH:
1446 if ((flags & (RMATCH_ONIG | RMATCH_OFFSETS_EXTERNAL)) || USE_DEBUG_COUNTER) return true;
1447 return rb_shape_has_fields(shape_id);
1448
1449 case T_BIGNUM:
1450 if (!(flags & BIGNUM_EMBED_FLAG)) return true;
1451 return rb_shape_has_fields(shape_id);
1452
1453 case T_STRUCT:
1454 if (!(flags & RSTRUCT_EMBED_LEN_MASK)) return true;
1455 if (flags & RSTRUCT_GEN_FIELDS) return rb_shape_has_fields(shape_id);
1456 return false;
1457
1458 case T_FLOAT:
1459 case T_RATIONAL:
1460 case T_COMPLEX:
1461 return rb_shape_has_fields(shape_id);
1462
1463 default:
1464 UNREACHABLE_RETURN(true);
1465 }
1466}
1467
1468static void
1469io_fptr_finalize(void *fptr)
1470{
1471 rb_io_fptr_finalize((struct rb_io *)fptr);
1472}
1473
1474static inline void
1475make_io_zombie(void *objspace, VALUE obj)
1476{
1477 rb_io_t *fptr = RFILE(obj)->fptr;
1478 rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
1479}
1480
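/* Releases the payload of a T_DATA object. RUBY_DEFAULT_FREE payloads are
 * simply xfree'd, RUBY_TYPED_FREE_IMMEDIATELY types get their dfree called
 * right away, and anything else is deferred by turning the object into a
 * zombie whose dfree runs later. Returns false when the object became a
 * zombie and therefore cannot be reclaimed yet. */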
1481static bool
1482rb_data_free(void *objspace, VALUE obj)
1483{
1484 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1485 if (data) {
1486 int free_immediately = false;
1487 void (*dfree)(void *);
1488
1489 if (RTYPEDDATA_P(obj)) {
1490 free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1491 dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
1492 }
1493 else {
1494 dfree = RDATA(obj)->dfree;
1495 }
1496
1497 if (dfree) {
1498 if (dfree == RUBY_DEFAULT_FREE) {
1499 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1500 xfree(data);
1501 RB_DEBUG_COUNTER_INC(obj_data_xfree);
1502 }
1503 }
1504 else if (free_immediately) {
1505 (*dfree)(data);
1506 if (RTYPEDDATA_EMBEDDABLE_P(obj) && !RTYPEDDATA_EMBEDDED_P(obj)) {
1507 xfree(data);
1508 }
1509
1510 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1511 }
1512 else {
1513 rb_gc_impl_make_zombie(objspace, obj, dfree, data);
1514 RB_DEBUG_COUNTER_INC(obj_data_zombie);
1515 return FALSE;
1516 }
1517 }
1518 else {
1519 RB_DEBUG_COUNTER_INC(obj_data_empty);
1520 }
1521 }
1522
1523 return true;
1524}
1525
1526struct classext_foreach_args {
 1527 VALUE klass;
1528 rb_objspace_t *objspace; // used for update_*
1529};
1530
1531static void
1532classext_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
1533{
1534 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1535
1536 rb_class_classext_free(args->klass, ext, is_prime);
1537}
1538
1539static void
1540classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
1541{
1542 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1543
1544 rb_iclass_classext_free(args->klass, ext, is_prime);
1545}
1546
1547bool
1548rb_gc_obj_free(void *objspace, VALUE obj)
1549{
1550 struct classext_foreach_args args;
1551
1552 RB_DEBUG_COUNTER_INC(obj_free);
1553
1554 switch (BUILTIN_TYPE(obj)) {
1555 case T_NIL:
1556 case T_FIXNUM:
1557 case T_TRUE:
1558 case T_FALSE:
1559 rb_bug("obj_free() called for broken object");
1560 break;
1561 default:
1562 break;
1563 }
1564
1565 switch (BUILTIN_TYPE(obj)) {
1566 case T_OBJECT:
1567 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
1568 if (rb_obj_shape_complex_p(obj)) {
1569 RB_DEBUG_COUNTER_INC(obj_obj_complex);
1570 st_free_table(ROBJECT_FIELDS_HASH(obj));
1571 }
1572 else {
1573 SIZED_FREE_N(ROBJECT(obj)->as.heap.fields, ROBJECT_FIELDS_CAPACITY(obj));
1574 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1575 }
1576 }
1577 else {
1578 RB_DEBUG_COUNTER_INC(obj_obj_embed);
1579 }
1580 break;
1581 case T_MODULE:
1582 case T_CLASS:
1583#if USE_ZJIT
1584 rb_zjit_klass_free(obj);
1585#endif
1586 args.klass = obj;
1587 rb_class_classext_foreach(obj, classext_free, (void *)&args);
1588 if (RCLASS_CLASSEXT_TBL(obj)) {
1589 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1590 }
1591 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1592 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1593 break;
1594 case T_STRING:
1595 rb_str_free(obj);
1596 break;
1597 case T_ARRAY:
1598 rb_ary_free(obj);
1599 break;
1600 case T_HASH:
1601#if USE_DEBUG_COUNTER
1602 switch (RHASH_SIZE(obj)) {
1603 case 0:
1604 RB_DEBUG_COUNTER_INC(obj_hash_empty);
1605 break;
1606 case 1:
1607 RB_DEBUG_COUNTER_INC(obj_hash_1);
1608 break;
1609 case 2:
1610 RB_DEBUG_COUNTER_INC(obj_hash_2);
1611 break;
1612 case 3:
1613 RB_DEBUG_COUNTER_INC(obj_hash_3);
1614 break;
1615 case 4:
1616 RB_DEBUG_COUNTER_INC(obj_hash_4);
1617 break;
1618 case 5:
1619 case 6:
1620 case 7:
1621 case 8:
1622 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1623 break;
1624 default:
1625 GC_ASSERT(RHASH_SIZE(obj) > 8);
1626 RB_DEBUG_COUNTER_INC(obj_hash_g8);
1627 }
1628
1629 if (RHASH_AR_TABLE_P(obj)) {
1630 if (RHASH_AR_TABLE(obj) == NULL) {
1631 RB_DEBUG_COUNTER_INC(obj_hash_null);
1632 }
1633 else {
1634 RB_DEBUG_COUNTER_INC(obj_hash_ar);
1635 }
1636 }
1637 else {
1638 RB_DEBUG_COUNTER_INC(obj_hash_st);
1639 }
1640#endif
1641
1642 rb_hash_free(obj);
1643 break;
1644 case T_REGEXP:
1645 if (RREGEXP(obj)->ptr) {
1646 onig_free(RREGEXP(obj)->ptr);
1647 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1648 }
1649 break;
1650 case T_DATA:
1651 if (!rb_data_free(objspace, obj)) return false;
1652 break;
1653 case T_MATCH:
1654 {
1655 struct RMatch *rm = RMATCH(obj);
1656#if USE_DEBUG_COUNTER
1657 if (rm->num_regs >= 8) {
1658 RB_DEBUG_COUNTER_INC(obj_match_ge8);
1659 }
1660 else if (rm->num_regs >= 4) {
1661 RB_DEBUG_COUNTER_INC(obj_match_ge4);
1662 }
1663 else if (rm->num_regs >= 1) {
1664 RB_DEBUG_COUNTER_INC(obj_match_under4);
1665 }
1666#endif
1667 if (FL_TEST_RAW(obj, RMATCH_ONIG)) {
1668 onig_region_free(&rm->as.onig, 0);
1669 }
1670 SIZED_FREE_N(rm->char_offset, rm->char_offset_num_allocated);
1671
1672 RB_DEBUG_COUNTER_INC(obj_match_ptr);
1673 }
1674 break;
1675 case T_FILE:
1676 if (RFILE(obj)->fptr) {
1677 make_io_zombie(objspace, obj);
1678 RB_DEBUG_COUNTER_INC(obj_file_ptr);
1679 return FALSE;
1680 }
1681 break;
1682 case T_RATIONAL:
1683 RB_DEBUG_COUNTER_INC(obj_rational);
1684 break;
1685 case T_COMPLEX:
1686 RB_DEBUG_COUNTER_INC(obj_complex);
1687 break;
1688 case T_MOVED:
1689 break;
1690 case T_ICLASS:
1691 args.klass = obj;
1692
1693 rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
1694 if (RCLASS_CLASSEXT_TBL(obj)) {
1695 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1696 }
1697
1698 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1699 break;
1700
1701 case T_FLOAT:
1702 RB_DEBUG_COUNTER_INC(obj_float);
1703 break;
1704
1705 case T_BIGNUM:
1706 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1707 SIZED_FREE_N(BIGNUM_DIGITS(obj), BIGNUM_LEN(obj));
1708 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1709 }
1710 else {
1711 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1712 }
1713 break;
1714
1715 case T_NODE:
1716 UNEXPECTED_NODE(obj_free);
1717 break;
1718
1719 case T_STRUCT:
1720 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1721 RSTRUCT(obj)->as.heap.ptr == NULL) {
1722 RB_DEBUG_COUNTER_INC(obj_struct_embed);
1723 }
1724 else {
1725 SIZED_FREE_N(RSTRUCT(obj)->as.heap.ptr, RSTRUCT(obj)->as.heap.len);
1726 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1727 }
1728 break;
1729
1730 case T_SYMBOL:
1731 RB_DEBUG_COUNTER_INC(obj_symbol);
1732 break;
1733
1734 case T_IMEMO:
1735 rb_imemo_free((VALUE)obj);
1736 break;
1737
1738 default:
1739 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1740 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1741 }
1742
1743 if (FL_TEST_RAW(obj, FL_FINALIZE)) {
1744 rb_gc_impl_make_zombie(objspace, obj, 0, 0);
1745 return FALSE;
1746 }
1747 else {
1748 return TRUE;
1749 }
1750}
1751
1752void
1753rb_objspace_set_event_hook(const rb_event_flag_t event)
1754{
1755 rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1756}
1757
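/* Decides whether an object is hidden from ObjectSpace: freed slots, T_NONE,
 * T_MOVED, T_IMEMO, T_ICLASS and T_ZOMBIE objects, hidden objects without a
 * class, and internal singleton classes are all treated as internal and are
 * skipped by ObjectSpace.each_object below. */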
1758static int
1759internal_object_p(VALUE obj)
1760{
1761 void *ptr = asan_unpoison_object_temporary(obj);
1762
1763 if (RBASIC(obj)->flags) {
1764 switch (BUILTIN_TYPE(obj)) {
1765 case T_NODE:
1766 UNEXPECTED_NODE(internal_object_p);
1767 break;
1768 case T_NONE:
1769 case T_MOVED:
1770 case T_IMEMO:
1771 case T_ICLASS:
1772 case T_ZOMBIE:
1773 break;
1774 case T_CLASS:
1775 if (obj == rb_mRubyVMFrozenCore)
1776 return 1;
1777
1778 if (!RBASIC_CLASS(obj)) break;
1779 if (RCLASS_SINGLETON_P(obj)) {
1780 return rb_singleton_class_internal_p(obj);
1781 }
1782 return 0;
1783 default:
1784 if (!RBASIC(obj)->klass) break;
1785 return 0;
1786 }
1787 }
1788 if (ptr || !RBASIC(obj)->flags) {
1789 rb_asan_poison_object(obj);
1790 }
1791 return 1;
1792}
1793
1794int
1795rb_objspace_internal_object_p(VALUE obj)
1796{
1797 return internal_object_p(obj);
1798}
1799
1800struct os_each_struct {
 1801 size_t num;
1802 VALUE of;
1803};
1804
1805static int
1806os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1807{
1808 struct os_each_struct *oes = (struct os_each_struct *)data;
1809
1810 VALUE v = (VALUE)vstart;
1811 for (; v != (VALUE)vend; v += stride) {
1812 if (!internal_object_p(v)) {
1813 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1814 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1815 rb_yield(v);
1816 oes->num++;
1817 }
1818 }
1819 }
1820 }
1821
1822 return 0;
1823}
1824
1825static VALUE
1826os_obj_of(VALUE of)
1827{
1828 struct os_each_struct oes;
1829
1830 oes.num = 0;
1831 oes.of = of;
1832 rb_objspace_each_objects(os_obj_of_i, &oes);
1833 return SIZET2NUM(oes.num);
1834}
1835
1836/*
1837 * call-seq:
1838 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1839 * ObjectSpace.each_object([module]) -> an_enumerator
1840 *
1841 * Calls the block once for each living, nonimmediate object in this
1842 * Ruby process. If <i>module</i> is specified, calls the block
1843 * for only those classes or modules that match (or are a subclass of)
1844 * <i>module</i>. Returns the number of objects found. Immediate
 1845 * objects (such as <code>Fixnum</code>s, static <code>Symbol</code>s,
1846 * <code>true</code>, <code>false</code> and <code>nil</code>) are
1847 * never returned.
1848 *
1849 * If no block is given, an enumerator is returned instead.
1850 *
1851 * Job = Class.new
1852 * jobs = [Job.new, Job.new]
1853 * count = ObjectSpace.each_object(Job) {|x| p x }
1854 * puts "Total count: #{count}"
1855 *
1856 * <em>produces:</em>
1857 *
1858 * #<Job:0x000000011d6cbbf0>
1859 * #<Job:0x000000011d6cbc68>
1860 * Total count: 2
1861 *
1862 * Due to a current Ractor implementation issue, this method does not yield
 1863 * Ractor-unshareable objects when the process is in multi-Ractor mode. Multi-Ractor
1864 * mode is enabled when <code>Ractor.new</code> has been called for the first time.
1865 * See https://bugs.ruby-lang.org/issues/19387 for more information.
1866 *
1867 * a = 12345678987654321 # shareable
1868 * b = [].freeze # shareable
1869 * c = {} # not shareable
1870 * ObjectSpace.each_object {|x| x } # yields a, b, and c
1871 * Ractor.new {} # enter multi-Ractor mode
1872 * ObjectSpace.each_object {|x| x } # does not yield c
1873 *
1874 */
1875
1876static VALUE
1877os_each_obj(int argc, VALUE *argv, VALUE os)
1878{
1879 VALUE of;
1880
1881 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1882 RETURN_ENUMERATOR(os, 1, &of);
1883 return os_obj_of(of);
1884}
1885
1886/*
1887 * call-seq:
1888 * ObjectSpace.undefine_finalizer(obj)
1889 *
1890 * Removes all finalizers for <i>obj</i>.
1891 *
1892 */
1893
1894static VALUE
1895undefine_final(VALUE os, VALUE obj)
1896{
1897 return rb_undefine_finalizer(obj);
1898}
1899
1900VALUE
1901rb_undefine_finalizer(VALUE obj)
1902{
1903 rb_check_frozen(obj);
1904
1905 rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1906
1907 return obj;
1908}
1909
1910static void
1911should_be_callable(VALUE block)
1912{
1913 if (!rb_obj_respond_to(block, idCall, TRUE)) {
1914 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1915 rb_obj_class(block));
1916 }
1917}
1918
1919static void
1920should_be_finalizable(VALUE obj)
1921{
1922 if (!FL_ABLE(obj)) {
1923 rb_raise(rb_eArgError, "cannot define finalizer for %s",
1924 rb_obj_classname(obj));
1925 }
1926 rb_check_frozen(obj);
1927}
1928
1929void
1930rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1931{
1932 rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1933}
1934
1935/*
1936 * call-seq:
1937 * ObjectSpace.define_finalizer(obj) {|id| ... } -> array
1938 * ObjectSpace.define_finalizer(obj, finalizer) -> array
1939 *
1940 * Adds a new finalizer for +obj+ that is called when +obj+ is destroyed
1941 * by the garbage collector or when Ruby shuts down (whichever comes first).
1942 *
1943 * With a block given, uses the block as the callback. Without a block given,
1944 * uses a callable object +finalizer+ as the callback. The callback is called
1945 * when +obj+ is destroyed with a single argument +id+ which is the object
1946 * ID of +obj+ (see Object#object_id).
1947 *
1948 * The return value is an array <code>[0, callback]</code>, where +callback+
1949 * is a Proc created from the block if one was given or +finalizer+ otherwise.
1950 *
1951 * Note that defining a finalizer in an instance method of the object may prevent
1952 * the object from being garbage collected: if the block or +finalizer+ refers
1953 * to +obj+, then +obj+ will never be reclaimed by the garbage collector. For example,
1954 * the following script demonstrates the issue:
1955 *
1956 * class Foo
1957 * def define_final
1958 * ObjectSpace.define_finalizer(self) do |id|
1959 * puts "Running finalizer for #{id}!"
1960 * end
1961 * end
1962 * end
1963 *
1964 * obj = Foo.new
1965 * obj.define_final
1966 *
1967 * There are two patterns to solve this issue:
1968 *
1969 * - Create the finalizer in a non-instance method so it can safely capture
1970 * the needed state:
1971 *
1972 * class Foo
1973 * def define_final
1974 * ObjectSpace.define_finalizer(self, self.class.create_finalizer)
1975 * end
1976 *
1977 * def self.create_finalizer
1978 * proc do |id|
1979 * puts "Running finalizer for #{id}!"
1980 * end
1981 * end
1982 * end
1983 *
1984 * - Use a callable object:
1985 *
1986 * class Foo
1987 * class Finalizer
1988 * def call(id)
1989 * puts "Running finalizer for #{id}!"
1990 * end
1991 * end
1992 *
1993 * def define_final
1994 * ObjectSpace.define_finalizer(self, Finalizer.new)
1995 * end
1996 * end
1997 *
1998 * Note that finalization can be unpredictable and is never guaranteed
1999 * to be run except on exit.
2000 */
2001
2002static VALUE
2003define_final(int argc, VALUE *argv, VALUE os)
2004{
2005 VALUE obj, block;
2006
2007 rb_scan_args(argc, argv, "11", &obj, &block);
2008 if (argc == 1) {
2009 block = rb_block_proc();
2010 }
2011
2012 if (rb_callable_receiver(block) == obj) {
2013 rb_warn("finalizer references object to be finalized");
2014 }
2015
2016 return rb_define_finalizer(obj, block);
2017}
2018
2019VALUE
2020rb_define_finalizer(VALUE obj, VALUE block)
2021{
2022 should_be_finalizable(obj);
2023 should_be_callable(block);
2024
2025 block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
2026
2027 block = rb_ary_new3(2, INT2FIX(0), block);
2028 OBJ_FREEZE(block);
2029 return block;
2030}
2031
2032void
2033rb_objspace_call_finalizer(void)
2034{
2035 rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
2036}
2037
2038void
2039rb_objspace_free_objects(void *objspace)
2040{
2041 rb_gc_impl_shutdown_free_objects(objspace);
2042}
2043
2044int
2045rb_objspace_garbage_object_p(VALUE obj)
2046{
2047 return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
2048}
2049
2050bool
2051rb_gc_pointer_to_heap_p(VALUE obj)
2052{
2053 return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
2054}
2055
2056#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
2057#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
2058static VALUE id2ref_value = 0;
2059
2060#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
2061static size_t object_id_counter = 1;
2062#else
2063static unsigned long long object_id_counter = 1;
2064#endif
2065
2066static inline VALUE
2067generate_next_object_id(void)
2068{
2069#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
2070 // 64bit atomics are available
2071 return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
2072#else
2073 unsigned int lock_lev = RB_GC_VM_LOCK();
2074 VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
2075 RB_GC_VM_UNLOCK(lock_lev);
2076 return id;
2077#endif
2078}
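
/*
 * A worked example of the arithmetic above, assuming a typical 64-bit
 * flonum build where RUBY_IMMEDIATE_MASK is 0x07 (the mask is
 * platform-dependent): OBJ_ID_INCREMENT is then 8, so successive calls
 * yield object IDs 8, 16, 24, ... Every immediate VALUE other than
 * Qfalse (0) has at least one of its low three bits set (Fixnums set
 * bit 0, flonums bit 1, static symbols carry 0x0c), so a non-zero
 * multiple of 8 can never collide with an immediate's ID, which is the
 * immediate's VALUE itself converted to a Numeric.
 */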
2079
2080void
2081rb_gc_obj_id_moved(VALUE obj)
2082{
2083 if (UNLIKELY(id2ref_tbl)) {
2084 st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
2085 }
2086}
2087
2088static int
2089object_id_cmp(st_data_t x, st_data_t y)
2090{
2091 if (RB_TYPE_P(x, T_BIGNUM)) {
2092 return !rb_big_eql(x, y);
2093 }
2094 else {
2095 return x != y;
2096 }
2097}
2098
2099static st_index_t
2100object_id_hash(st_data_t n)
2101{
2102 return FIX2LONG(rb_hash((VALUE)n));
2103}
2104
2105static const struct st_hash_type object_id_hash_type = {
2106 object_id_cmp,
2107 object_id_hash,
2108};
2109
2110static void gc_mark_tbl_no_pin(st_table *table);
2111
2112static void
2113id2ref_tbl_mark(void *data)
2114{
2115 st_table *table = (st_table *)data;
2116 if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
2117 // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
2118 rb_mark_set(table);
2119 }
2120 // We purposely don't mark values, as they are weak references.
2121 // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
2122}
2123
2124static size_t
2125id2ref_tbl_memsize(const void *data)
2126{
2127 return rb_st_memsize(data);
2128}
2129
2130static void
2131id2ref_tbl_free(void *data)
2132{
2133 id2ref_tbl = NULL; // clear global ref
2134 st_table *table = (st_table *)data;
2135 st_free_table(table);
2136}
2137
2138static const rb_data_type_t id2ref_tbl_type = {
2139 .wrap_struct_name = "VM/_id2ref_table",
2140 .function = {
2141 .dmark = id2ref_tbl_mark,
2142 .dfree = id2ref_tbl_free,
2143 .dsize = id2ref_tbl_memsize,
2144 // dcompact function not required because the table is reference updated
2145 // in rb_gc_vm_weak_table_foreach
2146 },
2147 .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
2148};
2149
2150static VALUE
2151class_object_id(VALUE klass)
2152{
2153 VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
2154 if (!id) {
2155 unsigned int lock_lev = RB_GC_VM_LOCK();
2156 id = generate_next_object_id();
2157 VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
2158 if (existing_id) {
2159 id = existing_id;
2160 }
2161 else if (RB_UNLIKELY(id2ref_tbl)) {
2162 st_insert(id2ref_tbl, id, klass);
2163 }
2164 RB_GC_VM_UNLOCK(lock_lev);
2165 }
2166 return id;
2167}
2168
2169static inline VALUE
2170object_id_get(VALUE obj, shape_id_t shape_id)
2171{
2172 VALUE id;
2173 if (rb_shape_complex_p(shape_id)) {
2174 id = rb_obj_field_get(obj, ROOT_COMPLEX_WITH_OBJ_ID);
2175 }
2176 else {
2177 id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
2178 }
2179
2180#if RUBY_DEBUG
2181 if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
2182 rb_p(obj);
2183 rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
2184 }
2185#endif
2186
2187 return id;
2188}
2189
2190static VALUE
2191object_id0(VALUE obj)
2192{
2193 VALUE id = Qfalse;
2194 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2195
2196 if (rb_shape_has_object_id(shape_id)) {
2197 return object_id_get(obj, shape_id);
2198 }
2199
2200 shape_id_t object_id_shape_id = rb_obj_shape_transition_object_id(obj);
2201
2202 id = generate_next_object_id();
2203 rb_obj_field_set(obj, object_id_shape_id, 0, id);
2204
2205 RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
2206 RUBY_ASSERT(rb_obj_shape_has_id(obj));
2207
2208 if (RB_UNLIKELY(id2ref_tbl)) {
2209 RB_VM_LOCKING() {
2210 st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
2211 }
2212 }
2213 return id;
2214}
2215
2216static VALUE
2217object_id(VALUE obj)
2218{
2219 switch (BUILTIN_TYPE(obj)) {
2220 case T_CLASS:
2221 case T_MODULE:
2222 // With Ruby Box, classes and modules have different fields
2223 // in different boxes, so we cannot store the object id
2224 // in fields.
2225 return class_object_id(obj);
2226 case T_IMEMO:
2227 RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
2228 break;
2229 default:
2230 break;
2231 }
2232
2233 if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
2234 unsigned int lock_lev = RB_GC_VM_LOCK();
2235 VALUE id = object_id0(obj);
2236 RB_GC_VM_UNLOCK(lock_lev);
2237 return id;
2238 }
2239
2240 return object_id0(obj);
2241}
2242
2243static void
2244build_id2ref_i(VALUE obj, void *data)
2245{
2246 st_table *id2ref_tbl = (st_table *)data;
2247
2248 switch (BUILTIN_TYPE(obj)) {
2249 case T_CLASS:
2250 case T_MODULE:
2251 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2252 if (RCLASS(obj)->object_id) {
2253 st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
2254 }
2255 break;
2256 case T_IMEMO:
2257 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2258 if (IMEMO_TYPE_P(obj, imemo_fields) && rb_obj_shape_has_id(obj)) {
2259 st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
2260 }
2261 break;
2262 case T_OBJECT:
2263 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2264 if (rb_obj_shape_has_id(obj)) {
2265 st_insert(id2ref_tbl, rb_obj_id(obj), obj);
2266 }
2267 break;
2268 default:
2269 // For generic_fields, the T_IMEMO/fields is responsible for populating the entry.
2270 break;
2271 }
2272}
2273
2274static VALUE
2275object_id_to_ref(void *objspace_ptr, VALUE object_id)
2276{
2277 rb_objspace_t *objspace = objspace_ptr;
2278
2279 unsigned int lev = RB_GC_VM_LOCK();
2280
2281 if (!id2ref_tbl) {
2282 rb_gc_vm_barrier(); // stop other ractors
2283
2284 // GC must not trigger while we build the table; otherwise, if we end
2285 // up freeing an object that had an ID, we might try to delete it from
2286 // the table even though it wasn't inserted yet.
2287 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
2288 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
2289
2290 // build_id2ref_i will most certainly malloc, which could trigger GC and sweep
2291 // objects we just added to the table.
2292 // By calling rb_gc_disable() we also save having to handle potentially garbage objects.
2293 bool gc_disabled = RTEST(rb_gc_disable());
2294 {
2295 id2ref_tbl = tmp_id2ref_tbl;
2296 id2ref_value = tmp_id2ref_value;
2297
2298 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
2299 }
2300 if (!gc_disabled) rb_gc_enable();
2301 }
2302
2303 VALUE obj;
2304 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2305
2306 RB_GC_VM_UNLOCK(lev);
2307
2308 if (found) {
2309 return obj;
2310 }
2311
2312 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2313 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2314 }
2315 else {
2316 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2317 }
2318}
2319
2320static inline void
2321obj_free_object_id(VALUE obj)
2322{
2323 VALUE obj_id = 0;
2324 if (RB_UNLIKELY(id2ref_tbl)) {
2325 switch (BUILTIN_TYPE(obj)) {
2326 case T_CLASS:
2327 case T_MODULE:
2328 obj_id = RCLASS(obj)->object_id;
2329 break;
2330 case T_IMEMO:
2331 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2332 return;
2333 }
2334 // fallthrough
2335 case T_OBJECT:
2336 {
2337 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2338 if (rb_shape_has_object_id(shape_id)) {
2339 obj_id = object_id_get(obj, shape_id);
2340 }
2341 break;
2342 }
2343 default:
2344 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2345 return;
2346 }
2347
2348 if (RB_UNLIKELY(obj_id)) {
2349 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2350
2351 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2352 // If the object is a T_IMEMO/fields, then it's possible the actual object
2353 // has been garbage collected already.
2354 if (!RB_TYPE_P(obj, T_IMEMO)) {
2355 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2356 }
2357 }
2358 }
2359 }
2360}
2361
2362void
2363rb_gc_obj_free_vm_weak_references(VALUE obj)
2364{
2366 obj_free_object_id(obj);
2367
2368 if (rb_obj_gen_fields_p(obj)) {
2369 rb_free_generic_ivar(obj);
2370 }
2371
2372 switch (BUILTIN_TYPE(obj)) {
2373 case T_STRING:
2374 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2375 rb_gc_free_fstring(obj);
2376 }
2377 break;
2378 case T_SYMBOL:
2379 rb_gc_free_dsymbol(obj);
2380 break;
2381 case T_IMEMO:
2382 switch (imemo_type(obj)) {
2383 case imemo_callinfo:
2384 rb_vm_ci_free((const struct rb_callinfo *)obj);
2385 break;
2386 case imemo_ment:
2387 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2388 break;
2389 default:
2390 break;
2391 }
2392 break;
2393 default:
2394 break;
2395 }
2396}
2397
2398/*
2399 * call-seq:
2400 * ObjectSpace._id2ref(object_id) -> an_object
2401 *
2402 * Converts an object id to a reference to the object. May not be
2403 * called on an object id passed as a parameter to a finalizer.
2404 *
2405 * s = "I am a string" #=> "I am a string"
2406 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2407 * r == s #=> true
2408 *
2409 * In multi-Ractor mode, if the object is not shareable, this raises
2410 * RangeError.
2411 *
2412 * This method is deprecated and should no longer be used.
2413 */
2414
2415static VALUE
2416id2ref(VALUE objid)
2417{
2418 objid = rb_to_int(objid);
2419 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2420 VALUE ptr = (VALUE)NUM2PTR(objid);
2421 if (SPECIAL_CONST_P(ptr)) {
2422 if (ptr == Qtrue) return Qtrue;
2423 if (ptr == Qfalse) return Qfalse;
2424 if (NIL_P(ptr)) return Qnil;
2425 if (FIXNUM_P(ptr)) return ptr;
2426 if (FLONUM_P(ptr)) return ptr;
2427
2428 if (SYMBOL_P(ptr)) {
2429 // Check that the symbol is valid
2430 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2431 return ptr;
2432 }
2433 else {
2434 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2435 }
2436 }
2437
2438 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2439 }
2440 }
2441
2442 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2443 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2444 return obj;
2445 }
2446 else {
2447 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2448 }
2449}
2450
2451/* :nodoc: */
2452static VALUE
2453os_id2ref(VALUE os, VALUE objid)
2454{
2455 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2456 return id2ref(objid);
2457}
2458
2459static VALUE
2460rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2461{
2462 if (SPECIAL_CONST_P(obj)) {
2463#if SIZEOF_LONG == SIZEOF_VOIDP
2464 return LONG2NUM((SIGNED_VALUE)obj);
2465#else
2466 return LL2NUM((SIGNED_VALUE)obj);
2467#endif
2468 }
2469
2470 return get_heap_object_id(obj);
2471}
2472
2473static VALUE
2474nonspecial_obj_id(VALUE obj)
2475{
2476#if SIZEOF_LONG == SIZEOF_VOIDP
2477 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2478#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2479 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2480#else
2481# error not supported
2482#endif
2483}
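
/*
 * A worked example of the tagging above, assuming SIZEOF_LONG ==
 * SIZEOF_VOIDP: for a heap object at address 0x1000, nonspecial_obj_id()
 * returns the VALUE 0x1001 (0x1000 | FIXNUM_FLAG), which is the Fixnum
 * 2048, i.e. the address shifted right by one bit.
 */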
2484
2485VALUE
2486rb_memory_id(VALUE obj)
2487{
2488 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2489}
2490
2491/*
2492 * Document-method: __id__
2493 * Document-method: object_id
2494 *
2495 * call-seq:
2496 * obj.__id__ -> integer
2497 * obj.object_id -> integer
2498 *
2499 * Returns an integer identifier for +obj+.
2500 *
2501 * The same number will be returned on all calls to +object_id+ for a given
2502 * object, and no two active objects will share an id.
2503 *
2504 * Note that some objects of builtin classes are reused for optimization.
2505 * This is the case for immediate values and frozen string literals.
2506 *
2507 * BasicObject implements +__id__+, Kernel implements +object_id+.
2508 *
2509 * Immediate values are not passed by reference but are passed by value:
2510 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2511 *
2512 * Object.new.object_id == Object.new.object_id # => false
2513 * (21 * 2).object_id == (21 * 2).object_id # => true
2514 * "hello".object_id == "hello".object_id # => false
2515 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2516 */
2517
2518VALUE
2519rb_obj_id(VALUE obj)
2520{
2521 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2522 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2523 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2524 * any immediates. */
2525 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2526}
2527
2528bool
2529rb_obj_id_p(VALUE obj)
2530{
2531 return !RB_TYPE_P(obj, T_IMEMO) && rb_obj_shape_has_id(obj);
2532}
2533
2534/*
2535 * GC implementations should call this function before the GC phase that updates references
2536 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2537 * "W^X" policy and protect the code memory from being modified during execution. This function
2538 * makes the code memory writeable.
2539 */
2540void
2541rb_gc_before_updating_jit_code(void)
2542{
2543#if USE_YJIT
2544 rb_yjit_mark_all_writeable();
2545#endif
2546#if USE_ZJIT
2547 rb_zjit_mark_all_writable();
2548#endif
2549}
2550
2551/*
2552 * GC implementations should call this function after the GC phase that updates references
2553 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2554 * executable again.
2555 */
2556void
2557rb_gc_after_updating_jit_code(void)
2558{
2559#if USE_YJIT
2560 rb_yjit_mark_all_executable();
2561#endif
2562#if USE_ZJIT
2563 rb_zjit_mark_all_executable();
2564#endif
2565}
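
/*
 * An illustrative sketch of the expected call ordering (the middle
 * function name is hypothetical, standing in for a GC implementation's
 * own reference-updating step):
 *
 *     rb_gc_before_updating_jit_code();  // flip JIT code pages from X to W
 *     update_references_in_jit_code();   // hypothetical compaction step
 *     rb_gc_after_updating_jit_code();   // flip JIT code pages back to X
 */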
2566
2567static void
2568classext_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2569{
2570 size_t *size = (size_t *)arg;
2571 size_t s = 0;
2572
2573 if (RCLASSEXT_M_TBL(ext)) {
2574 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2575 }
2576 if (RCLASSEXT_CONST_TBL(ext)) {
2577 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2578 }
2579 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2580 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2581 }
2582 if (!prime) {
2583 s += sizeof(rb_classext_t);
2584 }
2585 *size += s;
2586}
2587
2588static void
2589classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2590{
2591 size_t *size = (size_t *)arg;
2592 size_t array_size;
2593 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2594 RUBY_ASSERT(prime);
2595 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2596 *size += array_size * sizeof(VALUE);
2597 }
2598}
2599
2600size_t
2601rb_obj_memsize_of(VALUE obj)
2602{
2603 size_t size = 0;
2604
2605 if (SPECIAL_CONST_P(obj)) {
2606 return 0;
2607 }
2608
2609 switch (BUILTIN_TYPE(obj)) {
2610 case T_OBJECT:
2611 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2612 if (rb_obj_shape_complex_p(obj)) {
2613 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2614 }
2615 else {
2616 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2617 }
2618 }
2619 break;
2620 case T_MODULE:
2621 case T_CLASS:
2622 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2623 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2624 break;
2625 case T_ICLASS:
2626 if (RICLASS_OWNS_M_TBL_P(obj)) {
2627 if (RCLASS_M_TBL(obj)) {
2628 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2629 }
2630 }
2631 break;
2632 case T_STRING:
2633 size += rb_str_memsize(obj);
2634 break;
2635 case T_ARRAY:
2636 size += rb_ary_memsize(obj);
2637 break;
2638 case T_HASH:
2639 if (RHASH_ST_TABLE_P(obj)) {
2640 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2641 /* st_table is in the slot */
2642 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2643 }
2644 break;
2645 case T_REGEXP:
2646 if (RREGEXP_PTR(obj)) {
2647 size += onig_memsize(RREGEXP_PTR(obj));
2648 }
2649 break;
2650 case T_DATA:
2651 size += rb_objspace_data_type_memsize(obj);
2652 break;
2653 case T_MATCH:
2654 {
2655 struct RMatch *rm = RMATCH(obj);
2656 if (FL_TEST_RAW(obj, RMATCH_ONIG)) {
2657 size += onig_region_memsize(&rm->as.onig);
2658 }
2659 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2660 }
2661 break;
2662 case T_FILE:
2663 if (RFILE(obj)->fptr) {
2664 size += rb_io_memsize(RFILE(obj)->fptr);
2665 }
2666 break;
2667 case T_RATIONAL:
2668 case T_COMPLEX:
2669 break;
2670 case T_IMEMO:
2671 size += rb_imemo_memsize(obj);
2672 break;
2673
2674 case T_FLOAT:
2675 case T_SYMBOL:
2676 break;
2677
2678 case T_BIGNUM:
2679 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2680 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2681 }
2682 break;
2683
2684 case T_NODE:
2685 UNEXPECTED_NODE(obj_memsize_of);
2686 break;
2687
2688 case T_STRUCT:
2689 if (RSTRUCT_EMBED_LEN(obj) == 0) {
2690 size += sizeof(VALUE) * RSTRUCT_LEN_RAW(obj);
2691 }
2692 break;
2693
2694 case T_ZOMBIE:
2695 case T_MOVED:
2696 break;
2697
2698 default:
2699 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2700 BUILTIN_TYPE(obj), (void*)obj);
2701 }
2702
2703 return size + rb_gc_obj_slot_size(obj);
2704}
2705
2706static int
2707set_zero(st_data_t key, st_data_t val, st_data_t arg)
2708{
2709 VALUE k = (VALUE)key;
2710 VALUE hash = (VALUE)arg;
2711 rb_hash_aset(hash, k, INT2FIX(0));
2712 return ST_CONTINUE;
2713}
2714
2715struct count_objects_data {
2716 size_t counts[T_MASK+1];
2717 size_t freed;
2718 size_t total;
2719};
2720
2721static void
2722count_objects_i(VALUE obj, void *d)
2723{
2724 struct count_objects_data *data = (struct count_objects_data *)d;
2725
2726 if (RBASIC(obj)->flags) {
2727 data->counts[BUILTIN_TYPE(obj)]++;
2728 }
2729 else {
2730 data->freed++;
2731 }
2732
2733 data->total++;
2734}
2735
2736/*
2737 * call-seq:
2738 * ObjectSpace.count_objects(result_hash = {}) -> hash
2739 *
2740 * Counts the number of objects, grouped by type.
2741 *
2742 * It returns a hash that looks like:
2743 *
2744 * {
2745 * TOTAL: 10000,
2746 * FREE: 3011,
2747 * T_OBJECT: 6,
2748 * T_CLASS: 404,
2749 * # ...
2750 * }
2751 *
2752 * The contents of the returned hash are implementation specific and
2753 * may be changed in future versions without notice.
2754 *
2755 * The keys starting with +:T_+ are live objects of a particular type.
2756 * For example, +:T_ARRAY+ is the number of arrays.
2757 *
2758 * The key +:FREE+ is the number of object slots which are empty.
2759 *
2760 * The key +:TOTAL+ is the total number of slots (which is the sum of
2761 * all of the other values).
2762 *
2763 * If the optional argument +result_hash+ is given,
2764 * it is overwritten and returned.
2765 * This is intended to avoid the probe effect.
2766 *
2767 * h = {}
2768 * ObjectSpace.count_objects(h)
2769 * puts h
2770 * # => { TOTAL: 10000, T_CLASS: 158280, T_MODULE: 20672, T_STRING: 527249 }
2771 *
2772 * This method is only expected to work on C Ruby.
2773 *
2774 */
2775
2776static VALUE
2777count_objects(int argc, VALUE *argv, VALUE os)
2778{
2779 struct count_objects_data data = { 0 };
2780 VALUE hash = Qnil;
2781 VALUE types[T_MASK + 1];
2782
2783 if (rb_check_arity(argc, 0, 1) == 1) {
2784 hash = argv[0];
2785 if (!RB_TYPE_P(hash, T_HASH))
2786 rb_raise(rb_eTypeError, "non-hash given");
2787 }
2788
2789 for (size_t i = 0; i <= T_MASK; i++) {
2790 // type_sym can allocate an object,
2791 // so we need to create all key symbols in advance
2792 // not to disturb the result
2793 types[i] = type_sym(i);
2794 }
2795
2796 // Same as type_sym, we need to create all key symbols in advance
2797 VALUE total = ID2SYM(rb_intern("TOTAL"));
2798 VALUE free = ID2SYM(rb_intern("FREE"));
2799
2800 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2801
2802 if (NIL_P(hash)) {
2803 hash = rb_hash_new();
2804 }
2805 else if (!RHASH_EMPTY_P(hash)) {
2806 rb_hash_stlike_foreach(hash, set_zero, hash);
2807 }
2808 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2809 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2810
2811 for (size_t i = 0; i <= T_MASK; i++) {
2812 if (data.counts[i]) {
2813 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2814 }
2815 }
2816
2817 return hash;
2818}
2819
2820#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2821
2822#define STACK_START (ec->machine.stack_start)
2823#define STACK_END (ec->machine.stack_end)
2824#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2825
2826#if STACK_GROW_DIRECTION < 0
2827# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2828#elif STACK_GROW_DIRECTION > 0
2829# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2830#else
2831# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2832 : (size_t)(STACK_END - STACK_START + 1))
2833#endif
2834#if !STACK_GROW_DIRECTION
2835int ruby_stack_grow_direction;
2836int
2837ruby_get_stack_grow_direction(volatile VALUE *addr)
2838{
2839 VALUE *end;
2840 SET_MACHINE_STACK_END(&end);
2841
2842 if (end > addr) return ruby_stack_grow_direction = 1;
2843 return ruby_stack_grow_direction = -1;
2844}
2845#endif
2846
2847size_t
2848ruby_stack_length(VALUE **p)
2849{
2850 rb_execution_context_t *ec = GET_EC();
2851 SET_STACK_END;
2852 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2853 return STACK_LENGTH;
2854}
2855
2856#define PREVENT_STACK_OVERFLOW 1
2857#ifndef PREVENT_STACK_OVERFLOW
2858#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2859# define PREVENT_STACK_OVERFLOW 1
2860#else
2861# define PREVENT_STACK_OVERFLOW 0
2862#endif
2863#endif
2864#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2865static int
2866stack_check(rb_execution_context_t *ec, int water_mark)
2867{
2868 SET_STACK_END;
2869
2870 size_t length = STACK_LENGTH;
2871 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2872
2873 return length > maximum_length;
2874}
2875#else
2876#define stack_check(ec, water_mark) FALSE
2877#endif
2878
2879#define STACKFRAME_FOR_CALL_CFUNC 2048
2880
2881int
2882rb_ec_stack_check(rb_execution_context_t *ec)
2883{
2884 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2885}
2886
2887int
2888ruby_stack_check(void)
2889{
2890 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2891}
2892
2893/* ==================== Marking ==================== */
2894
2895#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2896 if (!RB_SPECIAL_CONST_P(obj)) { \
2897 rb_vm_t *vm = GET_VM(); \
2898 void *objspace = vm->gc.objspace; \
2899 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2900 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2901 (func)(objspace, (obj_or_ptr)); \
2902 } \
2903 else if (check_obj ? \
2904 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2905 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2906 true) { \
2907 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2908 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2909 vm->gc.mark_func_data = NULL; \
2910 mark_func_data->mark_func((obj), mark_func_data->data); \
2911 vm->gc.mark_func_data = mark_func_data; \
2912 } \
2913 } \
2914} while (0)
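
/*
 * An illustrative sketch, not part of this file, of how the
 * mark_func_data hook consumed by the macro above can divert "marking"
 * into a traversal callback outside of GC, in the style of
 * ObjectSpace.reachable_objects_from. All names below are hypothetical:
 *
 *     static void
 *     reachable_cb(VALUE child, void *data)
 *     {
 *         rb_ary_push(*(VALUE *)data, child); // collect each outgoing edge
 *     }
 *
 *     static VALUE
 *     collect_children(VALUE obj)
 *     {
 *         VALUE result = rb_ary_new();
 *         struct gc_mark_func_data_struct mfd = {
 *             .mark_func = reachable_cb,
 *             .data = &result,
 *         };
 *         rb_vm_t *vm = GET_VM();
 *         vm->gc.mark_func_data = &mfd;  // divert marking into the callback
 *         rb_gc_mark_children(vm->gc.objspace, obj);
 *         vm->gc.mark_func_data = NULL;
 *         return result;
 *     }
 */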
2915
2916static inline void
2917gc_mark_internal(VALUE obj)
2918{
2919 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2920}
2921
2922void
2923rb_gc_mark_movable(VALUE obj)
2924{
2925 gc_mark_internal(obj);
2926}
2927
2928void
2929rb_gc_mark_and_move(VALUE *ptr)
2930{
2931 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2932}
2933
2934static inline void
2935gc_mark_and_pin_internal(VALUE obj)
2936{
2937 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2938}
2939
2940void
2941rb_gc_mark(VALUE obj)
2942{
2943 gc_mark_and_pin_internal(obj);
2944}
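
/*
 * An illustrative sketch, with hypothetical names, contrasting the two
 * entry points above for a TypedData object: rb_gc_mark() pins the
 * referenced object, while rb_gc_mark_movable() lets it be compacted,
 * provided the type's dcompact callback fixes the slot up afterwards
 * with rb_gc_location():
 *
 *     struct wrapper { VALUE inner; };
 *
 *     static void
 *     wrapper_mark(void *p)  // registered as .dmark
 *     {
 *         struct wrapper *w = p;
 *         rb_gc_mark_movable(w->inner);  // inner may move during compaction
 *     }
 *
 *     static void
 *     wrapper_compact(void *p)  // registered as .dcompact
 *     {
 *         struct wrapper *w = p;
 *         w->inner = rb_gc_location(w->inner);  // chase the new address
 *     }
 */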
2945
2946static inline void
2947gc_mark_maybe_internal(VALUE obj)
2948{
2949 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2950}
2951
2952void
2953rb_gc_mark_maybe(VALUE obj)
2954{
2955 gc_mark_maybe_internal(obj);
2956}
2957
2958ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2959static void
2960each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2961{
2962 VALUE v;
2963 while (n--) {
2964 v = *x;
2965 cb(v, data);
2966 x++;
2967 }
2968}
2969
2970static void
2971each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2972{
2973 if (end <= start) return;
2974 each_location(start, end - start, cb, data);
2975}
2976
2977static void
2978gc_mark_maybe_each_location(VALUE obj, void *data)
2979{
2980 gc_mark_maybe_internal(obj);
2981}
2982
2983void
2984rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2985{
2986 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2987}
2988
2989void
2990rb_gc_mark_values(long n, const VALUE *values)
2991{
2992 for (long i = 0; i < n; i++) {
2993 gc_mark_internal(values[i]);
2994 }
2995}
2996
2997void
2998rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2999{
3000 for (long i = 0; i < n; i++) {
3001 gc_mark_and_pin_internal(values[i]);
3002 }
3003}
3004
3005static int
3006mark_key(st_data_t key, st_data_t value, st_data_t data)
3007{
3008 gc_mark_and_pin_internal((VALUE)key);
3009
3010 return ST_CONTINUE;
3011}
3012
3013void
3014rb_mark_set(st_table *tbl)
3015{
3016 if (!tbl) return;
3017
3018 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
3019}
3020
3021static int
3022mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
3023{
3024 gc_mark_internal((VALUE)key);
3025 gc_mark_internal((VALUE)value);
3026
3027 return ST_CONTINUE;
3028}
3029
3030static int
3031pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
3032{
3033 gc_mark_and_pin_internal((VALUE)key);
3034 gc_mark_and_pin_internal((VALUE)value);
3035
3036 return ST_CONTINUE;
3037}
3038
3039static int
3040pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
3041{
3042 gc_mark_and_pin_internal((VALUE)key);
3043 gc_mark_internal((VALUE)value);
3044
3045 return ST_CONTINUE;
3046}
3047
3048static void
3049mark_hash(VALUE hash)
3050{
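 /* compare_by_identity hashes derive each key's hash from its address, so
 * keys must stay pinned; values may still move and be updated. */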
3051 if (rb_hash_compare_by_id_p(hash)) {
3052 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
3053 }
3054 else {
3055 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
3056 }
3057
3058 gc_mark_internal(RHASH(hash)->ifnone);
3059}
3060
3061void
3062rb_mark_hash(st_table *tbl)
3063{
3064 if (!tbl) return;
3065
3066 st_foreach(tbl, pin_key_pin_value, 0);
3067}
3068
3069static enum rb_id_table_iterator_result
3070mark_method_entry_i(VALUE me, void *objspace)
3071{
3072 gc_mark_internal(me);
3073
3074 return ID_TABLE_CONTINUE;
3075}
3076
3077static void
3078mark_m_tbl(void *objspace, struct rb_id_table *tbl)
3079{
3080 if (tbl) {
3081 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
3082 }
3083}
3084
3085static enum rb_id_table_iterator_result
3086mark_const_entry_i(VALUE value, void *objspace)
3087{
3088 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
3089
3090 if (!rb_gc_checking_shareable()) {
3091 gc_mark_internal(ce->value);
3092 gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
3093 }
3094 return ID_TABLE_CONTINUE;
3095}
3096
3097static void
3098mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
3099{
3100 if (!tbl) return;
3101 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
3102}
3103
3104#if STACK_GROW_DIRECTION < 0
3105#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
3106#elif STACK_GROW_DIRECTION > 0
3107#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
3108#else
3109#define GET_STACK_BOUNDS(start, end, appendix) \
3110 ((STACK_END < STACK_START) ? \
3111 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
3112#endif
3113
3114static void
3115gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
3116{
3117 gc_mark_maybe_internal(obj);
3118
3119#ifdef RUBY_ASAN_ENABLED
3120 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
3121 void *fake_frame_start;
3122 void *fake_frame_end;
3123 bool is_fake_frame = asan_get_fake_stack_extents(
3124 ec->machine.asan_fake_stack_handle, obj,
3125 ec->machine.stack_start, ec->machine.stack_end,
3126 &fake_frame_start, &fake_frame_end
3127 );
3128 if (is_fake_frame) {
3129 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
3130 }
3131#endif
3132}
3133
3134static bool
3135gc_object_moved_p_internal(void *objspace, VALUE obj)
3136{
3137 if (SPECIAL_CONST_P(obj)) {
3138 return false;
3139 }
3140
3141 return rb_gc_impl_object_moved_p(objspace, obj);
3142}
3143
3144static VALUE
3145gc_location_internal(void *objspace, VALUE value)
3146{
3147 if (SPECIAL_CONST_P(value)) {
3148 return value;
3149 }
3150
3151 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
3152
3153 return rb_gc_impl_location(objspace, value);
3154}
3155
3156VALUE
3157rb_gc_location(VALUE value)
3158{
3159 return gc_location_internal(rb_gc_get_objspace(), value);
3160}
3161
3162#if defined(__wasm__)
3163
3164
3165static VALUE *rb_stack_range_tmp[2];
3166
3167static void
3168rb_mark_locations(void *begin, void *end)
3169{
3170 rb_stack_range_tmp[0] = begin;
3171 rb_stack_range_tmp[1] = end;
3172}
3173
3174void
3175rb_gc_save_machine_context(void)
3176{
3177 // no-op
3178}
3179
3180# if defined(__EMSCRIPTEN__)
3181
3182static void
3183mark_current_machine_context(const rb_execution_context_t *ec)
3184{
3185 emscripten_scan_stack(rb_mark_locations);
3186 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3187
3188 emscripten_scan_registers(rb_mark_locations);
3189 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3190}
3191# else // use Asyncify version
3192
3193static void
3194mark_current_machine_context(rb_execution_context_t *ec)
3195{
3196 VALUE *stack_start, *stack_end;
3197 SET_STACK_END;
3198 GET_STACK_BOUNDS(stack_start, stack_end, 1);
3199 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
3200
3201 rb_wasm_scan_locals(rb_mark_locations);
3202 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
3203}
3204
3205# endif
3206
3207#else // !defined(__wasm__)
3208
3209void
3210rb_gc_save_machine_context(void)
3211{
3212 rb_thread_t *thread = GET_THREAD();
3213
3214 RB_VM_SAVE_MACHINE_CONTEXT(thread);
3215}
3216
3217
3218static void
3219mark_current_machine_context(const rb_execution_context_t *ec)
3220{
3221 rb_gc_mark_machine_context(ec);
3222}
3223#endif
3224
3225void
3226rb_gc_mark_machine_context(const rb_execution_context_t *ec)
3227{
3228 VALUE *stack_start, *stack_end;
3229
3230 GET_STACK_BOUNDS(stack_start, stack_end, 0);
3231 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
3232
3233 void *data =
3234#ifdef RUBY_ASAN_ENABLED
3235 /* gc_mark_machine_stack_location_maybe() uses data as const */
3236 (rb_execution_context_t *)ec;
3237#else
3238 NULL;
3239#endif
3240
3241 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3242 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3243 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3244}
3245
3246static int
3247rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3248{
3249 gc_mark_and_pin_internal((VALUE)value);
3250
3251 return ST_CONTINUE;
3252}
3253
3254void
3255rb_mark_tbl(st_table *tbl)
3256{
3257 if (!tbl || tbl->num_entries == 0) return;
3258
3259 st_foreach(tbl, rb_mark_tbl_i, 0);
3260}
3261
3262static void
3263gc_mark_tbl_no_pin(st_table *tbl)
3264{
3265 if (!tbl || tbl->num_entries == 0) return;
3266
3267 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3268}
3269
3270void
3271rb_mark_tbl_no_pin(st_table *tbl)
3272{
3273 gc_mark_tbl_no_pin(tbl);
3274}
3275
3276static bool
3277gc_declarative_marking_p(const rb_data_type_t *type)
3278{
3279 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3280}
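
/*
 * An illustrative sketch, with hypothetical names, of the declarative
 * marking layout consumed below: when RUBY_TYPED_DECL_MARKING is set,
 * the dmark slot holds not a function but a RUBY_REF_END-terminated
 * array of VALUE field offsets (see TYPED_DATA_REFS_OFFSET_LIST):
 *
 *     struct point_pair { VALUE first; VALUE second; };
 *
 *     static const size_t point_pair_refs[] = {
 *         offsetof(struct point_pair, first),
 *         offsetof(struct point_pair, second),
 *         RUBY_REF_END,
 *     };
 *
 *     static const rb_data_type_t point_pair_type = {
 *         .wrap_struct_name = "point_pair",
 *         .function = {
 *             .dmark = (RUBY_DATA_FUNC)(uintptr_t)point_pair_refs,
 *             .dfree = RUBY_TYPED_DEFAULT_FREE,
 *         },
 *         .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */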
3281
3282static rb_execution_context_t *
3283rb_gc_get_ec(void)
3284{
3285 void *objspace = rb_gc_get_objspace();
3286
3287 if (RB_LIKELY(rb_gc_impl_during_gc_p(objspace))) {
3288 return rb_gc_impl_get_vm_context(objspace)->ec;
3289 }
3290 else {
3291 return GET_EC();
3292 }
3293}
3294
3295void
3296rb_gc_mark_roots(void *objspace, const char **categoryp)
3297{
3298 rb_execution_context_t *ec = rb_gc_get_ec();
3299 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3300
3301#define MARK_CHECKPOINT(category) do { \
3302 if (categoryp) *categoryp = category; \
3303} while (0)
3304
3305 MARK_CHECKPOINT("vm");
3306 rb_vm_mark(vm);
3307
3308 MARK_CHECKPOINT("end_proc");
3309 rb_mark_end_proc();
3310
3311 MARK_CHECKPOINT("global_tbl");
3312 rb_gc_mark_global_tbl();
3313
3314#if USE_YJIT
3315 void rb_yjit_root_mark(void); // in Rust
3316
3317 if (rb_yjit_enabled_p) {
3318 MARK_CHECKPOINT("YJIT");
3319 rb_yjit_root_mark();
3320 }
3321#endif
3322
3323#if USE_ZJIT
3324 void rb_zjit_root_mark(void);
3325 if (rb_zjit_enabled_p) {
3326 MARK_CHECKPOINT("ZJIT");
3327 rb_zjit_root_mark();
3328 }
3329#endif
3330
3331 MARK_CHECKPOINT("machine_context");
3332 mark_current_machine_context(ec);
3333
3334 MARK_CHECKPOINT("global_symbols");
3335 rb_sym_global_symbols_mark_and_move();
3336
3337 MARK_CHECKPOINT("finish");
3338
3339#undef MARK_CHECKPOINT
3340}
3341
3342struct gc_mark_classext_foreach_arg {
3343 rb_objspace_t *objspace;
3344 VALUE obj;
3345};
3346
3347static void
3348gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3349{
3350 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3351 rb_objspace_t *objspace = foreach_arg->objspace;
3352
3353 if (RCLASSEXT_SUPER(ext)) {
3354 gc_mark_internal(RCLASSEXT_SUPER(ext));
3355 }
3356 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3357
3358 if (!rb_gc_checking_shareable()) {
3359 // unshareable
3360 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3361 gc_mark_internal(RCLASSEXT_CVC_TBL(ext));
3362 }
3363
3364 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3365 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3366 }
3367 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3368 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3369 if (RCLASSEXT_SUBCLASSES(ext)) {
3370 gc_mark_internal(RCLASSEXT_SUBCLASSES(ext));
3371 }
3372 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3373}
3374
3375static void
3376gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3377{
3378 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3379 rb_objspace_t *objspace = foreach_arg->objspace;
3380
3381 if (RCLASSEXT_SUPER(ext)) {
3382 gc_mark_internal(RCLASSEXT_SUPER(ext));
3383 }
3384 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3385 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3386 }
3387 if (RCLASSEXT_INCLUDER(ext)) {
3388 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3389 }
3390 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3391 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3392 if (RCLASSEXT_SUBCLASSES(ext)) {
3393 gc_mark_internal(RCLASSEXT_SUBCLASSES(ext));
3394 }
3395}
3396
3397#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3398
3399void
3400rb_gc_move_obj_during_marking(VALUE from, VALUE to)
3401{
3402 if (rb_obj_using_gen_fields_table_p(to)) {
3403 rb_mark_generic_ivar(from);
3404 }
3405}
3406
3407void
3408rb_gc_mark_children(void *objspace, VALUE obj)
3409{
3410 struct gc_mark_classext_foreach_arg foreach_args;
3411
3412 if (rb_obj_using_gen_fields_table_p(obj)) {
3413 rb_mark_generic_ivar(obj);
3414 }
3415
3416 switch (BUILTIN_TYPE(obj)) {
3417 case T_FLOAT:
3418 case T_BIGNUM:
3419 return;
3420
3421 case T_NIL:
3422 case T_FIXNUM:
3423 rb_bug("rb_gc_mark() called for broken object");
3424 break;
3425
3426 case T_NODE:
3427 UNEXPECTED_NODE(rb_gc_mark);
3428 break;
3429
3430 case T_IMEMO:
3431 rb_imemo_mark_and_move(obj, false);
3432 return;
3433
3434 default:
3435 break;
3436 }
3437
3438 gc_mark_internal(RBASIC(obj)->klass);
3439
3440 switch (BUILTIN_TYPE(obj)) {
3441 case T_CLASS:
3442 if (FL_TEST_RAW(obj, FL_SINGLETON) &&
3443 !rb_gc_checking_shareable()) {
3444 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3445 }
3446 // Continue to the shared T_CLASS/T_MODULE
3447 case T_MODULE:
3448 foreach_args.objspace = objspace;
3449 foreach_args.obj = obj;
3450 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3451 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3452 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3453 }
3454 break;
3455
3456 case T_ICLASS:
3457 foreach_args.objspace = objspace;
3458 foreach_args.obj = obj;
3459 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3460 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3461 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3462 }
3463 break;
3464
3465 case T_ARRAY:
3466 if (ARY_SHARED_P(obj)) {
3467 VALUE root = ARY_SHARED_ROOT(obj);
3468 gc_mark_internal(root);
3469 }
3470 else {
3471 long len = RARRAY_LEN(obj);
3472 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3473 for (long i = 0; i < len; i++) {
3474 gc_mark_internal(ptr[i]);
3475 }
3476 }
3477 break;
3478
3479 case T_HASH:
3480 mark_hash(obj);
3481 break;
3482
3483 case T_SYMBOL:
3484 gc_mark_internal(RSYMBOL(obj)->fstr);
3485 break;
3486
3487 case T_STRING:
3488 if (STR_SHARED_P(obj)) {
3489 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3490 /* Embedded shared strings cannot be moved because this string
3491 * points into the slot of the shared string. There may be code
3492 * using the RSTRING_PTR on the stack, which would pin this
3493 * string but not pin the shared string, causing it to move. */
3494 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3495 }
3496 else {
3497 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3498 }
3499 }
3500 break;
3501
3502 case T_DATA: {
3503 bool typed_data = RTYPEDDATA_P(obj);
3504 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3505
3506 if (typed_data) {
3507 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3508 }
3509
3510 if (ptr) {
3511 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3512 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3513
3514 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3515 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3516 }
3517 }
3518 else {
3519 RUBY_DATA_FUNC mark_func = typed_data ?
3520 RTYPEDDATA_TYPE(obj)->function.dmark :
3521 RDATA(obj)->dmark;
3522 if (mark_func) (*mark_func)(ptr);
3523 }
3524 }
3525
3526 break;
3527 }
3528
3529 case T_OBJECT: {
3530 uint32_t len;
3531 if (rb_obj_shape_complex_p(obj)) {
3532 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3533 len = ROBJECT_FIELDS_COUNT_COMPLEX(obj);
3534 }
3535 else {
3536 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3537
3538 len = ROBJECT_FIELDS_COUNT_NOT_COMPLEX(obj);
3539 for (uint32_t i = 0; i < len; i++) {
3540 gc_mark_internal(ptr[i]);
3541 }
3542 }
3543 break;
3544 }
3545
3546 case T_FILE:
3547 if (RFILE(obj)->fptr) {
3548 gc_mark_internal(RFILE(obj)->fptr->self);
3549 gc_mark_internal(RFILE(obj)->fptr->pathv);
3550 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3551 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3552 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3553 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3554 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3555 gc_mark_internal(RFILE(obj)->fptr->timeout);
3556 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3557 }
3558 break;
3559
3560 case T_REGEXP:
3561 gc_mark_internal(RREGEXP(obj)->src);
3562 break;
3563
3564 case T_MATCH:
3565 gc_mark_internal(RMATCH(obj)->regexp);
3566 if (RMATCH(obj)->str) {
3567 gc_mark_internal(RMATCH(obj)->str);
3568 }
3569 break;
3570
3571 case T_RATIONAL:
3572 gc_mark_internal(RRATIONAL(obj)->num);
3573 gc_mark_internal(RRATIONAL(obj)->den);
3574 break;
3575
3576 case T_COMPLEX:
3577 gc_mark_internal(RCOMPLEX(obj)->real);
3578 gc_mark_internal(RCOMPLEX(obj)->imag);
3579 break;
3580
3581 case T_STRUCT: {
3582 const long len = RSTRUCT_LEN(obj);
3583 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3584
3585 for (long i = 0; i < len; i++) {
3586 gc_mark_internal(ptr[i]);
3587 }
3588
3589 if (rb_obj_shape_has_fields(obj) && !FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3590 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3591 }
3592
3593 break;
3594 }
3595
3596 default:
3597 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3598 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3599 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3600 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3601 BUILTIN_TYPE(obj), (void *)obj,
3602 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3603 }
3604}
3605
3606size_t
3607rb_gc_obj_optimal_size(VALUE obj)
3608{
3609 switch (BUILTIN_TYPE(obj)) {
3610 case T_ARRAY:
3611 {
3612 size_t size = rb_ary_size_as_embedded(obj);
3613 if (rb_gc_size_allocatable_p(size)) {
3614 return size;
3615 }
3616 else {
3617 return sizeof(struct RArray);
3618 }
3619 }
3620
3621 case T_OBJECT:
3622 if (rb_obj_shape_complex_p(obj)) {
3623 return sizeof(struct RObject);
3624 }
3625 else {
3626 size_t size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3627 if (rb_gc_size_allocatable_p(size)) {
3628 return size;
3629 }
3630 else {
3631 return sizeof(struct RObject);
3632 }
3633 }
3634
3635 case T_STRING:
3636 {
3637 size_t size = rb_str_size_as_embedded(obj);
3638 if (rb_gc_size_allocatable_p(size)) {
3639 return size;
3640 }
3641 else {
3642 return sizeof(struct RString);
3643 }
3644 }
3645
3646 case T_HASH:
3647 {
3648 if (RB_OBJ_FROZEN(obj) && RHASH_AR_TABLE_P(obj)) {
3649 return sizeof(struct RHash) + offsetof(ar_table, pairs) + RHASH_AR_TABLE_BOUND(obj) * sizeof(ar_table_pair);
3650 }
3651 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3652 }
3653
3654 default:
3655 return 0;
3656 }
3657}
3658
3659void
3660rb_gc_writebarrier(VALUE a, VALUE b)
3661{
3662 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3663}
3664
3665void
3666rb_gc_writebarrier_unprotect(VALUE obj)
3667{
3668 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3669}
3670
3671/*
3672 * remember `obj' if needed.
3673 */
3674void
3675rb_gc_writebarrier_remember(VALUE obj)
3676{
3677 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3678}
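
/*
 * An illustrative sketch, with hypothetical names, of how these barriers
 * are normally reached from C code: stores into an object's slots should
 * go through RB_OBJ_WRITE(), which invokes rb_gc_writebarrier() so that
 * generational GC records old-to-young edges:
 *
 *     struct node { VALUE payload; };
 *
 *     static void
 *     node_set_payload(VALUE self, struct node *n, VALUE v)
 *     {
 *         RB_OBJ_WRITE(self, &n->payload, v);  // not: n->payload = v;
 *     }
 *
 * Objects that cannot apply the barrier consistently should be marked
 * unprotected with rb_gc_writebarrier_unprotect() instead.
 */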
3679
3680void
3681rb_gc_copy_attributes(VALUE dest, VALUE obj)
3682{
3683 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3684}
3685
3686int
3687rb_gc_modular_gc_loaded_p(void)
3688{
3689#if USE_MODULAR_GC
3690 return rb_gc_functions.modular_gc_loaded_p;
3691#else
3692 return false;
3693#endif
3694}
3695
3696const char *
3697rb_gc_active_gc_name(void)
3698{
3699 const char *gc_name = rb_gc_impl_active_gc_name();
3700
3701 const size_t len = strlen(gc_name);
3702 if (len > RB_GC_MAX_NAME_LEN) {
3703 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3704 RB_GC_MAX_NAME_LEN, len, gc_name);
3705 }
3706
3707 return gc_name;
3708}
3709
3710struct rb_gc_object_metadata_entry *
3711rb_gc_object_metadata(VALUE obj)
3712{
3713 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3714}
3715
3716/* GC */
3717
3718void *
3719rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3720{
3721 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3722}
3723
3724void
3725rb_gc_ractor_cache_free(void *cache)
3726{
3727 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3728}
3729
3730void
3731rb_gc_register_mark_object(VALUE obj)
3732{
3733 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3734 return;
3735
3736 rb_vm_register_global_object(obj);
3737}
3738
3739void
3740rb_gc_register_address(VALUE *addr)
3741{
3742 rb_vm_t *vm = GET_VM();
3743
3744 VALUE obj = *addr;
3745
3746 struct global_object_list *tmp = ALLOC(struct global_object_list);
3747 RB_VM_LOCKING() {
3748 tmp->next = vm->global_object_list;
3749 tmp->varptr = addr;
3750 vm->global_object_list = tmp;
3751 }
3752
3753 /*
3754 * Because some C extensions have assignment-then-register bugs,
3755 * we defensively guard `obj` here so that it does not get swept.
3756 */
3757 RB_GC_GUARD(obj);
3758 if (0 && !SPECIAL_CONST_P(obj)) {
3759 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3760 rb_obj_class(obj));
3761 rb_print_backtrace(stderr);
3762 }
3763}
3764
3765void
3766rb_gc_unregister_address(VALUE *addr)
3767{
3768 rb_vm_t *vm = GET_VM();
3769 struct global_object_list *tmp;
3770 RB_VM_LOCKING() {
3771 tmp = vm->global_object_list;
3772 if (tmp->varptr == addr) {
3773 vm->global_object_list = tmp->next;
3774 SIZED_FREE(tmp);
3775 }
3776 else {
3777 while (tmp->next) {
3778 if (tmp->next->varptr == addr) {
3779 struct global_object_list *t = tmp->next;
3780
3781 tmp->next = tmp->next->next;
3782 SIZED_FREE(t);
3783 break;
3784 }
3785 tmp = tmp->next;
3786 }
3787 }
3788 }
3789}
3790
3791void
3792rb_global_variable(VALUE *var)
3793{
3794 rb_gc_register_address(var);
3795}
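
/*
 * An illustrative sketch, with hypothetical names, of the C-extension
 * pattern this serves: a VALUE held in static storage is invisible to
 * the GC's stack scan, so it must be registered to survive collection:
 *
 *     static VALUE cached_template;  // hypothetical extension-wide cache
 *
 *     void
 *     Init_my_ext(void)
 *     {
 *         cached_template = rb_str_new_cstr("...");
 *         rb_global_variable(&cached_template);  // assign, then register
 *     }
 */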
3796
3797static VALUE
3798gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3799{
3800 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3801
3802 return Qnil;
3803}
3804
3805/*
3806 * rb_objspace_each_objects() is special C API to walk through
3807 * Ruby object space. This C API is too difficult to use correctly.
3808 * Frankly, you should not use it; if you must, read the source code
3809 * of this function first and understand what it does.
3810 *
3811 * 'callback' will be called several times (once per heap page, in the
3812 * current implementation) with:
3813 * vstart: a pointer to the first living object of the heap_page.
3814 * vend: a pointer just past the valid heap_page area.
3815 * stride: the distance to the next VALUE.
3816 *
3817 * If callback() returns non-zero, the iteration will be stopped.
3818 *
3819 * This is sample callback code that iterates over living objects:
3820 *
3821 * static int
3822 * sample_callback(void *vstart, void *vend, size_t stride, void *data)
3823 * {
3824 * VALUE v = (VALUE)vstart;
3825 * for (; v != (VALUE)vend; v += stride) {
3826 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3827 * // do something with live object 'v'
3828 * }
3829 * }
3830 * return 0; // continue to iteration
3831 * }
3832 *
3833 * Note: 'vstart' is not the top of the heap_page. It points to the
3834 * first living object, so that at least one object is grasped, to
3835 * avoid GC issues. This means you cannot walk through all Ruby object
3836 * pages, including pages of freed objects.
3837 *
3838 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3839 * However, 'stride' may carry other values for various reasons, so you
3840 * must use the 'stride' argument instead of a constant value in the
3841 * iteration.
3842 */
3843void
3844rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3845{
3846 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3847}
3848
3849static void
3850gc_ref_update_array(void *objspace, VALUE v)
3851{
3852 if (ARY_SHARED_P(v)) {
3853 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3854
3855 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3856
3857 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3858 // If the root is embedded and its location has changed
3859 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3860 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3861 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3862 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3863 }
3864 }
3865 else {
3866 long len = RARRAY_LEN(v);
3867
3868 if (len > 0) {
3869 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3870 for (long i = 0; i < len; i++) {
3871 UPDATE_IF_MOVED(objspace, ptr[i]);
3872 }
3873 }
3874
3875 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3876 if (rb_ary_embeddable_p(v)) {
3877 rb_ary_make_embedded(v);
3878 }
3879 }
3880 }
3881}
3882
3883static void
3884gc_ref_update_object(void *objspace, VALUE v)
3885{
3886 VALUE *ptr = ROBJECT_FIELDS(v);
3887
3888 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3889 if (rb_obj_shape_complex_p(v)) {
3890 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3891 return;
3892 }
3893
3894 size_t slot_size = rb_gc_obj_slot_size(v);
3895 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3896 if (slot_size >= embed_size) {
3897 // Object can be re-embedded
3898 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3899 SIZED_FREE_N(ptr, ROBJECT_FIELDS_CAPACITY(v));
3900 FL_UNSET_RAW(v, ROBJECT_HEAP);
3901 ptr = ROBJECT(v)->as.ary;
3902 }
3903 }
3904
3905 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3906 UPDATE_IF_MOVED(objspace, ptr[i]);
3907 }
3908}
3909
3910void
3911rb_gc_ref_update_table_values_only(st_table *tbl)
3912{
3913 gc_ref_update_table_values_only(tbl);
3914}
3915
3916/* Update MOVED references in a VALUE=>VALUE st_table */
3917void
3918rb_gc_update_tbl_refs(st_table *ptr)
3919{
3920 gc_update_table_refs(ptr);
3921}
3922
3923static void
3924gc_ref_update_hash(void *objspace, VALUE v)
3925{
3926 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3927}
3928
3929static void
3930gc_update_values(void *objspace, long n, VALUE *values)
3931{
3932 for (long i = 0; i < n; i++) {
3933 UPDATE_IF_MOVED(objspace, values[i]);
3934 }
3935}
3936
3937void
3938rb_gc_update_values(long n, VALUE *values)
3939{
3940 gc_update_values(rb_gc_get_objspace(), n, values);
3941}
3942
3943static enum rb_id_table_iterator_result
3944check_id_table_move(VALUE value, void *data)
3945{
3946 void *objspace = (void *)data;
3947
3948 if (gc_object_moved_p_internal(objspace, (VALUE)value)) {
3949 return ID_TABLE_REPLACE;
3950 }
3951
3952 return ID_TABLE_CONTINUE;
3953}
3954
3955void
3956rb_gc_prepare_heap_process_object(VALUE obj)
3957{
3958 switch (BUILTIN_TYPE(obj)) {
3959 case T_STRING:
3960 // Precompute the string coderange. This both saves time for when it will
3961 // eventually be needed and avoids mutating heap pages after a potential fork.
3962 rb_enc_str_coderange(obj);
3963 break;
3964 default:
3965 break;
3966 }
3967}
3968
3969void
3970rb_gc_prepare_heap(void)
3971{
3972 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3973}
3974
3975size_t
3976rb_gc_heap_id_for_size(size_t size)
3977{
3978 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3979}
3980
3981bool
3982rb_gc_size_allocatable_p(size_t size)
3983{
3984 return rb_gc_impl_size_allocatable_p(size);
3985}
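
/*
 * For orientation, assuming the default GC's heap layout (slot sizes are
 * implementation-defined): slots come in power-of-two multiples of a
 * 40-byte base (40, 80, 160, 320, 640 bytes on 64-bit), so a 600-byte
 * payload is allocatable in a slot while a 1024-byte one is not and must
 * live in malloc'ed memory reached from the object.
 */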
3986
3987static enum rb_id_table_iterator_result
3988update_id_table(VALUE *value, void *data, int existing)
3989{
3990 void *objspace = (void *)data;
3991
3992 if (gc_object_moved_p_internal(objspace, (VALUE)*value)) {
3993 *value = gc_location_internal(objspace, (VALUE)*value);
3994 }
3995
3996 return ID_TABLE_CONTINUE;
3997}
3998
3999static void
4000update_m_tbl(void *objspace, struct rb_id_table *tbl)
4001{
4002 if (tbl) {
4003 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
4004 }
4005}
4006
4007static enum rb_id_table_iterator_result
4008update_const_tbl_i(VALUE value, void *objspace)
4009{
4010 rb_const_entry_t *ce = (rb_const_entry_t *)value;
4011
4012 if (gc_object_moved_p_internal(objspace, ce->value)) {
4013 ce->value = gc_location_internal(objspace, ce->value);
4014 }
4015
4016 if (gc_object_moved_p_internal(objspace, ce->file)) {
4017 ce->file = gc_location_internal(objspace, ce->file);
4018 }
4019
4020 return ID_TABLE_CONTINUE;
4021}
4022
4023static void
4024update_const_tbl(void *objspace, struct rb_id_table *tbl)
4025{
4026 if (!tbl) return;
4027 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
4028}
4029
4030static void
4031update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
4032{
4033 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
4034 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
4035 for (size_t i = 0; i < array_size; i++) {
4036 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
4037 }
4038 }
4039}
4040
4041static void
4042update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
4043{
4044 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
4045 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
4046 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
4047 if (is_iclass) {
4048 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
4049 }
4050}
4051
4052static void
4053update_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
4054{
4055 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
4056 rb_objspace_t *objspace = args->objspace;
4057
4058 if (RCLASSEXT_SUPER(ext)) {
4059 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
4060 }
4061
4062 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
4063
4064 UPDATE_IF_MOVED(objspace, ext->fields_obj);
4065 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
4066 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
4067 }
4068 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
4069 UPDATE_IF_MOVED(objspace, RCLASSEXT_CVC_TBL(ext));
4070 update_superclasses(objspace, ext);
4071 if (RCLASSEXT_SUBCLASSES(ext)) {
4072 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUBCLASSES(ext));
4073 }
4074
4075 update_classext_values(objspace, ext, false);
4076}
4077
4078static void
4079update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
4080{
4081 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
4082 rb_objspace_t *objspace = args->objspace;
4083
4084 if (RCLASSEXT_SUPER(ext)) {
4085 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
4086 }
4087 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
4088 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
4089 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
4090 UPDATE_IF_MOVED(objspace, RCLASSEXT_CVC_TBL(ext));
4091 if (RCLASSEXT_SUBCLASSES(ext)) {
4092 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUBCLASSES(ext));
4093 }
4094
4095 update_classext_values(objspace, ext, true);
4096}
4097
4098struct global_vm_table_foreach_data {
4099    vm_table_foreach_callback_func callback;
4100 vm_table_update_callback_func update_callback;
4101 void *data;
4102 bool weak_only;
4103};
4104
4105static int
4106vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
4107{
4108 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4109
4110 int ret = iter_data->callback((VALUE)key, iter_data->data);
4111
4112 if (!iter_data->weak_only) {
4113 if (ret != ST_CONTINUE) return ret;
4114
4115 ret = iter_data->callback((VALUE)value, iter_data->data);
4116 }
4117
4118 return ret;
4119}
4120
4121static int
4122vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4123{
4124 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4125
4126 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
4127
4128 if (!iter_data->weak_only) {
4129 if (ret != ST_CONTINUE) return ret;
4130
4131 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
4132 }
4133
4134 return ret;
4135}
4136
4137static int
4138vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
4139{
4140 VALUE sym = *sym_ptr;
4141 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4142
4143 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
4144
4145 int ret = iter_data->callback(sym, iter_data->data);
4146
4147 if (ret == ST_REPLACE) {
4148 ret = iter_data->update_callback(sym_ptr, iter_data->data);
4149 }
4150
4151 return ret;
4152}
4153
4154struct st_table *rb_generic_fields_tbl_get(void);
4155
4156static int
4157vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
4158{
4159 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4160
4161 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
4162 int ret = iter_data->callback((VALUE)key, iter_data->data);
4163 if (ret != ST_CONTINUE) return ret;
4164 }
4165
4166 return iter_data->callback((VALUE)value, iter_data->data);
4167}
4168
4169static int
4170vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4171{
4172 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4173
4174 iter_data->update_callback((VALUE *)value, iter_data->data);
4175
4176 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
4177 iter_data->update_callback((VALUE *)key, iter_data->data);
4178 }
4179
4180 return ST_CONTINUE;
4181}
4182
4183static int
4184vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
4185{
4186 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4187
4188 int ret = iter_data->callback((VALUE)key, iter_data->data);
4189
4190 VALUE new_value = (VALUE)value;
4191 VALUE new_key = (VALUE)key;
4192
4193 switch (ret) {
4194 case ST_CONTINUE:
4195 break;
4196
4197 case ST_DELETE:
4198 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
4199 return ST_DELETE;
4200
4201 case ST_REPLACE: {
4202 ret = iter_data->update_callback(&new_key, iter_data->data);
4203 if (key != new_key) {
4204 ret = ST_DELETE;
4205 }
4206 break;
4207 }
4208
4209 default:
4210 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
4211 }
4212
4213 if (!iter_data->weak_only) {
4214 int ivar_ret = iter_data->callback(new_value, iter_data->data);
4215 switch (ivar_ret) {
4216 case ST_CONTINUE:
4217 break;
4218
4219 case ST_REPLACE:
4220 iter_data->update_callback(&new_value, iter_data->data);
4221 break;
4222
4223 default:
4224 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
4225 }
4226 }
4227
4228 if (key != new_key || value != new_value) {
4229 DURING_GC_COULD_MALLOC_REGION_START();
4230 {
4231 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
4232 }
4233 DURING_GC_COULD_MALLOC_REGION_END();
4234 }
4235
4236 return ret;
4237}
4238
4239static int
4240vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
4241{
4242 // int retval = vm_weak_table_foreach_weak_key(key, value, data, error);
4243 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4244 int retval = iter_data->callback(*str, iter_data->data);
4245
4246 if (retval == ST_REPLACE) {
4247 retval = iter_data->update_callback(str, iter_data->data);
4248 }
4249
4250 if (retval == ST_DELETE) {
4251 FL_UNSET(*str, RSTRING_FSTR);
4252 }
4253
4254 return retval;
4255}
4256
4257void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
4258void
4259rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4260 vm_table_update_callback_func update_callback,
4261 void *data,
4262 bool weak_only,
4263 enum rb_gc_vm_weak_tables table)
4264{
4265 rb_vm_t *vm = GET_VM();
4266
4267 struct global_vm_table_foreach_data foreach_data = {
4268 .callback = callback,
4269 .update_callback = update_callback,
4270 .data = data,
4271 .weak_only = weak_only,
4272 };
4273
4274 switch (table) {
4275 case RB_GC_VM_CI_TABLE: {
4276 st_foreach_with_replace(
4277 &vm->ci_table,
4278 vm_weak_table_foreach_weak_key,
4279 vm_weak_table_foreach_update_weak_key,
4280 (st_data_t)&foreach_data
4281 );
4282 break;
4283 }
4284 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4285 st_foreach_with_replace(
4286 &vm->overloaded_cme_table,
4287 vm_weak_table_foreach_weak_key,
4288 vm_weak_table_foreach_update_weak_key,
4289 (st_data_t)&foreach_data
4290 );
4291 break;
4292 }
4293 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4294 rb_sym_global_symbol_table_foreach_weak_reference(
4295 vm_weak_table_sym_set_foreach,
4296 &foreach_data
4297 );
4298 break;
4299 }
4300 case RB_GC_VM_ID2REF_TABLE: {
4301 if (id2ref_tbl) {
4302 st_foreach_with_replace(
4303 id2ref_tbl,
4304 vm_weak_table_id2ref_foreach,
4305 vm_weak_table_id2ref_foreach_update,
4306 (st_data_t)&foreach_data
4307 );
4308 }
4309 break;
4310 }
4311 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4312 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4313 if (generic_fields_tbl) {
4314 st_foreach(
4315 generic_fields_tbl,
4316 vm_weak_table_gen_fields_foreach,
4317 (st_data_t)&foreach_data
4318 );
4319 }
4320 break;
4321 }
4322 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4323 rb_fstring_foreach_with_replace(
4324 vm_weak_table_frozen_strings_foreach,
4325 &foreach_data
4326 );
4327 break;
4328 }
4329 case RB_GC_VM_WEAK_TABLE_COUNT:
4330 rb_bug("Unreachable");
4331 default:
4332 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4333 }
4334}
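/* Sketch of how a GC implementation might drive the dispatcher above (the
 * is_dead()/moved_p()/new_location() helpers are hypothetical placeholders
 * for the implementation's own liveness and forwarding queries):
 *
 *     static int
 *     weak_entry_check(VALUE v, void *data)
 *     {
 *         if (is_dead(v)) return ST_DELETE;   // drop the weak entry
 *         if (moved_p(v)) return ST_REPLACE;  // request the update callback
 *         return ST_CONTINUE;
 *     }
 *
 *     static int
 *     weak_entry_update(VALUE *v, void *data)
 *     {
 *         *v = new_location(*v);              // rewrite to the forwarded address
 *         return ST_CONTINUE;
 *     }
 *
 *     // once per weak table, during sweeping/compaction:
 *     rb_gc_vm_weak_table_foreach(weak_entry_check, weak_entry_update,
 *                                 NULL, true, RB_GC_VM_FROZEN_STRINGS_TABLE);
 */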
4335
4336void
4337rb_gc_update_vm_references(void *objspace)
4338{
4339 rb_execution_context_t *ec = GET_EC();
4340 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4341
4342 rb_vm_update_references(vm);
4343 rb_gc_update_global_tbl();
4344 rb_sym_global_symbols_mark_and_move();
4345
4346#if USE_YJIT
4347 void rb_yjit_root_update_references(void); // in Rust
4348
4349 if (rb_yjit_enabled_p) {
4350 rb_yjit_root_update_references();
4351 }
4352#endif
4353
4354#if USE_ZJIT
4355 void rb_zjit_root_update_references(void); // in Rust
4356
4357 if (rb_zjit_enabled_p) {
4358 rb_zjit_root_update_references();
4359 }
4360#endif
4361}
4362
4363void
4364rb_gc_update_object_references(void *objspace, VALUE obj)
4365{
4366 struct classext_foreach_args args;
4367
4368 switch (BUILTIN_TYPE(obj)) {
4369 case T_CLASS:
4370 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4371 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4372 }
4373 // Continue to the shared T_CLASS/T_MODULE
4374 case T_MODULE:
4375 args.klass = obj;
4376 args.objspace = objspace;
4377 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4378 break;
4379
4380 case T_ICLASS:
4381 args.objspace = objspace;
4382 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4383 break;
4384
4385 case T_IMEMO:
4386 rb_imemo_mark_and_move(obj, true);
4387 return;
4388
4389 case T_NIL:
4390 case T_FIXNUM:
4391 case T_NODE:
4392 case T_MOVED:
4393 case T_NONE:
4394 /* These can't move */
4395 return;
4396
4397 case T_ARRAY:
4398 gc_ref_update_array(objspace, obj);
4399 break;
4400
4401 case T_HASH:
4402 gc_ref_update_hash(objspace, obj);
4403 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4404 break;
4405
4406 case T_STRING:
4407 {
4408 if (STR_SHARED_P(obj)) {
4409 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4410 }
4411
4412            /* If, after the move, the string is not embedded and can fit in the
4413             * slot it's been placed in, then re-embed it. */
4414 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4415 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4416 rb_str_make_embedded(obj);
4417 }
4418 }
4419
4420 break;
4421 }
4422 case T_DATA:
4423 /* Call the compaction callback, if it exists */
4424 {
4425 bool typed_data = RTYPEDDATA_P(obj);
4426 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4427
4428 if (typed_data) {
4429 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4430 }
4431
4432 if (ptr) {
4433 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4434 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4435
4436 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4437 VALUE *ref = (VALUE *)((char *)ptr + offset);
4438 *ref = gc_location_internal(objspace, *ref);
4439 }
4440 }
4441 else if (typed_data) {
4442 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4443 if (compact_func) (*compact_func)(ptr);
4444 }
4445 }
4446 }
4447 break;
4448
4449 case T_OBJECT:
4450 gc_ref_update_object(objspace, obj);
4451 break;
4452
4453 case T_FILE:
4454 if (RFILE(obj)->fptr) {
4455 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4456 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4457 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4458 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4459 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4460 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4461 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4462 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4463 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4464 }
4465 break;
4466 case T_REGEXP:
4467 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4468 break;
4469
4470 case T_SYMBOL:
4471 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4472 break;
4473
4474 case T_FLOAT:
4475 case T_BIGNUM:
4476 break;
4477
4478 case T_MATCH:
4479 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4480
4481 if (RMATCH(obj)->str) {
4482 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4483 }
4484 break;
4485
4486 case T_RATIONAL:
4487 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4488 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4489 break;
4490
4491 case T_COMPLEX:
4492 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4493 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4494
4495 break;
4496
4497 case T_STRUCT:
4498 {
4499 long i, len = RSTRUCT_LEN(obj);
4500 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4501
4502 for (i = 0; i < len; i++) {
4503 UPDATE_IF_MOVED(objspace, ptr[i]);
4504 }
4505
4506 if (RSTRUCT_EMBED_LEN(obj)) {
4507 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4508 UPDATE_IF_MOVED(objspace, ptr[len]);
4509 }
4510 }
4511 else {
4512 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4513 }
4514 }
4515 break;
4516 default:
4517 rb_bug("unreachable");
4518 break;
4519 }
4520
4521 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4522}
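/* Sketch, from the extension side, of the T_DATA compaction protocol handled
 * above (the my_thing type and its fields are hypothetical): movable
 * references are marked with rb_gc_mark_movable() and then rewritten in the
 * dcompact callback via rb_gc_location().
 *
 *     struct my_thing { VALUE name; };
 *
 *     static void
 *     my_thing_mark(void *p)
 *     {
 *         struct my_thing *t = p;
 *         rb_gc_mark_movable(t->name);       // allow the GC to move the reference
 *     }
 *
 *     static void
 *     my_thing_compact(void *p)
 *     {
 *         struct my_thing *t = p;
 *         t->name = rb_gc_location(t->name); // fetch the post-move address
 *     }
 *
 *     static const rb_data_type_t my_thing_type = {
 *         "my_thing",
 *         { my_thing_mark, RUBY_TYPED_DEFAULT_FREE, NULL, my_thing_compact, },
 *         0, 0, RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */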
4523
4524VALUE
4525rb_gc_start(void)
4526{
4527 rb_gc();
4528 return Qnil;
4529}
4530
4531void
4532rb_gc(void)
4533{
4534 unless_objspace(objspace) { return; }
4535
4536 rb_gc_impl_start(objspace, true, true, true, false);
4537}
4538
4539int
4540rb_during_gc(void)
4541{
4542 unless_objspace(objspace) { return FALSE; }
4543
4544 return rb_gc_impl_during_gc_p(objspace);
4545}
4546
4547size_t
4548rb_gc_count(void)
4549{
4550 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4551}
4552
4553static VALUE
4554gc_count(rb_execution_context_t *ec, VALUE self)
4555{
4556 return SIZET2NUM(rb_gc_count());
4557}
4558
4559VALUE
4560rb_gc_latest_gc_info(VALUE key)
4561{
4562 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4563 rb_raise(rb_eTypeError, "non-hash or symbol given");
4564 }
4565
4566 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4567
4568 if (val == Qundef) {
4569 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4570 }
4571
4572 return val;
4573}
4574
4575static VALUE
4576gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4577{
4578 if (NIL_P(arg)) {
4579 arg = rb_hash_new();
4580 }
4581 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4582 rb_raise(rb_eTypeError, "non-hash or symbol given");
4583 }
4584
4585 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4586
4587 if (ret == Qundef) {
4588 GC_ASSERT(SYMBOL_P(arg));
4589
4590 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4591 }
4592
4593 return ret;
4594}
4595
4596size_t
4597rb_gc_stat(VALUE arg)
4598{
4599 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4600 rb_raise(rb_eTypeError, "non-hash or symbol given");
4601 }
4602
4603 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4604
4605 if (ret == Qundef) {
4606 GC_ASSERT(SYMBOL_P(arg));
4607
4608 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4609 }
4610
4611 if (SYMBOL_P(arg)) {
4612 return NUM2SIZET(ret);
4613 }
4614 else {
4615 return 0;
4616 }
4617}
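/* e.g. (sketch): with a Symbol key this C-level mirror of GC.stat returns the
 * numeric value directly; with a Hash argument the hash is filled and 0 is
 * returned here.
 *
 *     size_t gcs = rb_gc_stat(ID2SYM(rb_intern("count")));  // same value as GC.stat(:count)
 */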
4618
4619static VALUE
4620gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4621{
4622 if (NIL_P(arg)) {
4623 arg = rb_hash_new();
4624 }
4625
4626 if (NIL_P(heap_name)) {
4627 if (!RB_TYPE_P(arg, T_HASH)) {
4628 rb_raise(rb_eTypeError, "non-hash given");
4629 }
4630 }
4631 else if (FIXNUM_P(heap_name)) {
4632 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4633 rb_raise(rb_eTypeError, "non-hash or symbol given");
4634 }
4635 }
4636 else {
4637 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4638 }
4639
4640 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4641
4642 if (ret == Qundef) {
4643 GC_ASSERT(SYMBOL_P(arg));
4644
4645 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4646 }
4647
4648 return ret;
4649}
4650
4651static VALUE
4652gc_config_get(rb_execution_context_t *ec, VALUE self)
4653{
4654 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4655 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4656
4657 return cfg_hash;
4658}
4659
4660static VALUE
4661gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4662{
4663 void *objspace = rb_gc_get_objspace();
4664
4665 rb_gc_impl_config_set(objspace, hash);
4666
4667 return Qnil;
4668}
4669
4670static VALUE
4671gc_stress_get(rb_execution_context_t *ec, VALUE self)
4672{
4673 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4674}
4675
4676static VALUE
4677gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4678{
4679 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4680
4681 return flag;
4682}
4683
4684void
4685rb_gc_initial_stress_set(VALUE flag)
4686{
4687 initial_stress = flag;
4688}
4689
4690size_t *
4691rb_gc_heap_sizes(void)
4692{
4693 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4694}
4695
4696VALUE
4697rb_gc_enable(void)
4698{
4699 return rb_objspace_gc_enable(rb_gc_get_objspace());
4700}
4701
4702VALUE
4703rb_objspace_gc_enable(void *objspace)
4704{
4705 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4706 rb_gc_impl_gc_enable(objspace);
4707 return RBOOL(disabled);
4708}
4709
4710static VALUE
4711gc_enable(rb_execution_context_t *ec, VALUE _)
4712{
4713 return rb_gc_enable();
4714}
4715
4716static VALUE
4717gc_disable_no_rest(void *objspace)
4718{
4719 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4720 rb_gc_impl_gc_disable(objspace, false);
4721 return RBOOL(disabled);
4722}
4723
4724VALUE
4725rb_gc_disable_no_rest(void)
4726{
4727 return gc_disable_no_rest(rb_gc_get_objspace());
4728}
4729
4730VALUE
4731rb_gc_disable(void)
4732{
4733 return rb_objspace_gc_disable(rb_gc_get_objspace());
4734}
4735
4736VALUE
4737rb_objspace_gc_disable(void *objspace)
4738{
4739 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4740 rb_gc_impl_gc_disable(objspace, true);
4741 return RBOOL(disabled);
4742}
4743
4744static VALUE
4745gc_disable(rb_execution_context_t *ec, VALUE _)
4746{
4747 return rb_gc_disable();
4748}
4749
4750// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4751void
4752ruby_gc_set_params(void)
4753{
4754 rb_gc_impl_set_params(rb_gc_get_objspace());
4755}
4756
4757void
4758rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4759{
4760 RB_VM_LOCKING() {
4761 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported while during GC");
4762
4763 if (!RB_SPECIAL_CONST_P(obj)) {
4764 rb_vm_t *vm = GET_VM();
4765 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4766 struct gc_mark_func_data_struct mfd = {
4767 .mark_func = func,
4768 .data = data,
4769 };
4770
4771 vm->gc.mark_func_data = &mfd;
4772 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4773 vm->gc.mark_func_data = prev_mfd;
4774 }
4775 }
4776}
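/* Sketch of a debugging helper built on the hook above (count_child and
 * reachable_count are hypothetical):
 *
 *     static void
 *     count_child(VALUE child, void *data)
 *     {
 *         (*(size_t *)data)++;             // one directly reachable child seen
 *     }
 *
 *     static size_t
 *     reachable_count(VALUE obj)
 *     {
 *         size_t n = 0;
 *         rb_objspace_reachable_objects_from(obj, count_child, &n);
 *         return n;
 *     }
 */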
4777
4778struct root_objects_data {
4779    const char *category;
4780 void (*func)(const char *category, VALUE, void *);
4781 void *data;
4782};
4783
4784static void
4785root_objects_from(VALUE obj, void *ptr)
4786{
4787 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4788 (*data->func)(data->category, obj, data->data);
4789}
4790
4791void
4792rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4793{
4794 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_gc_impl_objspace_reachable_objects_from_root() is not supported while during GC");
4795
4796 rb_vm_t *vm = GET_VM();
4797
4798 struct root_objects_data data = {
4799 .func = func,
4800 .data = passing_data,
4801 };
4802
4803 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4804 struct gc_mark_func_data_struct mfd = {
4805 .mark_func = root_objects_from,
4806 .data = &data,
4807 };
4808
4809 vm->gc.mark_func_data = &mfd;
4810 rb_gc_save_machine_context();
4811 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4812 vm->gc.mark_func_data = prev_mfd;
4813}
4814
4815/*
4816 ------------------------------ DEBUG ------------------------------
4817*/
4818
4819static const char *
4820type_name(int type, VALUE obj)
4821{
4822 switch (type) {
4823#define TYPE_NAME(t) case (t): return #t;
4824 TYPE_NAME(T_NONE);
4825 TYPE_NAME(T_OBJECT);
4826 TYPE_NAME(T_CLASS);
4827 TYPE_NAME(T_MODULE);
4828 TYPE_NAME(T_FLOAT);
4829 TYPE_NAME(T_STRING);
4830 TYPE_NAME(T_REGEXP);
4831 TYPE_NAME(T_ARRAY);
4832 TYPE_NAME(T_HASH);
4833 TYPE_NAME(T_STRUCT);
4834 TYPE_NAME(T_BIGNUM);
4835 TYPE_NAME(T_FILE);
4836 TYPE_NAME(T_MATCH);
4837 TYPE_NAME(T_COMPLEX);
4838 TYPE_NAME(T_RATIONAL);
4839 TYPE_NAME(T_NIL);
4840 TYPE_NAME(T_TRUE);
4841 TYPE_NAME(T_FALSE);
4842 TYPE_NAME(T_SYMBOL);
4843 TYPE_NAME(T_FIXNUM);
4844 TYPE_NAME(T_UNDEF);
4845 TYPE_NAME(T_IMEMO);
4846 TYPE_NAME(T_ICLASS);
4847 TYPE_NAME(T_MOVED);
4848 TYPE_NAME(T_ZOMBIE);
4849 case T_DATA:
4850 if (obj && rb_objspace_data_type_name(obj)) {
4851 return rb_objspace_data_type_name(obj);
4852 }
4853 return "T_DATA";
4854#undef TYPE_NAME
4855 }
4856 return "unknown";
4857}
4858
4859static const char *
4860obj_type_name(VALUE obj)
4861{
4862 return type_name(TYPE(obj), obj);
4863}
4864
4865const char *
4866rb_method_type_name(rb_method_type_t type)
4867{
4868 switch (type) {
4869 case VM_METHOD_TYPE_ISEQ: return "iseq";
4870 case VM_METHOD_TYPE_ATTRSET: return "attrset";
4871 case VM_METHOD_TYPE_IVAR: return "ivar";
4872 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4873 case VM_METHOD_TYPE_ALIAS: return "alias";
4874 case VM_METHOD_TYPE_REFINED: return "refined";
4875 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4876 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4877 case VM_METHOD_TYPE_MISSING: return "missing";
4878 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4879 case VM_METHOD_TYPE_UNDEF: return "undef";
4880 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4881 }
4882 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4883}
4884
4885static void
4886rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4887{
4888 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4889 VALUE path = rb_iseq_path(iseq);
4890 int n = ISEQ_BODY(iseq)->location.first_lineno;
4891 snprintf(buff, buff_size, " %s@%s:%d",
4892 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4893 RSTRING_PTR(path), n);
4894 }
4895}
4896
4897static int
4898str_len_no_raise(VALUE str)
4899{
4900 long len = RSTRING_LEN(str);
4901 if (len < 0) return 0;
4902 if (len > INT_MAX) return INT_MAX;
4903 return (int)len;
4904}
4905
4906#define BUFF_ARGS buff + pos, buff_size - pos
4907#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4908#define APPEND_S(s) do { \
4909 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4910 goto end; \
4911 } \
4912 else { \
4913 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4914 } \
4915 } while (0)
4916#define C(c, s) ((c) != 0 ? (s) : " ")
4917
4918static size_t
4919rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4920{
4921 size_t pos = 0;
4922
4923 if (SPECIAL_CONST_P(obj)) {
4924 APPEND_F("%s", obj_type_name(obj));
4925
4926 if (FIXNUM_P(obj)) {
4927 APPEND_F(" %ld", FIX2LONG(obj));
4928 }
4929 else if (SYMBOL_P(obj)) {
4930 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4931 }
4932 }
4933 else {
4934 // const int age = RVALUE_AGE_GET(obj);
4935
4936 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4937 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4938 // TODO: fixme
4939 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4940 // (void *)obj, age,
4941 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4942 // C(RVALUE_MARK_BITMAP(obj), "M"),
4943 // C(RVALUE_PIN_BITMAP(obj), "P"),
4944 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4945 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4946 // C(rb_objspace_garbage_object_p(obj), "G"),
4947 // obj_type_name(obj));
4948 }
4949 else {
4950 /* fake */
4951 // APPEND_F("%p [%dXXXX] %s",
4952 // (void *)obj, age,
4953 // obj_type_name(obj));
4954 }
4955
4956 if (internal_object_p(obj)) {
4957 /* ignore */
4958 }
4959 else if (RBASIC(obj)->klass == 0) {
4960 APPEND_S("(temporary internal)");
4961 }
4962 else if (RTEST(RBASIC(obj)->klass)) {
4963 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4964 if (!NIL_P(class_path)) {
4965 APPEND_F("%s ", RSTRING_PTR(class_path));
4966 }
4967 }
4968 }
4969 end:
4970
4971 return pos;
4972}
4973
4974const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4975
4976static size_t
4977rb_raw_obj_info_buitin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4978{
4979 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4980 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4981
4982 switch (type) {
4983 case T_NODE:
4984 UNEXPECTED_NODE(rb_raw_obj_info);
4985 break;
4986 case T_ARRAY:
4987 if (ARY_SHARED_P(obj)) {
4988 APPEND_S("shared -> ");
4989 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4990 }
4991 else {
4992 APPEND_F("[%s%s%s] ",
4993 C(ARY_EMBED_P(obj), "E"),
4994 C(ARY_SHARED_P(obj), "S"),
4995 C(ARY_SHARED_ROOT_P(obj), "R"));
4996
4997 if (ARY_EMBED_P(obj)) {
4998 APPEND_F("len: %ld (embed)",
4999 RARRAY_LEN(obj));
5000 }
5001 else {
5002 APPEND_F("len: %ld, capa:%ld ptr:%p",
5003 RARRAY_LEN(obj),
5004 RARRAY(obj)->as.heap.aux.capa,
5005 (void *)RARRAY_CONST_PTR(obj));
5006 }
5007 }
5008 break;
5009 case T_STRING: {
5010 APPEND_F("[%s%s] ",
5011 C(FL_TEST(obj, RSTRING_FSTR), "F"),
5012 C(RB_OBJ_FROZEN(obj), "R"));
5013
5014 if (STR_SHARED_P(obj)) {
5015 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
5016 }
5017 else {
5018 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
5019
5020 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
5021 }
5022 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
5023 break;
5024 }
5025 case T_SYMBOL: {
5026 VALUE fstr = RSYMBOL(obj)->fstr;
5027 ID id = RSYMBOL(obj)->id;
5028 if (RB_TYPE_P(fstr, T_STRING)) {
5029 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
5030 }
5031 else {
5032 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
5033 }
5034 break;
5035 }
5036 case T_MOVED: {
5037 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
5038 break;
5039 }
5040 case T_HASH: {
5041 APPEND_F("[%c] %"PRIdSIZE,
5042 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
5043 RHASH_SIZE(obj));
5044 break;
5045 }
5046 case T_CLASS:
5047 case T_MODULE:
5048 {
5049 VALUE class_path = rb_class_path_cached(obj);
5050 if (!NIL_P(class_path)) {
5051 APPEND_F("%s", RSTRING_PTR(class_path));
5052 }
5053 else {
5054 APPEND_S("(anon)");
5055 }
5056 break;
5057 }
5058 case T_ICLASS:
5059 {
5060 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
5061 if (!NIL_P(class_path)) {
5062 APPEND_F("src:%s", RSTRING_PTR(class_path));
5063 }
5064 break;
5065 }
5066 case T_OBJECT:
5067 {
5068 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
5069 if (rb_obj_shape_complex_p(obj)) {
5070 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
5071 APPEND_F("(complex) len:%zu", hash_len);
5072 }
5073 else {
5074 APPEND_F("(embed) len:%d capa:%d", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj));
5075 }
5076 }
5077 else {
5078 APPEND_F("len:%d capa:%d ptr:%p", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
5079 }
5080 }
5081 break;
5082 case T_DATA: {
5083 const struct rb_block *block;
5084 const rb_iseq_t *iseq;
5085 if (rb_obj_is_proc(obj) &&
5086 (block = vm_proc_block(obj)) != NULL &&
5087 (vm_block_type(block) == block_type_iseq) &&
5088 (iseq = vm_block_iseq(block)) != NULL) {
5089 rb_raw_iseq_info(BUFF_ARGS, iseq);
5090 }
5091 else if (rb_ractor_p(obj)) {
5092 rb_ractor_t *r = (void *)DATA_PTR(obj);
5093 if (r) {
5094 APPEND_F("r:%d", r->pub.id);
5095 }
5096 }
5097 break;
5098 }
5099 case T_IMEMO: {
5100 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
5101
5102 switch (imemo_type(obj)) {
5103 case imemo_ment:
5104 {
5105 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
5106
5107 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
5108 rb_id2name(me->called_id),
5109 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
5110 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
5111 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
5112 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
5113 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
5114 me->def ? rb_method_type_name(me->def->type) : "NULL",
5115 me->def ? me->def->aliased : -1,
5116 (void *)me->owner, // obj_info(me->owner),
5117 (void *)me->defined_class); //obj_info(me->defined_class)));
5118
5119 if (me->def) {
5120 switch (me->def->type) {
5121 case VM_METHOD_TYPE_ISEQ:
5122 APPEND_S(" (iseq:");
5123 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
5124 APPEND_S(")");
5125 break;
5126 default:
5127 break;
5128 }
5129 }
5130
5131 break;
5132 }
5133 case imemo_iseq: {
5134 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
5135 rb_raw_iseq_info(BUFF_ARGS, iseq);
5136 break;
5137 }
5138 case imemo_callinfo:
5139 {
5140 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
5141 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
5142 rb_id2name(vm_ci_mid(ci)),
5143 vm_ci_flag(ci),
5144 vm_ci_argc(ci),
5145 vm_ci_kwarg(ci) ? "available" : "NULL");
5146 break;
5147 }
5148 case imemo_callcache:
5149 {
5150 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
5151 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
5152 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
5153
5154 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
5155 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
5156 cme ? rb_id2name(cme->called_id) : "<NULL>",
5157 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
5158 (void *)cme,
5159 (void *)(uintptr_t)vm_cc_call(cc));
5160 break;
5161 }
5162 default:
5163 break;
5164 }
5165 }
5166 default:
5167 break;
5168 }
5169 }
5170 end:
5171
5172 return pos;
5173}
5174
5175#undef C
5176
5177#ifdef RUBY_ASAN_ENABLED
5178void
5179rb_asan_poison_object(VALUE obj)
5180{
5181 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5182 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
5183}
5184
5185void
5186rb_asan_unpoison_object(VALUE obj, bool newobj_p)
5187{
5188 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5189 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
5190}
5191
5192void *
5193rb_asan_poisoned_object_p(VALUE obj)
5194{
5195 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5196 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
5197}
5198#endif
5199
5200static void
5201raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5202{
5203 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
5204 pos = rb_raw_obj_info_buitin_type(buff, buff_size, obj, pos);
5205 if (pos >= buff_size) {} // truncated
5206}
5207
5208const char *
5209rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5210{
5211 void *objspace = rb_gc_get_objspace();
5212
5213 if (SPECIAL_CONST_P(obj)) {
5214 raw_obj_info(buff, buff_size, obj);
5215 }
5216 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
5217 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
5218 }
5219#if 0 // maybe no need to check it?
5220 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
5221 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
5222 }
5223#endif
5224 else {
5225 asan_unpoisoning_object(obj) {
5226 raw_obj_info(buff, buff_size, obj);
5227 }
5228 }
5229 return buff;
5230}
5231
5232#undef APPEND_S
5233#undef APPEND_F
5234#undef BUFF_ARGS
5235
5236/* Increments *var atomically and resets *var to 0 when maxval is
5237 * reached. Returns the wraparound old *var value (0...maxval). */
5238static rb_atomic_t
5239atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5240{
5241 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5242 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5243 const rb_atomic_t newval = oldval + 1;
5244 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5245 oldval %= maxval;
5246 }
5247 return oldval;
5248}
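/* Worked example of the contract above (sketch only):
 *
 *     static rb_atomic_t idx;              // starts at 0
 *     atomic_inc_wraparound(&idx, 3);      // returns 0, idx == 1
 *     atomic_inc_wraparound(&idx, 3);      // returns 1, idx == 2
 *     atomic_inc_wraparound(&idx, 3);      // returns 2, idx == 0 (wrapped)
 *     atomic_inc_wraparound(&idx, 3);      // returns 0, idx == 1
 */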
5249
5250static const char *
5251obj_info(VALUE obj)
5252{
5253 if (RGENGC_OBJ_INFO) {
5254 static struct {
5255 rb_atomic_t index;
5256 char buffers[10][0x100];
5257 } info = {0};
5258
5259 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5260 char *const buff = info.buffers[index];
5261 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5262 }
5263 return obj_type_name(obj);
5264}
5265
5266/*
5267 ------------------------ Extended allocator ------------------------
5268*/
5269
5270struct gc_raise_tag {
5271    VALUE exc;
5272 const char *fmt;
5273 va_list *ap;
5274};
5275
5276static void *
5277gc_vraise(void *ptr)
5278{
5279 struct gc_raise_tag *argv = ptr;
5280 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5281 UNREACHABLE_RETURN(NULL);
5282}
5283
5284static void
5285gc_raise(VALUE exc, const char *fmt, ...)
5286{
5287 va_list ap;
5288 va_start(ap, fmt);
5289 struct gc_raise_tag argv = {
5290 exc, fmt, &ap,
5291 };
5292
5293 if (ruby_native_thread_p()) {
5294        rb_thread_call_with_gvl(gc_vraise, &argv);
5295        UNREACHABLE;
5296 }
5297 else {
5298 /* Not in a ruby thread */
5299 fprintf(stderr, "%s", "[FATAL] ");
5300 vfprintf(stderr, fmt, ap);
5301 }
5302
5303 va_end(ap);
5304 abort();
5305}
5306
5307NORETURN(static void negative_size_allocation_error(const char *));
5308static void
5309negative_size_allocation_error(const char *msg)
5310{
5311 gc_raise(rb_eNoMemError, "%s", msg);
5312}
5313
5314static void *
5315ruby_memerror_body(void *dummy)
5316{
5317 rb_memerror();
5318 return 0;
5319}
5320
5321NORETURN(static void ruby_memerror(void));
5323static void
5324ruby_memerror(void)
5325{
5326 if (ruby_thread_has_gvl_p()) {
5327 rb_memerror();
5328 }
5329 else {
5330 if (ruby_native_thread_p()) {
5331 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5332 }
5333 else {
5334 /* no ruby thread */
5335 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5336 }
5337 }
5338
5339 /* We have discussions whether we should die here; */
5340 /* We might rethink about it later. */
5341 exit(EXIT_FAILURE);
5342}
5343
5344void
5345rb_memerror(void)
5346{
5347 /* the `GET_VM()->special_exceptions` below assumes that
5348 * the VM is reachable from the current thread. We should
5349 * definitely make sure of that. */
5350 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5351
5352 rb_execution_context_t *ec = GET_EC();
5353 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5354
5355 if (!exc ||
5356 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5357 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5358 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5359 exit(EXIT_FAILURE);
5360 }
5361 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5362 rb_ec_raised_clear(ec);
5363 }
5364 else {
5365 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5366 exc = ruby_vm_special_exception_copy(exc);
5367 }
5368 ec->errinfo = exc;
5369 EC_JUMP_TAG(ec, TAG_RAISE);
5370}
5371
5372bool
5373rb_memerror_reentered(void)
5374{
5375 rb_execution_context_t *ec = GET_EC();
5376 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5377}
5378
5379static void *
5380handle_malloc_failure(void *ptr)
5381{
5382 if (LIKELY(ptr)) {
5383 return ptr;
5384 }
5385 else {
5386 ruby_memerror();
5387 UNREACHABLE_RETURN(ptr);
5388 }
5389}
5390
5391static void *ruby_xmalloc_body(size_t size);
5392
5393void *
5394ruby_xmalloc(size_t size)
5395{
5396 return handle_malloc_failure(ruby_xmalloc_body(size));
5397}
5398
5399static bool
5400malloc_gc_allowed(void)
5401{
5402 rb_ractor_t *r = rb_current_ractor_raw(false);
5403
5404 return r == NULL || !r->malloc_gc_disabled;
5405}
5406
5407static void *
5408ruby_xmalloc_body(size_t size)
5409{
5410 if ((ssize_t)size < 0) {
5411 negative_size_allocation_error("too large allocation size");
5412 }
5413
5414 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5415}
5416
5417void
5418ruby_malloc_size_overflow(size_t count, size_t elsize)
5419{
5420 rb_raise(rb_eArgError,
5421 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5422 count, elsize);
5423}
5424
5425void
5426ruby_malloc_add_size_overflow(size_t x, size_t y)
5427{
5428 rb_raise(rb_eArgError,
5429 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5430 x, y);
5431}
5432
5433static void *ruby_xmalloc2_body(size_t n, size_t size);
5434
5435void *
5436ruby_xmalloc2(size_t n, size_t size)
5437{
5438 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5439}
5440
5441static void *
5442ruby_xmalloc2_body(size_t n, size_t size)
5443{
5444 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5445}
5446
5447static void *ruby_xcalloc_body(size_t n, size_t size);
5448
5449void *
5450ruby_xcalloc(size_t n, size_t size)
5451{
5452 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5453}
5454
5455static void *
5456ruby_xcalloc_body(size_t n, size_t size)
5457{
5458 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5459}
5460
5461static void *ruby_xrealloc_sized_body(void *ptr, size_t new_size, size_t old_size);
5462
5463#ifdef ruby_xrealloc_sized
5464#undef ruby_xrealloc_sized
5465#endif
5466void *
5467ruby_xrealloc_sized(void *ptr, size_t new_size, size_t old_size)
5468{
5469 return handle_malloc_failure(ruby_xrealloc_sized_body(ptr, new_size, old_size));
5470}
5471
5472static void *
5473ruby_xrealloc_sized_body(void *ptr, size_t new_size, size_t old_size)
5474{
5475 if ((ssize_t)new_size < 0) {
5476 negative_size_allocation_error("too large allocation size");
5477 }
5478
5479 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5480}
5481
5482void *
5483ruby_xrealloc(void *ptr, size_t new_size)
5484{
5485 return ruby_xrealloc_sized(ptr, new_size, 0);
5486}
5487
5488static void *ruby_xrealloc2_sized_body(void *ptr, size_t n, size_t size, size_t old_n);
5489
5490#ifdef ruby_xrealloc2_sized
5491#undef ruby_xrealloc2_sized
5492#endif
5493void *
5494ruby_xrealloc2_sized(void *ptr, size_t n, size_t size, size_t old_n)
5495{
5496 return handle_malloc_failure(ruby_xrealloc2_sized_body(ptr, n, size, old_n));
5497}
5498
5499static void *
5500ruby_xrealloc2_sized_body(void *ptr, size_t n, size_t size, size_t old_n)
5501{
5502 size_t len = xmalloc2_size(n, size);
5503 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5504}
5505
5506void *
5507ruby_xrealloc2(void *ptr, size_t n, size_t size)
5508{
5509 return ruby_xrealloc2_sized(ptr, n, size, 0);
5510}
5511
5512#ifdef ruby_xfree_sized
5513#undef ruby_xfree_sized
5514#endif
5515void
5516ruby_xfree_sized(void *x, size_t size)
5517{
5518 if (LIKELY(x)) {
5519 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5520 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5521 * that case. */
5522 if (LIKELY(GET_VM())) {
5523 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5524 }
5525 else {
5526 ruby_mimfree(x);
5527 }
5528 }
5529}
5530
5531void
5532ruby_xfree(void *x)
5533{
5534 ruby_xfree_sized(x, 0);
5535}
5536
5537void *
5538rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5539{
5540 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5541 return ruby_xmalloc(w);
5542}
5543
5544void *
5545rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5546{
5547 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5548 return ruby_xcalloc(w, 1);
5549}
5550
5551void *
5552rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5553{
5554 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5555 return ruby_xrealloc((void *)p, w);
5556}
5557
5558void *
5559rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5560{
5561 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5562 return ruby_xmalloc(u);
5563}
5564
5565void *
5566rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5567{
5568 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5569 return ruby_xcalloc(u, 1);
5570}
5571
5572/* Mimic ruby_xmalloc, but need not rb_objspace.
5573 * should return pointer suitable for ruby_xfree
5574 */
5575void *
5576ruby_mimmalloc(size_t size)
5577{
5578 void *mem;
5579#if CALC_EXACT_MALLOC_SIZE
5580 size += sizeof(struct malloc_obj_info);
5581#endif
5582 mem = malloc(size);
5583#if CALC_EXACT_MALLOC_SIZE
5584 if (!mem) {
5585 return NULL;
5586 }
5587 else
5588 /* set 0 for consistency of allocated_size/allocations */
5589 {
5590 struct malloc_obj_info *info = mem;
5591 info->size = 0;
5592 mem = info + 1;
5593 }
5594#endif
5595 return mem;
5596}
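/* Sketch of the intended contract (hypothetical early-boot caller): the
 * returned pointer behaves like a ruby_xmalloc() result, so it may later be
 * released with ruby_xfree() (or ruby_mimfree()) even though no objspace
 * existed at allocation time.
 *
 *     void *scratch = ruby_mimmalloc(64);
 *     if (scratch) {
 *         // ... use the 64-byte buffer during early boot ...
 *         ruby_xfree(scratch);
 *     }
 */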
5597
5598void *
5599ruby_mimcalloc(size_t num, size_t size)
5600{
5601 void *mem;
5602#if CALC_EXACT_MALLOC_SIZE
5603 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5604 if (UNLIKELY(t.overflowed)) {
5605 return NULL;
5606 }
5607 size = t.result + sizeof(struct malloc_obj_info);
5608 mem = calloc1(size);
5609 if (!mem) {
5610 return NULL;
5611 }
5612 else
5613 /* set 0 for consistency of allocated_size/allocations */
5614 {
5615 struct malloc_obj_info *info = mem;
5616 info->size = 0;
5617 mem = info + 1;
5618 }
5619#else
5620 mem = calloc(num, size);
5621#endif
5622 return mem;
5623}
5624
5625void
5626ruby_mimfree(void *ptr)
5627{
5628#if CALC_EXACT_MALLOC_SIZE
5629 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5630 ptr = info;
5631#endif
5632 free(ptr);
5633}
5634
5635void
5636rb_gc_adjust_memory_usage(ssize_t diff)
5637{
5638 unless_objspace(objspace) { return; }
5639
5640 rb_gc_impl_adjust_memory_usage(objspace, diff);
5641}
5642
5643const char *
5644rb_obj_info(VALUE obj)
5645{
5646 return obj_info(obj);
5647}
5648
5649void
5650rb_obj_info_dump(VALUE obj)
5651{
5652 char buff[0x100];
5653 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5654}
5655
5656void
5657rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5658{
5659 char buff[0x100];
5660 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5661}
5662
5663void
5664rb_gc_before_fork(void)
5665{
5666 rb_gc_impl_before_fork(rb_gc_get_objspace());
5667}
5668
5669void
5670rb_gc_after_fork(rb_pid_t pid)
5671{
5672 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5673}
5674
5675bool
5676rb_gc_obj_shareable_p(VALUE obj)
5677{
5678 return RB_OBJ_SHAREABLE_P(obj);
5679}
5680
5681void
5682rb_gc_rp(VALUE obj)
5683{
5684 rp(obj);
5685}
5686
5687struct check_shareable_data {
5688    VALUE parent;
5689 long err_count;
5690};
5691
5692static void
5693check_shareable_i(const VALUE child, void *ptr)
5694{
5695 struct check_shareable_data *data = (struct check_shareable_data *)ptr;
5696
5697 if (!rb_gc_obj_shareable_p(child)) {
5698 fprintf(stderr, "(a) ");
5699 rb_gc_rp(data->parent);
5700 fprintf(stderr, "(b) ");
5701 rb_gc_rp(child);
5702 fprintf(stderr, "check_shareable_i: shareable (a) -> unshareable (b)\n");
5703
5704 data->err_count++;
5705 rb_bug("!! violate shareable constraint !!");
5706 }
5707}
5708
5709static bool gc_checking_shareable = false;
5710
5711static void
5712gc_verify_shareable(void *objspace, VALUE obj, void *data)
5713{
5714    // While gc_checking_shareable is true, other Ractors should not run the GC;
5715    // the VM lock is needed because the flag is global (not Ractor-local).
5716 // TODO: remove VM locking if the flag is Ractor local
5717
5718 unsigned int lev = RB_GC_VM_LOCK();
5719 {
5720 gc_checking_shareable = true;
5721 rb_objspace_reachable_objects_from(obj, check_shareable_i, (void *)data);
5722 gc_checking_shareable = false;
5723 }
5724 RB_GC_VM_UNLOCK(lev);
5725}
5726
5727// TODO: only one level (non-recursive)
5728void
5729rb_gc_verify_shareable(VALUE obj)
5730{
5731 rb_objspace_t *objspace = rb_gc_get_objspace();
5732 struct check_shareable_data data = {
5733 .parent = obj,
5734 .err_count = 0,
5735 };
5736 gc_verify_shareable(objspace, obj, &data);
5737
5738 if (data.err_count > 0) {
5739 rb_bug("rb_gc_verify_shareable");
5740 }
5741}
5742
5743bool
5744rb_gc_checking_shareable(void)
5745{
5746 return gc_checking_shareable;
5747}
5748
5749/*
5750 * Document-module: ObjectSpace
5751 *
5752 * The ObjectSpace module contains a number of routines
5753 * that interact with the garbage collection facility and allow you to
5754 * traverse all living objects with an iterator.
5755 *
5756 * ObjectSpace also provides support for object finalizers, procs that will be
5757 * called after a specific object was destroyed by garbage collection. See
5758 * the documentation for +ObjectSpace.define_finalizer+ for important
5759 * information on how to use this method correctly.
5760 *
5761 * a = "A"
5762 * b = "B"
5763 *
5764 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5765 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5766 *
5767 * a = nil
5768 * b = nil
5769 *
5770 * _produces:_
5771 *
5772 * Finalizer two on 537763470
5773 * Finalizer one on 537763480
5774 */
5775
5776/* Document-class: GC::Profiler
5777 *
5778 * The GC profiler provides access to information on GC runs including time,
5779 * length and object space size.
5780 *
5781 * Example:
5782 *
5783 * GC::Profiler.enable
5784 *
5785 * require 'rdoc/rdoc'
5786 *
5787 * GC::Profiler.report
5788 *
5789 * GC::Profiler.disable
5790 *
5791 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5792 */
5793
5794#include "gc.rbinc"
5795
5796void
5797Init_GC(void)
5798{
5799#undef rb_intern
5800 rb_gc_register_address(&id2ref_value);
5801
5802 malloc_offset = gc_compute_malloc_offset();
5803
5804 rb_mGC = rb_define_module("GC");
5805
5806 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5807
5808 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5809
5810 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5811 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5812
5813 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5814
5815 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5816
5817 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5818 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5819
5820 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5821
5822 rb_gc_impl_init();
5823}
5824
5825// Set a name for the anonymous virtual memory area. `addr` is the starting
5826// address of the area and `size` is its length in bytes. `name` is a
5827// NUL-terminated human-readable string.
5828//
5829// This function is usually called after calling `mmap()`. The human-readable
5830// annotation helps developers identify the call site of `mmap()` that created
5831// the memory mapping.
5832//
5833// This function currently only works on Linux 5.17 or higher. After calling
5834// this function, we can see annotations in the form of "[anon:...]" in
5835// `/proc/self/maps`, where `...` is the content of `name`. This function has
5836// no effect when called on other platforms.
5837void
5838ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5839{
5840#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5841 // The name length cannot exceed 80 (including the '\0').
5842 RUBY_ASSERT(strlen(name) < 80);
5843 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5844 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5845 // reasons.
5846 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5847 // 2. addr is an invalid address.
5848 // 3. The string pointed by name is too long.
5849 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5850 // happen if we run the compiled binary on an old kernel. In theory, all
5851 // other errors should result in a failure. But since EINVAL cannot tell
5852 // the first error from others, and this function is mainly used for
5853 // debugging, we silently ignore the error.
5854 errno = 0;
5855#endif
5856}
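/* Usage sketch (hypothetical caller, not part of this file):
 *
 *     #include <sys/mman.h>
 *
 *     void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     if (mem != MAP_FAILED) {
 *         ruby_annotate_mmap(mem, size, "Ruby:my_pool");
 *     }
 *
 * On Linux 5.17+ the mapping then appears as "[anon:Ruby:my_pool]" in
 * /proc/self/maps; on other platforms the call is a no-op.
 */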
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:199
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
#define RUBY_ATOMIC_VALUE_CAS(var, oldval, newval)
Identical to RUBY_ATOMIC_CAS, except it expects its arguments are VALUE.
Definition atomic.h:406
#define RUBY_ATOMIC_SIZE_FETCH_ADD(var, val)
Identical to RUBY_ATOMIC_FETCH_ADD, except it expects its arguments to be size_t.
Definition atomic.h:235
#define RUBY_ATOMIC_CAS(var, oldval, newval)
Atomic compare-and-swap.
Definition atomic.h:165
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ATOMIC_FETCH_ADD(var, val)
Atomically replaces the value pointed by var with the result of addition of val to the old value of v...
Definition atomic.h:118
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_module_function(klass, mid, func, arity)
Defines klass#mid and makes it a module function.
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RUBY_INTERNAL_EVENT_NEWOBJ
Object allocated.
Definition event.h:93
static bool RB_OBJ_FROZEN(VALUE obj)
Checks if an object is frozen.
Definition fl_type.h:711
@ RUBY_FL_WB_PROTECTED
Definition fl_type.h:186
VALUE rb_define_module(const char *name)
Defines a top-level module.
Definition class.c:1509
int rb_scan_args(int argc, const VALUE *argv, const char *fmt,...)
Retrieves argument from argc and argv to given VALUE references according to the format string.
Definition class.c:3061
#define T_COMPLEX
Old name of RUBY_T_COMPLEX.
Definition value_type.h:59
#define TYPE(_)
Old name of rb_type.
Definition value_type.h:108
#define FL_SINGLETON
Old name of RUBY_FL_SINGLETON.
Definition fl_type.h:58
#define T_FILE
Old name of RUBY_T_FILE.
Definition value_type.h:62
#define FL_UNSET_RAW
Old name of RB_FL_UNSET_RAW.
Definition fl_type.h:130
#define ALLOC
Old name of RB_ALLOC.
Definition memory.h:400
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define xfree
Old name of ruby_xfree.
Definition xmalloc.h:58
#define T_MASK
Old name of RUBY_T_MASK.
Definition value_type.h:68
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define T_NIL
Old name of RUBY_T_NIL.
Definition value_type.h:72
#define UNREACHABLE
Old name of RBIMPL_UNREACHABLE.
Definition assume.h:28
#define T_FLOAT
Old name of RUBY_T_FLOAT.
Definition value_type.h:64
#define T_IMEMO
Old name of RUBY_T_IMEMO.
Definition value_type.h:67
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define T_BIGNUM
Old name of RUBY_T_BIGNUM.
Definition value_type.h:57
#define SPECIAL_CONST_P
Old name of RB_SPECIAL_CONST_P.
#define T_STRUCT
Old name of RUBY_T_STRUCT.
Definition value_type.h:79
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
Definition fl_type.h:131
#define T_FIXNUM
Old name of RUBY_T_FIXNUM.
Definition value_type.h:63
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
Definition assume.h:29
#define SYM2ID
Old name of RB_SYM2ID.
Definition symbol.h:45
#define T_DATA
Old name of RUBY_T_DATA.
Definition value_type.h:60
#define FIXNUM_FLAG
Old name of RUBY_FIXNUM_FLAG.
#define LL2NUM
Old name of RB_LL2NUM.
Definition long_long.h:30
#define CLASS_OF
Old name of rb_class_of.
Definition globals.h:205
#define T_NONE
Old name of RUBY_T_NONE.
Definition value_type.h:74
#define T_NODE
Old name of RUBY_T_NODE.
Definition value_type.h:73
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define FL_FINALIZE
Old name of RUBY_FL_FINALIZE.
Definition fl_type.h:61
#define T_MODULE
Old name of RUBY_T_MODULE.
Definition value_type.h:70
#define ASSUME
Old name of RBIMPL_ASSUME.
Definition assume.h:27
#define T_TRUE
Old name of RUBY_T_TRUE.
Definition value_type.h:81
#define T_RATIONAL
Old name of RUBY_T_RATIONAL.
Definition value_type.h:76
#define T_ICLASS
Old name of RUBY_T_ICLASS.
Definition value_type.h:66
#define T_HASH
Old name of RUBY_T_HASH.
Definition value_type.h:65
#define FL_ABLE
Old name of RB_FL_ABLE.
Definition fl_type.h:118
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:128
#define rb_ary_new3
Old name of rb_ary_new_from_args.
Definition array.h:658
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define T_FALSE
Old name of RUBY_T_FALSE.
Definition value_type.h:61
#define ULL2NUM
Old name of RB_ULL2NUM.
Definition long_long.h:31
#define T_UNDEF
Old name of RUBY_T_UNDEF.
Definition value_type.h:82
#define FLONUM_P
Old name of RB_FLONUM_P.
#define Qtrue
Old name of RUBY_Qtrue.
#define T_ZOMBIE
Old name of RUBY_T_ZOMBIE.
Definition value_type.h:83
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
Definition long.h:46
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define T_OBJECT
Old name of RUBY_T_OBJECT.
Definition value_type.h:75
#define NIL_P
Old name of RB_NIL_P.
#define NUM2ULL
Old name of RB_NUM2ULL.
Definition long_long.h:35
#define FL_WB_PROTECTED
Old name of RUBY_FL_WB_PROTECTED.
Definition fl_type.h:59
#define T_SYMBOL
Old name of RUBY_T_SYMBOL.
Definition value_type.h:80
#define T_MATCH
Old name of RUBY_T_MATCH.
Definition value_type.h:69
#define T_CLASS
Old name of RUBY_T_CLASS.
Definition value_type.h:58
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
Definition value_type.h:85
#define T_MOVED
Old name of RUBY_T_MOVED.
Definition value_type.h:71
#define FL_TEST
Old name of RB_FL_TEST.
Definition fl_type.h:127
#define xcalloc
Old name of ruby_xcalloc.
Definition xmalloc.h:55
#define FL_UNSET
Old name of RB_FL_UNSET.
Definition fl_type.h:129
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define NUM2SIZET
Old name of RB_NUM2SIZE.
Definition size_t.h:61
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
#define T_REGEXP
Old name of RUBY_T_REGEXP.
Definition value_type.h:77
size_t ruby_stack_length(VALUE **p)
Queries what Ruby thinks is the machine stack.
Definition gc.c:2848
int ruby_stack_check(void)
Checks for stack overflow.
Definition gc.c:2888
void rb_category_warn(rb_warning_category_t category, const char *fmt,...)
Identical to rb_category_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:477
VALUE rb_eNoMemError
NoMemoryError exception.
Definition error.c:1438
VALUE rb_eRangeError
RangeError exception.
Definition error.c:1431
#define ruby_verbose
This variable controls whether the interpreter is in debug mode.
Definition error.h:476
VALUE rb_eTypeError
TypeError exception.
Definition error.c:1427
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
Definition error.c:467
@ RB_WARN_CATEGORY_DEPRECATED
Warning is for deprecated features.
Definition error.h:48
VALUE rb_mKernel
Kernel module.
Definition object.c:60
VALUE rb_cObject
Object class.
Definition object.c:61
VALUE rb_mGC
GC module.
Definition gc.c:410
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
Definition object.c:235
VALUE rb_cBasicObject
BasicObject class.
Definition object.c:59
VALUE rb_class_real(VALUE klass)
Finds a "real" class.
Definition object.c:226
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
Definition object.c:894
VALUE rb_to_int(VALUE val)
Identical to rb_check_to_int(), except it raises in case of conversion mismatch.
Definition object.c:3335
#define RB_POSFIXABLE(_)
Checks if the passed value is in range of fixnum, assuming it is a positive number.
Definition fixnum.h:43
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1121
Defines RBIMPL_HAS_BUILTIN.
void rb_ary_free(VALUE ary)
Destroys the given array for no reason.
#define RETURN_ENUMERATOR(obj, argc, argv)
Identical to RETURN_SIZED_ENUMERATOR(), except its size is unknown.
Definition enumerator.h:242
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
Definition error.h:284
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
Definition proc.c:988
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:122
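The entries above combine into the usual preamble of a C-implemented iterator method; a hedged sketch assuming general API usage:

    static VALUE
    each_thing(int argc, VALUE *argv, VALUE self)
    {
        rb_check_arity(argc, 0, 0);          /* no positional arguments */
        RETURN_ENUMERATOR(self, argc, argv); /* no block: Enumerator */
        VALUE blk = rb_block_proc();         /* capture the given block */
        return rb_obj_is_proc(blk);          /* trivially Qtrue here */
    }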
void rb_str_free(VALUE str)
Destroys the given string for no reason.
Definition string.c:1754
size_t rb_str_capacity(VALUE str)
Queries the capacity of the given string.
Definition string.c:1005
VALUE rb_class_path_cached(VALUE mod)
Just another name of rb_mod_name.
Definition variable.c:389
void rb_free_generic_ivar(VALUE obj)
Frees the list of instance variables.
Definition variable.c:1318
void rb_undef_alloc_func(VALUE klass)
Deletes the allocator function of a class.
Definition vm_method.c:1742
VALUE rb_check_funcall(VALUE recv, ID mid, int argc, const VALUE *argv)
Identical to rb_funcallv(), except it returns RUBY_Qundef instead of raising rb_eNoMethodError.
Definition vm_eval.c:690
rb_alloc_func_t rb_get_alloc_func(VALUE klass)
Queries the allocator function of a class.
Definition vm_method.c:1751
int rb_obj_respond_to(VALUE obj, ID mid, int private_p)
Identical to rb_respond_to(), except it additionally takes the visibility parameter.
Definition vm_method.c:3469
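Assumed usage of rb_check_funcall(): probe for an optional method without rescuing NoMethodError. Note the sentinel is RUBY_Qundef, not nil:

    static VALUE
    to_path_or_self(VALUE obj)
    {
        VALUE path = rb_check_funcall(obj, rb_intern("to_path"), 0, NULL);
        return UNDEF_P(path) ? obj : path;   /* Qundef: method missing */
    }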
VALUE rb_sym2str(VALUE symbol)
Obtains a frozen string representation of a symbol (not including the leading colon).
Definition symbol.c:1024
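A sketch pairing the SYMBOL_P guard (listed earlier) with rb_sym2str():

    static VALUE
    sym_name(VALUE v)
    {
        if (!SYMBOL_P(v)) rb_raise(rb_eTypeError, "not a symbol");
        return rb_sym2str(v);  /* :foo -> frozen "foo", no colon */
    }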
int rb_io_fptr_finalize(rb_io_t *fptr)
Destroys the given IO.
Definition io.c:5701
int len
Length of the buffer.
Definition io.h:8
static bool rb_ractor_shareable_p(VALUE obj)
Queries if multiple Ractors can share the passed object or not.
Definition ractor.h:249
#define RB_OBJ_SHAREABLE_P(obj)
Queries if the passed object has previously been classified as shareable.
Definition ractor.h:235
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
Definition thread.c:2063
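A sketch of re-entering the GVL from code running without it (for example inside an rb_thread_call_without_gvl() region); touching any VALUE requires holding the lock:

    static void *
    with_gvl_cb(void *arg)
    {
        rb_p((VALUE)arg);          /* safe: the GVL is held here */
        return NULL;
    }

    static void
    from_gvl_free_code(VALUE obj)
    {
        rb_thread_call_with_gvl(with_gvl_cb, (void *)obj);
    }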
VALUE rb_yield(VALUE val)
Yields the block.
Definition vm_eval.c:1376
#define RBIMPL_ATTR_MAYBE_UNUSED()
Wraps (or simulates) [[maybe_unused]]
#define RB_GC_GUARD(v)
Prevents premature destruction of local objects.
Definition memory.h:167
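The canonical RB_GC_GUARD() pattern: keep an otherwise-dead local alive while C code still reads memory borrowed from it. use_bytes() is a hypothetical consumer:

    extern void use_bytes(const char *p);    /* hypothetical */

    static void
    guarded(void)
    {
        VALUE s = rb_str_new_cstr("payload");
        const char *p = RSTRING_PTR(s);
        use_bytes(p);    /* s has no later Ruby-level use... */
        RB_GC_GUARD(s);  /* ...so force it to stay live until here */
    }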
VALUE type(ANYARGS)
ANYARGS-ed function type.
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
#define RARRAY(obj)
Convenient casting macro.
Definition rarray.h:44
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
Definition rarray.h:52
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
Definition rbasic.h:166
#define RBASIC(obj)
Convenient casting macro.
Definition rbasic.h:40
#define RCLASS(obj)
Convenient casting macro.
Definition rclass.h:38
VALUE rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
This is the primitive way to wrap an existing C struct into RData.
Definition gc.c:1141
#define DATA_PTR(obj)
Convenient getter macro.
Definition rdata.h:67
VALUE rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
Identical to rb_data_object_wrap(), except it allocates a new data region internally instead of taking an existing one.
Definition gc.c:1158
#define RDATA(obj)
Convenient casting macro.
Definition rdata.h:59
#define RUBY_DEFAULT_FREE
This is a value you can set to RData::dfree.
Definition rdata.h:78
void(* RUBY_DATA_FUNC)(void *)
This is the type of callbacks registered to RData.
Definition rdata.h:104
#define RUBY_NEVER_FREE
This is a value you can set to RData::dfree.
Definition rdata.h:85
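A sketch of the untyped RData path built from the entries above; struct point is hypothetical, and RUBY_DEFAULT_FREE asks the GC to reclaim the region with ruby_xfree():

    struct point { double x, y; };           /* hypothetical payload */

    static VALUE
    point_alloc(VALUE klass)
    {
        struct point *p = ruby_xcalloc(1, sizeof(*p));
        /* dmark is NULL: the struct holds no VALUEs to mark */
        return rb_data_object_wrap(klass, p, NULL, RUBY_DEFAULT_FREE);
    }

    static double
    point_x(VALUE obj)
    {
        struct point *p = DATA_PTR(obj);     /* unwrap the C pointer */
        return p->x;
    }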
#define RFILE(obj)
Convenient casting macro.
Definition rfile.h:50
#define RHASH_SIZE(h)
Queries the size of the hash.
Definition rhash.h:69
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
Definition rhash.h:79
#define RMATCH(obj)
Convenient casting macro.
Definition rmatch.h:37
#define ROBJECT(obj)
Convenient casting macro.
Definition robject.h:43
static VALUE * ROBJECT_FIELDS(VALUE obj)
Queries the instance variables.
Definition robject.h:133
#define RREGEXP(obj)
Convenient casting macro.
Definition rregexp.h:37
#define RREGEXP_PTR(obj)
Convenient accessor macro.
Definition rregexp.h:45
#define RSTRING(obj)
Convenient casting macro.
Definition rstring.h:41
static long RSTRUCT_LEN(VALUE st)
Returns the number of struct members.
Definition rstruct.h:82
static bool RTYPEDDATA_P(VALUE obj)
Checks whether the passed object is RTypedData or RData.
Definition rtypeddata.h:669
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
Definition rtypeddata.h:81
VALUE rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
This is the primitive way to wrap an existing C struct into RTypedData.
Definition gc.c:1188
VALUE rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
Identical to rb_data_typed_object_wrap(), except it allocates a new data region internally instead of taking an existing one.
Definition gc.c:1198
#define RUBY_TYPED_FREE_IMMEDIATELY
Flag declaring that the dfree callback never unlocks the GVL, so the data can be freed immediately during GC sweep instead of being deferred.
Definition rtypeddata.h:122
static const rb_data_type_t * RTYPEDDATA_TYPE(VALUE obj)
Queries for the type of given object.
Definition rtypeddata.h:687
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
Definition rtypeddata.h:531
#define RTYPEDDATA(obj)
Convenient casting macro.
Definition rtypeddata.h:96
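A sketch of the preferred typed path; struct counter and counter_type are hypothetical. Setting RUBY_TYPED_FREE_IMMEDIATELY is safe here because the dfree callback never calls back into Ruby:

    struct counter { long n; };               /* hypothetical payload */

    static const rb_data_type_t counter_type = {
        .wrap_struct_name = "counter",
        .function = {
            .dmark = NULL,                    /* holds no VALUEs */
            .dfree = RUBY_TYPED_DEFAULT_FREE, /* plain ruby_xfree */
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY,
    };

    static VALUE
    counter_new(VALUE klass)
    {
        struct counter *c = ruby_xcalloc(1, sizeof(*c));
        return TypedData_Wrap_Struct(klass, &counter_type, c);
    }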
const char * rb_obj_classname(VALUE obj)
Queries the name of the class of the passed object.
Definition variable.c:515
void rb_p(VALUE obj)
Inspects an object.
Definition io.c:9064
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
int ruby_native_thread_p(void)
Queries if the thread calling this function is a Ruby thread.
Definition thread.c:5815
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
#define RTEST
Old name of RB_TEST.
Defines old _.
#define _(args)
This was a transition path from K&R to ANSI.
Definition stdarg.h:35
Ruby's array.
Definition rarray.h:128
Ruby object's base components.
Definition rbasic.h:69
Definition rdata.h:120
RUBY_DATA_FUNC dfree
This function is called when the object is no longer used.
Definition rdata.h:143
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rdata.h:134
void * data
Pointer to the actual C level struct that you want to wrap.
Definition rdata.h:149
Definition hash.h:53
Regular expression execution context.
Definition rmatch.h:79
union RMatch::as
"Registers" of a match.
struct rmatch_offset * char_offset
Capture group offsets, in C array.
Definition rmatch.h:98
int char_offset_num_allocated
Number of rmatch_offset entries that char_offset holds.
Definition rmatch.h:95
int num_regs
Number of capture-group registers.
Definition rmatch.h:101
Ruby's ordinal objects.
Definition robject.h:85
Ruby's String.
Definition rstring.h:196
"Typed" user data.
Definition rtypeddata.h:384
void * data
Pointer to the actual C level struct that you want to wrap.
Definition rtypeddata.h:404
VALUE fields_obj
Direct reference to the slots that hold instance variables, if any.
Definition rtypeddata.h:390
Definition method.h:63
Definition constant.h:33
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:229
struct rb_data_type_struct::function
Function pointers.
RUBY_DATA_FUNC dfree
This function is called when the object is no longer used.
Definition rtypeddata.h:259
RUBY_DATA_FUNC dcompact
This function is called when the object is relocated.
Definition rtypeddata.h:280
const char * wrap_struct_name
Name of structs of this kind.
Definition rtypeddata.h:236
RUBY_DATA_FUNC dmark
This function is called when the object is experiencing GC marks.
Definition rtypeddata.h:250
VALUE flags
Type-specific behavioural characteristics.
Definition rtypeddata.h:343
Definition gc_impl.h:15
Ruby's IO, metadata and buffers.
Definition io.h:295
Definition method.h:55
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Represents the region of a capture group.
Definition rmatch.h:65
Definition st.h:79
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
intptr_t SIGNED_VALUE
A signed integer type that has the same width as VALUE.
Definition value.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376
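Unlike BUILTIN_TYPE earlier in this list, RB_TYPE_P() is safe on special constants, so no guard is needed; a one-line sketch:

    static bool
    is_sym(VALUE v)
    {
        return RB_TYPE_P(v, RUBY_T_SYMBOL);  /* fine even for Qnil */
    }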
ruby_value_type
C-level type of an object.
Definition value_type.h:113
@ RUBY_T_MASK
Bitmask of ruby_value_type.
Definition value_type.h:145