gc.c — Ruby 4.0.0dev (revision 7eb088084a4d6b93de511b359ce457f3559fcec3)
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/concurrent_set.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "variable.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"
#include "zjit.h"

#include "builtin.h"
#include "shape.h"
unsigned int
rb_gc_vm_lock(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave(&lev, file, line);
}

unsigned int
rb_gc_cr_lock(const char *file, int line)
{
    unsigned int lev;
    rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
}

unsigned int
rb_gc_vm_lock_no_barrier(const char *file, int line)
{
    unsigned int lev = 0;
    rb_vm_lock_enter_nb(&lev, file, line);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
{
    rb_vm_lock_leave_nb(&lev, file, line);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}

void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

#if USE_MODULAR_GC
void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}
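
/* Illustrative sketch (not part of this file): the internal NEWOBJ event
 * gated above can be observed from a C extension through the public
 * TracePoint C API in ruby/debug.h. The callback name and what it does
 * are hypothetical.
 *
 *     static void
 *     newobj_hook(VALUE tpval, void *data)
 *     {
 *         rb_trace_arg_t *targ = rb_tracearg_from_tracepoint(tpval);
 *         VALUE obj = rb_tracearg_object(targ);
 *         (void)obj; // e.g. record an allocation-site counter
 *     }
 *
 *     VALUE tp = rb_tracepoint_new(Qnil, RUBY_INTERNAL_EVENT_NEWOBJ,
 *                                  newobj_hook, NULL);
 *     rb_tracepoint_enable(tp);
 *
 * Until such a hook is enabled, ruby_vm_event_flags stays clear and
 * rb_gc_event_hook() returns after a single flag test. */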

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}


void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    if (RB_LIKELY(ruby_single_main_ractor)) {
        GC_ASSERT(
            ccan_list_empty(&GET_VM()->ractor.set) ||
            (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
             ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
        );

        func(ruby_single_main_ractor->newobj_cache, data);
    }
    else {
        ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
            func(r->newobj_cache, data);
        }
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
    ec->cfp = saved.cfp, \
    ec->cfp->sp = saved.sp, \
    ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    ASSERT_vm_unlocking();
    rb_ractor_ignore_belonging(true);
    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
    rb_ractor_ignore_belonging(false);
#undef RESTORE_FINALIZER
}

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_ractor_p(obj)) return false;
        if (rb_obj_is_fstring_table(obj)) return false;
        if (rb_obj_is_symbol_table(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_obj_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    RBASIC_SET_SHAPE_ID(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    return (uint32_t)rb_shape_transition_heap(obj, heap_id);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */
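
/* Usage sketch for the macro above (the caller shown is hypothetical):
 *
 *     void
 *     some_gc_entry_point(void)
 *     {
 *         unless_objspace(objspace) return;
 *         // ... safe to use objspace here ...
 *     }
 *
 * The macro expands to a declaration plus an `if`, so the statement the
 * caller writes right after it becomes the `else` branch and runs only
 * when no VM (and therefore no objspace) is available. */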

#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (gc_object_moved_p_internal((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
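
/* Compaction sketch: a per-type reference-update function applies these
 * macros to every VALUE-bearing slot of an object. The struct and field
 * names here are hypothetical:
 *
 *     static void
 *     gc_ref_update_example(void *objspace, struct RExample *p, long n)
 *     {
 *         UPDATE_IF_MOVED(objspace, p->klass);
 *         for (long i = 0; i < n; i++) {
 *             UPDATE_IF_MOVED(objspace, p->fields[i]);
 *         }
 *     }
 *
 * Each slot is rewritten in place only when the GC reports that its
 * referent moved during compaction. */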

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but 0B with jemalloc.
    // Conversely, malloc(56) will waste 0B with glibc, but 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
    // no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif
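
/* Worked example of the probe above (sizes are illustrative and vary by
 * allocator version):
 *
 *   glibc 2.35:  malloc(64) -> usable 72, 8B wasted
 *                malloc(56) -> usable 56, 0B wasted   => malloc_offset = 8
 *   jemalloc 5:  malloc(64) -> usable 64, 0B wasted   => malloc_offset = 0
 *
 * Requesting `power_of_two - malloc_offset` bytes then lands exactly on an
 * allocator size class, so the usable size equals the requested size. */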

size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)1 << (64 - nlz_int64(new_capacity));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
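
/* Worked example (assuming malloc_offset == 8, as probed for glibc above):
 * growing from 10 elements of 8 bytes each:
 *
 *   current_capacity = 10 * 8            =  80 bytes
 *   doubled                              = 160 bytes
 *   rounded up to the next power of two  = 256 bytes
 *   minus malloc_offset                  = 248 bytes
 *   divided by type_size                 =  31 elements
 *
 * malloc(248) then fills a 256-byte glibc chunk exactly, so the growth
 * wastes nothing to allocator rounding. */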

static inline struct rbimpl_size_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_overflow_tag u = rbimpl_size_add_overflow(t.result, z);
    return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed, u.result };
}

static inline struct rbimpl_size_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_overflow_tag v = rbimpl_size_add_overflow(t.result, u.result);
    return (struct rbimpl_size_overflow_tag) { t.overflowed || u.overflowed || v.overflowed, v.result };
}
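
/* Usage sketch: the overflow bits compose, so a whole size expression is
 * computed and checked once. Sizing a hypothetical header plus two
 * variable-length arrays might look like:
 *
 *     struct rbimpl_size_overflow_tag t =
 *         size_mul_add_mul_overflow(nkeys, sizeof(VALUE), nbins, sizeof(int));
 *     if (t.overflowed) rb_raise(rb_eArgError, "size too big");
 *     void *buf = ruby_xmalloc(t.result);
 *
 * Every intermediate overflow flag is OR-ed forward, so the single check
 * at the end catches an overflow anywhere in x * y + z * w. */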

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.overflowed)) {
        return t.result;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.overflowed)) {
        return t.result;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.overflowed)) {
        return t.result;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*calloc)(void *objspace_ptr, size_t size, bool gc_allowed);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    // Weak references
    void (*declare_weak_references)(void *objspace_ptr, VALUE obj);
    bool (*handle_weak_references_alive_p)(void *objspace_ptr, VALUE obj);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
         * not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric, dash, and underscore are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
                (prefix_len >= (tail = rb_strlen_lit(lit)) && \
                 memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_free);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    // Weak references
    load_modular_gc_func(declare_weak_references);
    load_modular_gc_func(handle_weak_references_alive_p);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(object_metadata);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}
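
/* Usage sketch: with a GC implementation built as a shared object under the
 * modular GC directory ("mmtk" is only an example name), selecting it is:
 *
 *     $ RUBY_GC_LIBRARY=mmtk ruby -v
 *
 * which makes the code above dlopen()
 * <modular_gc_dir>/librubygc.mmtk.<DLEXT> and resolve every rb_gc_impl_*
 * symbol from that library. Without the environment variable, the function
 * pointers fall back to the statically linked default GC. */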

// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
// Weak references
# define rb_gc_impl_declare_weak_references rb_gc_functions.declare_weak_references
# define rb_gc_impl_handle_weak_references_alive_p rb_gc_functions.handle_weak_references_alive_p
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;
    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(VALUE obj)
{
#if RUBY_DEBUG
    // IMEMOs and objects without a class (e.g. managed ID tables) are not traceable
    if (RB_TYPE_P(obj, T_IMEMO) || !CLASS_OF(obj)) return;

    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        const VALUE *iseq_encoded = ISEQ_BODY(cfp->iseq)->iseq_encoded;
        const VALUE *iseq_encoded_end = iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size;
        RUBY_ASSERT(cfp->pc >= iseq_encoded, "PC not set when allocating, breaking tracing");
        RUBY_ASSERT(cfp->pc <= iseq_encoded_end, "PC not set when allocating, breaking tracing");
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, shape_id_t shape_id, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, wb_protected, size);
    RBASIC_SET_SHAPE_ID_NO_CHECKS(obj, shape_id);

    gc_validate_pc(obj);

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        unsigned int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            size_t slot_size = rb_gc_obj_slot_size(obj);
            if (slot_size > RVALUE_SIZE) {
                memset((char *)obj + RVALUE_SIZE, 0, slot_size - RVALUE_SIZE);
            }

            /* We must disable GC here because the callback could call xmalloc
             * which could potentially trigger a GC, and a lot of code is unsafe
             * to trigger a GC right after an object has been allocated because
             * they perform initialization for the object and assume that the
             * GC does not trigger before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);
    }

#if RGENGC_CHECK_MODE
# ifndef GC_DEBUG_SLOT_FILL_SPECIAL_VALUE
#  define GC_DEBUG_SLOT_FILL_SPECIAL_VALUE 255
# endif

    memset(
        (void *)(obj + sizeof(struct RBasic)),
        GC_DEBUG_SLOT_FILL_SPECIAL_VALUE,
        rb_gc_obj_slot_size(obj) - sizeof(struct RBasic)
    );
#endif

    return obj;
}

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, shape_id, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, shape_id_t shape_id, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, shape_id, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    if (klass) rb_data_object_check(klass);
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA, ROOT_SHAPE_ID, !dmark, sizeof(struct RTypedData));

    struct RData *data = (struct RData *)obj;
    data->dmark = dmark;
    data->dfree = dfree;
    data->data = datap;

    return obj;
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    VALUE obj = newobj_of(GET_RACTOR(), klass, T_DATA | RUBY_TYPED_FL_IS_TYPED_DATA, ROOT_SHAPE_ID, wb_protected, size);

    struct RTypedData *data = (struct RTypedData *)obj;
    data->fields_obj = 0;
    *(VALUE *)&data->type = ((VALUE)type) | typed_flag;
    data->data = datap;

    return obj;
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
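
/* Usage sketch (hypothetical type): an embeddable TypedData must also be
 * RUBY_TYPED_FREE_IMMEDIATELY, and is then allocated inline in the slot
 * whenever it fits:
 *
 *     struct point { double x, y; };
 *
 *     static const rb_data_type_t point_type = {
 *         .wrap_struct_name = "point",
 *         .function = { .dfree = RUBY_TYPED_DEFAULT_FREE, },
 *         .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE,
 *     };
 *
 *     struct point *p;
 *     VALUE obj = TypedData_Make_Struct(klass, struct point, &point_type, p);
 *
 * TypedData_Make_Struct reaches rb_data_typed_object_zalloc() above, which
 * embeds the struct after the RTypedData header when the total size is
 * allocatable and falls back to a separate xcalloc'd buffer otherwise. */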

static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

void
rb_gc_declare_weak_references(VALUE obj)
{
    rb_gc_impl_declare_weak_references(rb_gc_get_objspace(), obj);
}

bool
rb_gc_handle_weak_references_alive_p(VALUE obj)
{
    if (SPECIAL_CONST_P(obj)) return true;

    return rb_gc_impl_handle_weak_references_alive_p(rb_gc_get_objspace(), obj);
}

extern const rb_data_type_t rb_weakmap_type;
void rb_wmap_handle_weak_references(VALUE obj);
extern const rb_data_type_t rb_weakkeymap_type;
void rb_wkmap_handle_weak_references(VALUE obj);

extern const rb_data_type_t rb_fiber_data_type;
void rb_fiber_handle_weak_references(VALUE obj);

extern const rb_data_type_t rb_cont_data_type;
void rb_cont_handle_weak_references(VALUE obj);

void
rb_gc_handle_weak_references(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (RTYPEDDATA_P(obj)) {
            const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);

            if (type == &rb_fiber_data_type) {
                rb_fiber_handle_weak_references(obj);
            }
            else if (type == &rb_cont_data_type) {
                rb_cont_handle_weak_references(obj);
            }
            else if (type == &rb_weakmap_type) {
                rb_wmap_handle_weak_references(obj);
            }
            else if (type == &rb_weakkeymap_type) {
                rb_wkmap_handle_weak_references(obj);
            }
            else {
                rb_bug("rb_gc_handle_weak_references: unknown TypedData %s", RTYPEDDATA_TYPE(obj)->wrap_struct_name);
            }
        }
        else {
            rb_bug("rb_gc_handle_weak_references: unknown T_DATA");
        }
        break;

      case T_IMEMO: {
        GC_ASSERT(imemo_type(obj) == imemo_callcache);

        struct rb_callcache *cc = (struct rb_callcache *)obj;
        if (!rb_gc_handle_weak_references_alive_p(cc->klass)) {
            vm_cc_invalidate(cc);
        }

        break;
      }
      default:
        rb_bug("rb_gc_handle_weak_references: type not supported");
    }
}

static void
io_fptr_finalize(void *fptr)
{
    rb_io_fptr_finalize((struct rb_io *)fptr);
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(objspace, obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

struct classext_foreach_args {
    VALUE klass;
    rb_objspace_t *objspace; // used for update_*
};

static void
classext_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_class_classext_free(args->klass, ext, is_prime);
}

static void
classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
{
    struct classext_foreach_args *args = (struct classext_foreach_args *)arg;

    rb_iclass_classext_free(args->klass, ext, is_prime);
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    struct classext_foreach_args args;

    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
            if (rb_shape_obj_too_complex_p(obj)) {
                RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
                st_free_table(ROBJECT_FIELDS_HASH(obj));
            }
            else {
                xfree(ROBJECT(obj)->as.heap.fields);
                RB_DEBUG_COUNTER_INC(obj_obj_ptr);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        break;
      case T_MODULE:
      case T_CLASS:
#if USE_ZJIT
        rb_zjit_klass_free(obj);
#endif
        args.klass = obj;
        rb_class_classext_foreach(obj, classext_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }
        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        args.klass = obj;

        rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
        if (RCLASS_CLASSEXT_TBL(obj)) {
            st_free_table(RCLASS_CLASSEXT_TBL(obj));
        }

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST_RAW(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (obj == rb_mRubyVMFrozenCore)
                return 1;

            if (!RBASIC_CLASS(obj)) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (such as <code>Fixnum</code>s, static <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *      Job = Class.new
 *      jobs = [Job.new, Job.new]
 *      count = ObjectSpace.each_object(Job) {|x| p x }
 *      puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *      #<Job:0x000000011d6cbbf0>
 *      #<Job:0x000000011d6cbc68>
 *      Total count: 2
 *
 *  Due to a current Ractor implementation issue, this method does not yield
 *  Ractor-unshareable objects when the process is in multi-Ractor mode.
 *  Multi-Ractor mode is enabled once <code>Ractor.new</code> has been called
 *  for the first time. See https://bugs.ruby-lang.org/issues/19387 for more
 *  information.
 *
 *      a = 12345678987654321 # shareable
 *      b = [].freeze         # shareable
 *      c = {}                # not shareable
 *      ObjectSpace.each_object {|x| x } # yields a, b, and c
 *      Ractor.new {}                    # enter multi-Ractor mode
 *      ObjectSpace.each_object {|x| x } # does not yield c
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  has been destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *      class Foo
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *        end
 *
 *        def self.create_finalizer(data_needed_for_finalization)
 *          proc {
 *            puts "finalizing #{data_needed_for_finalization}"
 *          }
 *        end
 *      end
 *
 *      class Bar
 *        class Remover
 *          def initialize(data_needed_for_finalization)
 *            @data_needed_for_finalization = data_needed_for_finalization
 *          end
 *
 *          def call(id)
 *            puts "finalizing #{@data_needed_for_finalization}"
 *          end
 *        end
 *
 *        def initialize(data_needed_for_finalization)
 *          ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *        end
 *      end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *      class CapturesSelf
 *        def initialize(name)
 *          ObjectSpace.define_finalizer(self, proc {
 *            # this finalizer will only be run on exit
 *            puts "finalizing #{name}"
 *          })
 *        end
 *      end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return !SPECIAL_CONST_P(obj) && rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
static VALUE id2ref_value = 0;
static st_table *id2ref_tbl = NULL;

#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
static size_t object_id_counter = 1;
#else
static unsigned long long object_id_counter = 1;
#endif

static inline VALUE
generate_next_object_id(void)
{
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
    // 64bit atomics are available
    return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
#else
    unsigned int lock_lev = RB_GC_VM_LOCK();
    VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
    RB_GC_VM_UNLOCK(lock_lev);
    return id;
#endif
}
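
/* Worked example: RUBY_IMMEDIATE_MASK is 0x7 on common 64-bit builds, so
 * OBJ_ID_INCREMENT is 8 and successive counter-generated IDs are 8, 16,
 * 24, ... Keeping every ID a multiple of the increment keeps the low tag
 * bits clear, so a counter-generated ID can never collide with the IDs
 * derived from immediate VALUEs (e.g. a Fixnum's ID is odd). */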

void
rb_gc_obj_id_moved(VALUE obj)
{
    if (UNLIKELY(id2ref_tbl)) {
        st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
    }
}

static int
object_id_cmp(st_data_t x, st_data_t y)
{
    if (RB_TYPE_P(x, T_BIGNUM)) {
        return !rb_big_eql(x, y);
    }
    else {
        return x != y;
    }
}

static st_index_t
object_id_hash(st_data_t n)
{
    return FIX2LONG(rb_hash((VALUE)n));
}

static const struct st_hash_type object_id_hash_type = {
    object_id_cmp,
    object_id_hash,
};

static void gc_mark_tbl_no_pin(st_table *table);

static void
id2ref_tbl_mark(void *data)
{
    st_table *table = (st_table *)data;
    if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
        // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
        rb_mark_set(table);
    }
    // We purposely don't mark values, as they are weak references.
    // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
}

static size_t
id2ref_tbl_memsize(const void *data)
{
    return rb_st_memsize(data);
}

static void
id2ref_tbl_free(void *data)
{
    id2ref_tbl = NULL; // clear global ref
    st_table *table = (st_table *)data;
    st_free_table(table);
}

static const rb_data_type_t id2ref_tbl_type = {
    .wrap_struct_name = "VM/_id2ref_table",
    .function = {
        .dmark = id2ref_tbl_mark,
        .dfree = id2ref_tbl_free,
        .dsize = id2ref_tbl_memsize,
        // dcompact function not required because the table is reference updated
        // in rb_gc_vm_weak_table_foreach
    },
    .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
};

static VALUE
class_object_id(VALUE klass)
{
    VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
    if (!id) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        id = generate_next_object_id();
        VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
        if (existing_id) {
            id = existing_id;
        }
        else if (RB_UNLIKELY(id2ref_tbl)) {
            st_insert(id2ref_tbl, id, klass);
        }
        RB_GC_VM_UNLOCK(lock_lev);
    }
    return id;
}

static inline VALUE
object_id_get(VALUE obj, shape_id_t shape_id)
{
    VALUE id;
    if (rb_shape_too_complex_p(shape_id)) {
        id = rb_obj_field_get(obj, ROOT_TOO_COMPLEX_WITH_OBJ_ID);
    }
    else {
        id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
    }

#if RUBY_DEBUG
    if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
        rb_p(obj);
        rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
    }
#endif

    return id;
}

static VALUE
object_id0(VALUE obj)
{
    VALUE id = Qfalse;
    shape_id_t shape_id = RBASIC_SHAPE_ID(obj);

    if (rb_shape_has_object_id(shape_id)) {
        return object_id_get(obj, shape_id);
    }

    shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);

    id = generate_next_object_id();
    rb_obj_field_set(obj, object_id_shape_id, 0, id);

    RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
    RUBY_ASSERT(rb_shape_obj_has_id(obj));

    if (RB_UNLIKELY(id2ref_tbl)) {
        RB_VM_LOCKING() {
            st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
        }
    }
    return id;
}

static VALUE
object_id(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_CLASS:
      case T_MODULE:
        // With Ruby Box, classes and modules have different fields
        // in different boxes, so we cannot store the object id
        // in fields.
        return class_object_id(obj);
      case T_IMEMO:
        RUBY_ASSERT(IMEMO_TYPE_P(obj, imemo_fields));
        break;
      default:
        break;
    }

    if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
        unsigned int lock_lev = RB_GC_VM_LOCK();
        VALUE id = object_id0(obj);
        RB_GC_VM_UNLOCK(lock_lev);
        return id;
    }

    return object_id0(obj);
}
2008
2009static void
2010build_id2ref_i(VALUE obj, void *data)
2011{
2012 st_table *id2ref_tbl = (st_table *)data;
2013
2014 switch (BUILTIN_TYPE(obj)) {
2015 case T_CLASS:
2016 case T_MODULE:
2017 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2018 if (RCLASS(obj)->object_id) {
2019 st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
2020 }
2021 break;
2022 case T_IMEMO:
2023 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2024 if (IMEMO_TYPE_P(obj, imemo_fields) && rb_shape_obj_has_id(obj)) {
2025 st_insert(id2ref_tbl, rb_obj_id(obj), rb_imemo_fields_owner(obj));
2026 }
2027 break;
2028 case T_OBJECT:
2029 RUBY_ASSERT(!rb_objspace_garbage_object_p(obj));
2030 if (rb_shape_obj_has_id(obj)) {
2031 st_insert(id2ref_tbl, rb_obj_id(obj), obj);
2032 }
2033 break;
2034 default:
2035 // For generic_fields, the T_IMEMO/fields object is responsible for populating the entry.
2036 break;
2037 }
2038}
2039
2040static VALUE
2041object_id_to_ref(void *objspace_ptr, VALUE object_id)
2042{
2043 rb_objspace_t *objspace = objspace_ptr;
2044
2045 unsigned int lev = RB_GC_VM_LOCK();
2046
2047 if (!id2ref_tbl) {
2048 rb_gc_vm_barrier(); // stop other ractors
2049
2050 // GC must not trigger while we build the table; otherwise, if we end
2051 // up freeing an object that had an ID, we might try to delete it from
2052 // the table even though it hasn't been inserted yet.
2053 st_table *tmp_id2ref_tbl = st_init_table(&object_id_hash_type);
2054 VALUE tmp_id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, tmp_id2ref_tbl);
2055
2056 // build_id2ref_i will almost certainly malloc, which could trigger GC and
2057 // sweep objects we just added to the table. Calling rb_gc_disable() also
2058 // saves us from having to handle potentially garbage objects.
2059 bool gc_disabled = RTEST(rb_gc_disable());
2060 {
2061 id2ref_tbl = tmp_id2ref_tbl;
2062 id2ref_value = tmp_id2ref_value;
2063
2064 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
2065 }
2066 if (!gc_disabled) rb_gc_enable();
2067 }
2068
2069 VALUE obj;
2070 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2071
2072 RB_GC_VM_UNLOCK(lev);
2073
2074 if (found) {
2075 return obj;
2076 }
2077
2078 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2079 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2080 }
2081 else {
2082 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2083 }
2084}
2085
2086static inline void
2087obj_free_object_id(VALUE obj)
2088{
2089 VALUE obj_id = 0;
2090 if (RB_UNLIKELY(id2ref_tbl)) {
2091 switch (BUILTIN_TYPE(obj)) {
2092 case T_CLASS:
2093 case T_MODULE:
2094 obj_id = RCLASS(obj)->object_id;
2095 break;
2096 case T_IMEMO:
2097 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2098 return;
2099 }
2100 // fallthrough
2101 case T_OBJECT:
2102 {
2103 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2104 if (rb_shape_has_object_id(shape_id)) {
2105 obj_id = object_id_get(obj, shape_id);
2106 }
2107 break;
2108 }
2109 default:
2110 // For generic_fields, the T_IMEMO/fields object is responsible for freeing the id.
2111 return;
2112 }
2113
2114 if (RB_UNLIKELY(obj_id)) {
2115 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2116
2117 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2118 // If the object is a T_IMEMO/fields, it's possible the actual object
2119 // has already been garbage collected.
2120 if (!RB_TYPE_P(obj, T_IMEMO)) {
2121 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2122 }
2123 }
2124 }
2125 }
2126}
2127
2128void
2129rb_gc_obj_free_vm_weak_references(VALUE obj)
2130{
2132 obj_free_object_id(obj);
2133
2134 if (rb_obj_gen_fields_p(obj)) {
2135 rb_free_generic_ivar(obj);
2136 }
2137
2138 switch (BUILTIN_TYPE(obj)) {
2139 case T_STRING:
2140 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2141 rb_gc_free_fstring(obj);
2142 }
2143 break;
2144 case T_SYMBOL:
2145 rb_gc_free_dsymbol(obj);
2146 break;
2147 case T_IMEMO:
2148 switch (imemo_type(obj)) {
2149 case imemo_callcache: {
2150 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
2151
2152 if (vm_cc_refinement_p(cc)) {
2153 rb_vm_delete_cc_refinement(cc);
2154 }
2155
2156 break;
2157 }
2158 case imemo_callinfo:
2159 rb_vm_ci_free((const struct rb_callinfo *)obj);
2160 break;
2161 case imemo_ment:
2162 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2163 break;
2164 default:
2165 break;
2166 }
2167 break;
2168 default:
2169 break;
2170 }
2171}
2172
2173/*
2174 * call-seq:
2175 * ObjectSpace._id2ref(object_id) -> an_object
2176 *
2177 * Converts an object id to a reference to the object. May not be
2178 * called on an object id passed as a parameter to a finalizer.
2179 *
2180 * s = "I am a string" #=> "I am a string"
2181 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2182 * r == s #=> true
2183 *
2184 * In multi-ractor mode, if the object is not shareable, this raises
2185 * RangeError.
2186 *
2187 * This method is deprecated and should no longer be used.
2188 */
2189
2190static VALUE
2191id2ref(VALUE objid)
2192{
2193 objid = rb_to_int(objid);
2194 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2195 VALUE ptr = (VALUE)NUM2PTR(objid);
2196 if (SPECIAL_CONST_P(ptr)) {
2197 if (ptr == Qtrue) return Qtrue;
2198 if (ptr == Qfalse) return Qfalse;
2199 if (NIL_P(ptr)) return Qnil;
2200 if (FIXNUM_P(ptr)) return ptr;
2201 if (FLONUM_P(ptr)) return ptr;
2202
2203 if (SYMBOL_P(ptr)) {
2204 // Check that the symbol is valid
2205 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2206 return ptr;
2207 }
2208 else {
2209 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2210 }
2211 }
2212
2213 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2214 }
2215 }
2216
2217 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2218 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2219 return obj;
2220 }
2221 else {
2222 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2223 }
2224}
2225
2226/* :nodoc: */
2227static VALUE
2228os_id2ref(VALUE os, VALUE objid)
2229{
2230 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2231 return id2ref(objid);
2232}
2233
2234static VALUE
2235rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2236{
2237 if (SPECIAL_CONST_P(obj)) {
2238#if SIZEOF_LONG == SIZEOF_VOIDP
2239 return LONG2NUM((SIGNED_VALUE)obj);
2240#else
2241 return LL2NUM((SIGNED_VALUE)obj);
2242#endif
2243 }
2244
2245 return get_heap_object_id(obj);
2246}
2247
2248static VALUE
2249nonspecial_obj_id(VALUE obj)
2250{
2251#if SIZEOF_LONG == SIZEOF_VOIDP
2252 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2253#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2254 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2255#else
2256# error not supported
2257#endif
2258}
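
/* Illustration for nonspecial_obj_id() above (a sketch, not in the original
 * source): on SIZEOF_LONG == SIZEOF_VOIDP builds, OR-ing in FIXNUM_FLAG turns
 * the address into a Fixnum whose integer value is the address shifted right
 * by one; e.g. an object at 0x7f21a3c0 gets memory id 0x3f90d1e0. */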
2259
2260VALUE
2261rb_memory_id(VALUE obj)
2262{
2263 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2264}
2265
2266/*
2267 * Document-method: __id__
2268 * Document-method: object_id
2269 *
2270 * call-seq:
2271 * obj.__id__ -> integer
2272 * obj.object_id -> integer
2273 *
2274 * Returns an integer identifier for +obj+.
2275 *
2276 * The same number will be returned on all calls to +object_id+ for a given
2277 * object, and no two active objects will share an id.
2278 *
2279 * Note that some objects of builtin classes are reused for optimization.
2280 * This is the case for immediate values and frozen string literals.
2281 *
2282 * BasicObject implements +__id__+, Kernel implements +object_id+.
2283 *
2284 * Immediate values are not passed by reference but are passed by value:
2285 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2286 *
2287 * Object.new.object_id == Object.new.object_id # => false
2288 * (21 * 2).object_id == (21 * 2).object_id # => true
2289 * "hello".object_id == "hello".object_id # => false
2290 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2291 */
2292
2293VALUE
2294rb_obj_id(VALUE obj)
2295{
2296 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2297 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2298 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2299 * any immediates. */
2300 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2301}
2302
2303bool
2304rb_obj_id_p(VALUE obj)
2305{
2306 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2307}
2308
2309/*
2310 * GC implementations should call this function before the GC phase that updates references
2311 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2312 * "W^X" policy and protect the code memory from being modified during execution. This function
2313 * makes the code memory writeable.
2314 */
2315void
2316rb_gc_before_updating_jit_code(void)
2317{
2318#if USE_YJIT
2319 rb_yjit_mark_all_writeable();
2320#endif
2321}
2322
2323/*
2324 * GC implementations should call this function after the GC phase that updates references
2325 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2326 * executable again.
2327 */
2328void
2329rb_gc_after_updating_jit_code(void)
2330{
2331#if USE_YJIT
2332 rb_yjit_mark_all_executable();
2333#endif
2334}
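
/* Example (a sketch, not code from this file): a compacting GC brackets its
 * code-patching phase with the pair above:
 *
 *   rb_gc_before_updating_jit_code();  // make JIT code pages writable
 *   // ... update VALUEs embedded in generated machine code ...
 *   rb_gc_after_updating_jit_code();   // restore W^X: executable again
 */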
2335
2336static void
2337classext_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2338{
2339 size_t *size = (size_t *)arg;
2340 size_t s = 0;
2341
2342 if (RCLASSEXT_M_TBL(ext)) {
2343 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2344 }
2345 if (RCLASSEXT_CVC_TBL(ext)) {
2346 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2347 }
2348 if (RCLASSEXT_CONST_TBL(ext)) {
2349 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2350 }
2351 // The superclasses array is counted by classext_superclasses_memsize(),
2352 // which rb_obj_memsize_of() calls alongside this function, so it is not
2353 // added here to avoid double-counting.
2354 if (!prime) {
2355 s += sizeof(rb_classext_t);
2356 }
2357 *size += s;
2358}
2359
2360static void
2361classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
2362{
2363 size_t *size = (size_t *)arg;
2364 size_t array_size;
2365 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2366 RUBY_ASSERT(prime);
2367 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2368 *size += array_size * sizeof(VALUE);
2369 }
2370}
2371
2372size_t
2373rb_obj_memsize_of(VALUE obj)
2374{
2375 size_t size = 0;
2376
2377 if (SPECIAL_CONST_P(obj)) {
2378 return 0;
2379 }
2380
2381 switch (BUILTIN_TYPE(obj)) {
2382 case T_OBJECT:
2383 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
2384 if (rb_shape_obj_too_complex_p(obj)) {
2385 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2386 }
2387 else {
2388 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2389 }
2390 }
2391 break;
2392 case T_MODULE:
2393 case T_CLASS:
2394 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2395 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2396 break;
2397 case T_ICLASS:
2398 if (RICLASS_OWNS_M_TBL_P(obj)) {
2399 if (RCLASS_M_TBL(obj)) {
2400 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2401 }
2402 }
2403 break;
2404 case T_STRING:
2405 size += rb_str_memsize(obj);
2406 break;
2407 case T_ARRAY:
2408 size += rb_ary_memsize(obj);
2409 break;
2410 case T_HASH:
2411 if (RHASH_ST_TABLE_P(obj)) {
2412 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2413 /* st_table is in the slot */
2414 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2415 }
2416 break;
2417 case T_REGEXP:
2418 if (RREGEXP_PTR(obj)) {
2419 size += onig_memsize(RREGEXP_PTR(obj));
2420 }
2421 break;
2422 case T_DATA:
2423 size += rb_objspace_data_type_memsize(obj);
2424 break;
2425 case T_MATCH:
2426 {
2427 rb_matchext_t *rm = RMATCH_EXT(obj);
2428 size += onig_region_memsize(&rm->regs);
2429 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2430 }
2431 break;
2432 case T_FILE:
2433 if (RFILE(obj)->fptr) {
2434 size += rb_io_memsize(RFILE(obj)->fptr);
2435 }
2436 break;
2437 case T_RATIONAL:
2438 case T_COMPLEX:
2439 break;
2440 case T_IMEMO:
2441 size += rb_imemo_memsize(obj);
2442 break;
2443
2444 case T_FLOAT:
2445 case T_SYMBOL:
2446 break;
2447
2448 case T_BIGNUM:
2449 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2450 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2451 }
2452 break;
2453
2454 case T_NODE:
2455 UNEXPECTED_NODE(obj_memsize_of);
2456 break;
2457
2458 case T_STRUCT:
2459 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2460 RSTRUCT(obj)->as.heap.ptr) {
2461 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2462 }
2463 break;
2464
2465 case T_ZOMBIE:
2466 case T_MOVED:
2467 break;
2468
2469 default:
2470 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2471 BUILTIN_TYPE(obj), (void*)obj);
2472 }
2473
2474 return size + rb_gc_obj_slot_size(obj);
2475}
2476
2477static int
2478set_zero(st_data_t key, st_data_t val, st_data_t arg)
2479{
2480 VALUE k = (VALUE)key;
2481 VALUE hash = (VALUE)arg;
2482 rb_hash_aset(hash, k, INT2FIX(0));
2483 return ST_CONTINUE;
2484}
2485
2486struct count_objects_data {
2487 size_t counts[T_MASK+1];
2488 size_t freed;
2489 size_t total;
2490};
2491
2492static void
2493count_objects_i(VALUE obj, void *d)
2494{
2495 struct count_objects_data *data = (struct count_objects_data *)d;
2496
2497 if (RBASIC(obj)->flags) {
2498 data->counts[BUILTIN_TYPE(obj)]++;
2499 }
2500 else {
2501 data->freed++;
2502 }
2503
2504 data->total++;
2505}
2506
2507/*
2508 * call-seq:
2509 * ObjectSpace.count_objects([result_hash]) -> hash
2510 *
2511 * Counts all objects grouped by type.
2512 *
2513 * It returns a hash, such as:
2514 * {
2515 * :TOTAL=>10000,
2516 * :FREE=>3011,
2517 * :T_OBJECT=>6,
2518 * :T_CLASS=>404,
2519 * # ...
2520 * }
2521 *
2522 * The contents of the returned hash are implementation specific.
2523 * They may change in the future.
2524 *
2525 * Keys starting with +:T_+ count live objects.
2526 * For example, +:T_ARRAY+ is the number of arrays.
2527 * +:FREE+ is the number of object slots that are not currently in use.
2528 * +:TOTAL+ is the sum of the above.
2529 *
2530 * If the optional argument +result_hash+ is given,
2531 * it is overwritten and returned. This is intended to avoid the probe effect.
2532 *
2533 * h = {}
2534 * ObjectSpace.count_objects(h)
2535 * puts h
2536 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2537 *
2538 * This method is only expected to work on C Ruby.
2539 *
2540 */
2541
2542static VALUE
2543count_objects(int argc, VALUE *argv, VALUE os)
2544{
2545 struct count_objects_data data = { 0 };
2546 VALUE hash = Qnil;
2547 VALUE types[T_MASK + 1];
2548
2549 if (rb_check_arity(argc, 0, 1) == 1) {
2550 hash = argv[0];
2551 if (!RB_TYPE_P(hash, T_HASH))
2552 rb_raise(rb_eTypeError, "non-hash given");
2553 }
2554
2555 for (size_t i = 0; i <= T_MASK; i++) {
2556 // type_sym can allocate an object,
2557 // so we create all key symbols in advance
2558 // so as not to disturb the result
2559 types[i] = type_sym(i);
2560 }
2561
2562 // Same as type_sym, we need to create all key symbols in advance
2563 VALUE total = ID2SYM(rb_intern("TOTAL"));
2564 VALUE free = ID2SYM(rb_intern("FREE"));
2565
2566 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2567
2568 if (NIL_P(hash)) {
2569 hash = rb_hash_new();
2570 }
2571 else if (!RHASH_EMPTY_P(hash)) {
2572 rb_hash_stlike_foreach(hash, set_zero, hash);
2573 }
2574 rb_hash_aset(hash, total, SIZET2NUM(data.total));
2575 rb_hash_aset(hash, free, SIZET2NUM(data.freed));
2576
2577 for (size_t i = 0; i <= T_MASK; i++) {
2578 if (data.counts[i]) {
2579 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2580 }
2581 }
2582
2583 return hash;
2584}
2585
2586#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2587
2588#define STACK_START (ec->machine.stack_start)
2589#define STACK_END (ec->machine.stack_end)
2590#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2591
2592#if STACK_GROW_DIRECTION < 0
2593# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2594#elif STACK_GROW_DIRECTION > 0
2595# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2596#else
2597# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2598 : (size_t)(STACK_END - STACK_START + 1))
2599#endif
2600#if !STACK_GROW_DIRECTION
2601int ruby_stack_grow_direction;
2602int
2603ruby_get_stack_grow_direction(volatile VALUE *addr)
2604{
2605 VALUE *end;
2606 SET_MACHINE_STACK_END(&end);
2607
2608 if (end > addr) return ruby_stack_grow_direction = 1;
2609 return ruby_stack_grow_direction = -1;
2610}
2611#endif
2612
2613size_t
2614ruby_stack_length(VALUE **p)
2615{
2616 rb_execution_context_t *ec = GET_EC();
2617 SET_STACK_END;
2618 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2619 return STACK_LENGTH;
2620}
2621
2622#define PREVENT_STACK_OVERFLOW 1
2623#ifndef PREVENT_STACK_OVERFLOW
2624#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2625# define PREVENT_STACK_OVERFLOW 1
2626#else
2627# define PREVENT_STACK_OVERFLOW 0
2628#endif
2629#endif
2630#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2631static int
2632stack_check(rb_execution_context_t *ec, int water_mark)
2633{
2634 SET_STACK_END;
2635
2636 size_t length = STACK_LENGTH;
2637 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2638
2639 return length > maximum_length;
2640}
2641#else
2642#define stack_check(ec, water_mark) FALSE
2643#endif
2644
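/* STACKFRAME_FOR_CALL_CFUNC below is the water mark used by the stack checks:
 * keep roughly 2048 VALUEs (16KiB with 8-byte VALUEs) of machine stack
 * headroom before Ruby enters a C function. */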
2645#define STACKFRAME_FOR_CALL_CFUNC 2048
2646
2647int
2648rb_ec_stack_check(rb_execution_context_t *ec)
2649{
2650 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2651}
2652
2653int
2654ruby_stack_check(void)
2655{
2656 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2657}
2658
2659/* ==================== Marking ==================== */
2660
2661#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2662 if (!RB_SPECIAL_CONST_P(obj)) { \
2663 rb_vm_t *vm = GET_VM(); \
2664 void *objspace = vm->gc.objspace; \
2665 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2666 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2667 (func)(objspace, (obj_or_ptr)); \
2668 } \
2669 else if (check_obj ? \
2670 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2671 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2672 true) { \
2673 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2674 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2675 vm->gc.mark_func_data = NULL; \
2676 mark_func_data->mark_func((obj), mark_func_data->data); \
2677 vm->gc.mark_func_data = mark_func_data; \
2678 } \
2679 } \
2680} while (0)
2681
2682static inline void
2683gc_mark_internal(VALUE obj)
2684{
2685 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2686}
2687
2688void
2689rb_gc_mark_movable(VALUE obj)
2690{
2691 gc_mark_internal(obj);
2692}
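
/* Example (a sketch, not code used by gc.c): a C extension typically calls
 * rb_gc_mark_movable() above from its dmark callback and rewrites the
 * reference from its dcompact callback; `struct foo` is hypothetical.
 *
 *   static void
 *   foo_mark(void *ptr)
 *   {
 *       struct foo *p = ptr;
 *       rb_gc_mark_movable(p->obj);      // obj may be moved by compaction
 *   }
 *
 *   static void
 *   foo_compact(void *ptr)
 *   {
 *       struct foo *p = ptr;
 *       p->obj = rb_gc_location(p->obj); // fetch the object's new address
 *   }
 *
 * References marked with rb_gc_mark() below are pinned and never move, so
 * they need no dcompact counterpart. */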
2693
2694void
2695rb_gc_mark_and_move(VALUE *ptr)
2696{
2697 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2698}
2699
2700static inline void
2701gc_mark_and_pin_internal(VALUE obj)
2702{
2703 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2704}
2705
2706void
2707rb_gc_mark(VALUE obj)
2708{
2709 gc_mark_and_pin_internal(obj);
2710}
2711
2712static inline void
2713gc_mark_maybe_internal(VALUE obj)
2714{
2715 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2716}
2717
2718void
2719rb_gc_mark_maybe(VALUE obj)
2720{
2721 gc_mark_maybe_internal(obj);
2722}
2723
2724ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2725static void
2726each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2727{
2728 VALUE v;
2729 while (n--) {
2730 v = *x;
2731 cb(v, data);
2732 x++;
2733 }
2734}
2735
2736static void
2737each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2738{
2739 if (end <= start) return;
2740 each_location(start, end - start, cb, data);
2741}
2742
2743static void
2744gc_mark_maybe_each_location(VALUE obj, void *data)
2745{
2746 gc_mark_maybe_internal(obj);
2747}
2748
2749void
2750rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2751{
2752 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2753}
2754
2755void
2756rb_gc_mark_values(long n, const VALUE *values)
2757{
2758 for (long i = 0; i < n; i++) {
2759 gc_mark_internal(values[i]);
2760 }
2761}
2762
2763void
2764rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2765{
2766 for (long i = 0; i < n; i++) {
2767 gc_mark_and_pin_internal(values[i]);
2768 }
2769}
2770
2771static int
2772mark_key(st_data_t key, st_data_t value, st_data_t data)
2773{
2774 gc_mark_and_pin_internal((VALUE)key);
2775
2776 return ST_CONTINUE;
2777}
2778
2779void
2780rb_mark_set(st_table *tbl)
2781{
2782 if (!tbl) return;
2783
2784 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2785}
2786
2787static int
2788mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2789{
2790 gc_mark_internal((VALUE)key);
2791 gc_mark_internal((VALUE)value);
2792
2793 return ST_CONTINUE;
2794}
2795
2796static int
2797pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2798{
2799 gc_mark_and_pin_internal((VALUE)key);
2800 gc_mark_and_pin_internal((VALUE)value);
2801
2802 return ST_CONTINUE;
2803}
2804
2805static int
2806pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2807{
2808 gc_mark_and_pin_internal((VALUE)key);
2809 gc_mark_internal((VALUE)value);
2810
2811 return ST_CONTINUE;
2812}
2813
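/* Keys of compare-by-identity hashes are hashed by their address, so they
 * must be pinned: if a key moved, its bucket in the table would go stale.
 * Ordinary hashes hash keys by value, so both keys and values may move. */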
2814static void
2815mark_hash(VALUE hash)
2816{
2817 if (rb_hash_compare_by_id_p(hash)) {
2818 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2819 }
2820 else {
2821 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2822 }
2823
2824 gc_mark_internal(RHASH(hash)->ifnone);
2825}
2826
2827void
2828rb_mark_hash(st_table *tbl)
2829{
2830 if (!tbl) return;
2831
2832 st_foreach(tbl, pin_key_pin_value, 0);
2833}
2834
2835static enum rb_id_table_iterator_result
2836mark_method_entry_i(VALUE me, void *objspace)
2837{
2838 gc_mark_internal(me);
2839
2840 return ID_TABLE_CONTINUE;
2841}
2842
2843static void
2844mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2845{
2846 if (tbl) {
2847 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2848 }
2849}
2850
2851static enum rb_id_table_iterator_result
2852mark_const_entry_i(VALUE value, void *objspace)
2853{
2854 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2855
2856 if (!rb_gc_checking_shareable()) {
2857 gc_mark_internal(ce->value);
2858 gc_mark_internal(ce->file); // TODO: ce->file should be shareable?
2859 }
2860 return ID_TABLE_CONTINUE;
2861}
2862
2863static void
2864mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2865{
2866 if (!tbl) return;
2867 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
2868}
2869
2870static enum rb_id_table_iterator_result
2871mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2872{
2873 struct rb_cvar_class_tbl_entry *entry;
2874
2875 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2876
2877 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2878 gc_mark_internal((VALUE)entry->cref);
2879
2880 return ID_TABLE_CONTINUE;
2881}
2882
2883static void
2884mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2885{
2886 if (!tbl) return;
2887 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2888}
2889
2890#if STACK_GROW_DIRECTION < 0
2891#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2892#elif STACK_GROW_DIRECTION > 0
2893#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2894#else
2895#define GET_STACK_BOUNDS(start, end, appendix) \
2896 ((STACK_END < STACK_START) ? \
2897 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2898#endif
2899
2900static void
2901gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2902{
2903 gc_mark_maybe_internal(obj);
2904
2905#ifdef RUBY_ASAN_ENABLED
2906 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2907 void *fake_frame_start;
2908 void *fake_frame_end;
2909 bool is_fake_frame = asan_get_fake_stack_extents(
2910 ec->machine.asan_fake_stack_handle, obj,
2911 ec->machine.stack_start, ec->machine.stack_end,
2912 &fake_frame_start, &fake_frame_end
2913 );
2914 if (is_fake_frame) {
2915 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2916 }
2917#endif
2918}
2919
2920static bool
2921gc_object_moved_p_internal(void *objspace, VALUE obj)
2922{
2923 if (SPECIAL_CONST_P(obj)) {
2924 return false;
2925 }
2926
2927 return rb_gc_impl_object_moved_p(objspace, obj);
2928}
2929
2930static VALUE
2931gc_location_internal(void *objspace, VALUE value)
2932{
2933 if (SPECIAL_CONST_P(value)) {
2934 return value;
2935 }
2936
2937 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2938
2939 return rb_gc_impl_location(objspace, value);
2940}
2941
2942VALUE
2943rb_gc_location(VALUE value)
2944{
2945 return gc_location_internal(rb_gc_get_objspace(), value);
2946}
2947
2948#if defined(__wasm__)
2949
2950
2951static VALUE *rb_stack_range_tmp[2];
2952
2953static void
2954rb_mark_locations(void *begin, void *end)
2955{
2956 rb_stack_range_tmp[0] = begin;
2957 rb_stack_range_tmp[1] = end;
2958}
2959
2960void
2961rb_gc_save_machine_context(void)
2962{
2963 // no-op
2964}
2965
2966# if defined(__EMSCRIPTEN__)
2967
2968static void
2969mark_current_machine_context(const rb_execution_context_t *ec)
2970{
2971 emscripten_scan_stack(rb_mark_locations);
2972 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2973
2974 emscripten_scan_registers(rb_mark_locations);
2975 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2976}
2977# else // use Asyncify version
2978
2979static void
2980mark_current_machine_context(rb_execution_context_t *ec)
2981{
2982 VALUE *stack_start, *stack_end;
2983 SET_STACK_END;
2984 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2985 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2986
2987 rb_wasm_scan_locals(rb_mark_locations);
2988 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2989}
2990
2991# endif
2992
2993#else // !defined(__wasm__)
2994
2995void
2996rb_gc_save_machine_context(void)
2997{
2998 rb_thread_t *thread = GET_THREAD();
2999
3000 RB_VM_SAVE_MACHINE_CONTEXT(thread);
3001}
3002
3003
3004static void
3005mark_current_machine_context(const rb_execution_context_t *ec)
3006{
3007 rb_gc_mark_machine_context(ec);
3008}
3009#endif
3010
3011void
3012rb_gc_mark_machine_context(const rb_execution_context_t *ec)
3013{
3014 VALUE *stack_start, *stack_end;
3015
3016 GET_STACK_BOUNDS(stack_start, stack_end, 0);
3017 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
3018
3019 void *data =
3020#ifdef RUBY_ASAN_ENABLED
3021 /* gc_mark_machine_stack_location_maybe() uses data as const */
3022 (rb_execution_context_t *)ec;
3023#else
3024 NULL;
3025#endif
3026
3027 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3028 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3029 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3030}
3031
3032static int
3033rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3034{
3035 gc_mark_and_pin_internal((VALUE)value);
3036
3037 return ST_CONTINUE;
3038}
3039
3040void
3041rb_mark_tbl(st_table *tbl)
3042{
3043 if (!tbl || tbl->num_entries == 0) return;
3044
3045 st_foreach(tbl, rb_mark_tbl_i, 0);
3046}
3047
3048static void
3049gc_mark_tbl_no_pin(st_table *tbl)
3050{
3051 if (!tbl || tbl->num_entries == 0) return;
3052
3053 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3054}
3055
3056void
3057rb_mark_tbl_no_pin(st_table *tbl)
3058{
3059 gc_mark_tbl_no_pin(tbl);
3060}
3061
3062static bool
3063gc_declarative_marking_p(const rb_data_type_t *type)
3064{
3065 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3066}
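
/* Example (a sketch, not code from this file): with RUBY_TYPED_DECL_MARKING,
 * the dmark slot holds a RUBY_REF_END-terminated list of offsets of VALUE
 * fields instead of a function pointer, and rb_gc_mark_children() walks that
 * list (see TYPED_DATA_REFS_OFFSET_LIST below). `struct foo` is hypothetical:
 *
 *   struct foo { VALUE a; long n; VALUE b; };
 *
 *   static const size_t foo_refs[] = {
 *       offsetof(struct foo, a),
 *       offsetof(struct foo, b),
 *       RUBY_REF_END
 *   };
 *
 *   static const rb_data_type_t foo_type = {
 *       "foo",
 *       { (RUBY_DATA_FUNC)(uintptr_t)foo_refs, RUBY_TYPED_DEFAULT_FREE, NULL },
 *       0, 0,
 *       RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_DECL_MARKING
 *   };
 */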
3067
3068void
3069rb_gc_mark_roots(void *objspace, const char **categoryp)
3070{
3071 rb_execution_context_t *ec = GET_EC();
3072 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3073
3074#define MARK_CHECKPOINT(category) do { \
3075 if (categoryp) *categoryp = category; \
3076} while (0)
3077
3078 MARK_CHECKPOINT("vm");
3079 rb_vm_mark(vm);
3080
3081 MARK_CHECKPOINT("end_proc");
3082 rb_mark_end_proc();
3083
3084 MARK_CHECKPOINT("global_tbl");
3085 rb_gc_mark_global_tbl();
3086
3087#if USE_YJIT
3088 void rb_yjit_root_mark(void); // in Rust
3089
3090 if (rb_yjit_enabled_p) {
3091 MARK_CHECKPOINT("YJIT");
3092 rb_yjit_root_mark();
3093 }
3094#endif
3095
3096#if USE_ZJIT
3097 void rb_zjit_root_mark(void);
3098 if (rb_zjit_enabled_p) {
3099 MARK_CHECKPOINT("ZJIT");
3100 rb_zjit_root_mark();
3101 }
3102#endif
3103
3104 MARK_CHECKPOINT("machine_context");
3105 mark_current_machine_context(ec);
3106
3107 MARK_CHECKPOINT("global_symbols");
3108 rb_sym_global_symbols_mark_and_move();
3109
3110 MARK_CHECKPOINT("finish");
3111
3112#undef MARK_CHECKPOINT
3113}
3114
3115struct gc_mark_classext_foreach_arg {
3116 rb_objspace_t *objspace;
3117 VALUE obj;
3118};
3119
3120static void
3121gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3122{
3123 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3124 rb_objspace_t *objspace = foreach_arg->objspace;
3125
3126 if (RCLASSEXT_SUPER(ext)) {
3127 gc_mark_internal(RCLASSEXT_SUPER(ext));
3128 }
3129 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3130
3131 if (!rb_gc_checking_shareable()) {
3132 // unshareable
3133 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3134 }
3135
3136 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3137 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3138 }
3139 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3140 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3141 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3142 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3143}
3144
3145static void
3146gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE box_value, void *arg)
3147{
3148 struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3149 rb_objspace_t *objspace = foreach_arg->objspace;
3150
3151 if (RCLASSEXT_SUPER(ext)) {
3152 gc_mark_internal(RCLASSEXT_SUPER(ext));
3153 }
3154 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3155 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3156 }
3157 if (RCLASSEXT_INCLUDER(ext)) {
3158 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3159 }
3160 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3161 gc_mark_internal(RCLASSEXT_CC_TBL(ext));
3162}
3163
3164#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3165
3166void
3167rb_gc_mark_children(void *objspace, VALUE obj)
3168{
3169 struct gc_mark_classext_foreach_arg foreach_args;
3170
3171 if (rb_obj_gen_fields_p(obj)) {
3172 rb_mark_generic_ivar(obj);
3173 }
3174
3175 switch (BUILTIN_TYPE(obj)) {
3176 case T_FLOAT:
3177 case T_BIGNUM:
3178 return;
3179
3180 case T_NIL:
3181 case T_FIXNUM:
3182 rb_bug("rb_gc_mark() called for broken object");
3183 break;
3184
3185 case T_NODE:
3186 UNEXPECTED_NODE(rb_gc_mark);
3187 break;
3188
3189 case T_IMEMO:
3190 rb_imemo_mark_and_move(obj, false);
3191 return;
3192
3193 default:
3194 break;
3195 }
3196
3197 gc_mark_internal(RBASIC(obj)->klass);
3198
3199 switch (BUILTIN_TYPE(obj)) {
3200 case T_CLASS:
3201 if (FL_TEST_RAW(obj, FL_SINGLETON) &&
3202 !rb_gc_checking_shareable()) {
3203 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3204 }
3205 // Continue to the shared T_CLASS/T_MODULE
3206 case T_MODULE:
3207 foreach_args.objspace = objspace;
3208 foreach_args.obj = obj;
3209 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3210 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3211 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3212 }
3213 break;
3214
3215 case T_ICLASS:
3216 foreach_args.objspace = objspace;
3217 foreach_args.obj = obj;
3218 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3219 if (BOX_USER_P(RCLASS_PRIME_BOX(obj))) {
3220 gc_mark_internal(RCLASS_PRIME_BOX(obj)->box_object);
3221 }
3222 break;
3223
3224 case T_ARRAY:
3225 if (ARY_SHARED_P(obj)) {
3226 VALUE root = ARY_SHARED_ROOT(obj);
3227 gc_mark_internal(root);
3228 }
3229 else {
3230 long len = RARRAY_LEN(obj);
3231 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3232 for (long i = 0; i < len; i++) {
3233 gc_mark_internal(ptr[i]);
3234 }
3235 }
3236 break;
3237
3238 case T_HASH:
3239 mark_hash(obj);
3240 break;
3241
3242 case T_SYMBOL:
3243 gc_mark_internal(RSYMBOL(obj)->fstr);
3244 break;
3245
3246 case T_STRING:
3247 if (STR_SHARED_P(obj)) {
3248 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3249 /* Embedded shared strings cannot be moved because this string
3250 * points into the slot of the shared string. There may be code
3251 * using RSTRING_PTR on the stack, which would pin this string
3252 * but not the shared string, allowing the latter to move. */
3253 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3254 }
3255 else {
3256 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3257 }
3258 }
3259 break;
3260
3261 case T_DATA: {
3262 bool typed_data = RTYPEDDATA_P(obj);
3263 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3264
3265 if (typed_data) {
3266 gc_mark_internal(RTYPEDDATA(obj)->fields_obj);
3267 }
3268
3269 if (ptr) {
3270 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3271 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3272
3273 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3274 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3275 }
3276 }
3277 else {
3278 RUBY_DATA_FUNC mark_func = typed_data ?
3279 RTYPEDDATA_TYPE(obj)->function.dmark :
3280 RDATA(obj)->dmark;
3281 if (mark_func) (*mark_func)(ptr);
3282 }
3283 }
3284
3285 break;
3286 }
3287
3288 case T_OBJECT: {
3289 uint32_t len;
3290 if (rb_shape_obj_too_complex_p(obj)) {
3291 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3292 len = ROBJECT_FIELDS_COUNT_COMPLEX(obj);
3293 }
3294 else {
3295 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3296
3297 len = ROBJECT_FIELDS_COUNT_NOT_COMPLEX(obj);
3298 for (uint32_t i = 0; i < len; i++) {
3299 gc_mark_internal(ptr[i]);
3300 }
3301 }
3302
3303 attr_index_t fields_count = (attr_index_t)len;
3304 if (fields_count) {
3305 VALUE klass = RBASIC_CLASS(obj);
3306
3307 // Increment max_iv_count if applicable, used to determine size pool allocation
3308 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3309 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3310 }
3311 }
3312
3313 break;
3314 }
3315
3316 case T_FILE:
3317 if (RFILE(obj)->fptr) {
3318 gc_mark_internal(RFILE(obj)->fptr->self);
3319 gc_mark_internal(RFILE(obj)->fptr->pathv);
3320 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3321 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3322 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3323 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3324 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3325 gc_mark_internal(RFILE(obj)->fptr->timeout);
3326 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3327 }
3328 break;
3329
3330 case T_REGEXP:
3331 gc_mark_internal(RREGEXP(obj)->src);
3332 break;
3333
3334 case T_MATCH:
3335 gc_mark_internal(RMATCH(obj)->regexp);
3336 if (RMATCH(obj)->str) {
3337 gc_mark_internal(RMATCH(obj)->str);
3338 }
3339 break;
3340
3341 case T_RATIONAL:
3342 gc_mark_internal(RRATIONAL(obj)->num);
3343 gc_mark_internal(RRATIONAL(obj)->den);
3344 break;
3345
3346 case T_COMPLEX:
3347 gc_mark_internal(RCOMPLEX(obj)->real);
3348 gc_mark_internal(RCOMPLEX(obj)->imag);
3349 break;
3350
3351 case T_STRUCT: {
3352 const long len = RSTRUCT_LEN(obj);
3353 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3354
3355 for (long i = 0; i < len; i++) {
3356 gc_mark_internal(ptr[i]);
3357 }
3358
3359 if (rb_shape_obj_has_fields(obj) && !FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
3360 gc_mark_internal(RSTRUCT_FIELDS_OBJ(obj));
3361 }
3362
3363 break;
3364 }
3365
3366 default:
3367 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3368 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3369 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3370 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3371 BUILTIN_TYPE(obj), (void *)obj,
3372 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3373 }
3374}
3375
3376size_t
3377rb_gc_obj_optimal_size(VALUE obj)
3378{
3379 switch (BUILTIN_TYPE(obj)) {
3380 case T_ARRAY:
3381 {
3382 size_t size = rb_ary_size_as_embedded(obj);
3383 if (rb_gc_size_allocatable_p(size)) {
3384 return size;
3385 }
3386 else {
3387 return sizeof(struct RArray);
3388 }
3389 }
3390
3391 case T_OBJECT:
3392 if (rb_shape_obj_too_complex_p(obj)) {
3393 return sizeof(struct RObject);
3394 }
3395 else {
3396 size_t size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3397 if (rb_gc_size_allocatable_p(size)) {
3398 return size;
3399 }
3400 else {
3401 return sizeof(struct RObject);
3402 }
3403 }
3404
3405 case T_STRING:
3406 {
3407 size_t size = rb_str_size_as_embedded(obj);
3408 if (rb_gc_size_allocatable_p(size)) {
3409 return size;
3410 }
3411 else {
3412 return sizeof(struct RString);
3413 }
3414 }
3415
3416 case T_HASH:
3417 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3418
3419 default:
3420 return 0;
3421 }
3422}
3423
3424void
3425rb_gc_writebarrier(VALUE a, VALUE b)
3426{
3427 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3428}
3429
3430void
3431rb_gc_writebarrier_unprotect(VALUE obj)
3432{
3433 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3434}
3435
3436/*
3437 * remember `obj' if needed.
3438 */
3439void
3440rb_gc_writebarrier_remember(VALUE obj)
3441{
3442 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3443}
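
/* Example (a sketch): C extensions normally do not call rb_gc_writebarrier()
 * directly. The public macros invoke it as needed; `parent_data` is a
 * hypothetical struct holding a VALUE field:
 *
 *   RB_OBJ_WRITE(parent, &parent_data->child, child);  // store + barrier
 *   RB_OBJ_WRITTEN(parent, Qundef, child);             // barrier for a store
 *                                                      // performed elsewhere
 */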
3444
3445void
3446rb_gc_copy_attributes(VALUE dest, VALUE obj)
3447{
3448 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3449}
3450
3451int
3452rb_gc_modular_gc_loaded_p(void)
3453{
3454#if USE_MODULAR_GC
3455 return rb_gc_functions.modular_gc_loaded_p;
3456#else
3457 return false;
3458#endif
3459}
3460
3461const char *
3462rb_gc_active_gc_name(void)
3463{
3464 const char *gc_name = rb_gc_impl_active_gc_name();
3465
3466 const size_t len = strlen(gc_name);
3467 if (len > RB_GC_MAX_NAME_LEN) {
3468 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3469 RB_GC_MAX_NAME_LEN, len, gc_name);
3470 }
3471
3472 return gc_name;
3473}
3474
3475struct rb_gc_object_metadata_entry *
3476rb_gc_object_metadata(VALUE obj)
3477{
3478 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3479}
3480
3481/* GC */
3482
3483void *
3484rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3485{
3486 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3487}
3488
3489void
3490rb_gc_ractor_cache_free(void *cache)
3491{
3492 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3493}
3494
3495void
3496rb_gc_register_mark_object(VALUE obj)
3497{
3498 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3499 return;
3500
3501 rb_vm_register_global_object(obj);
3502}
3503
3504void
3505rb_gc_register_address(VALUE *addr)
3506{
3507 rb_vm_t *vm = GET_VM();
3508
3509 VALUE obj = *addr;
3510
3511 struct global_object_list *tmp = ALLOC(struct global_object_list);
3512 tmp->next = vm->global_object_list;
3513 tmp->varptr = addr;
3514 vm->global_object_list = tmp;
3515
3516 /*
3517 * Because some C extensions have assignment-then-register bugs,
3518 * we defensively guard `obj` here so that it does not get swept.
3519 */
3520 RB_GC_GUARD(obj);
3521 if (0 && !SPECIAL_CONST_P(obj)) {
3522 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3523 rb_obj_class(obj));
3524 rb_print_backtrace(stderr);
3525 }
3526}
3527
3528void
3529rb_gc_unregister_address(VALUE *addr)
3530{
3531 rb_vm_t *vm = GET_VM();
3532 struct global_object_list *tmp = vm->global_object_list;
3533
3534 if (tmp->varptr == addr) {
3535 vm->global_object_list = tmp->next;
3536 xfree(tmp);
3537 return;
3538 }
3539 while (tmp->next) {
3540 if (tmp->next->varptr == addr) {
3541 struct global_object_list *t = tmp->next;
3542
3543 tmp->next = tmp->next->next;
3544 xfree(t);
3545 break;
3546 }
3547 tmp = tmp->next;
3548 }
3549}
3550
3551void
3552rb_global_variable(VALUE *var)
3553{
3554 rb_gc_register_address(var);
3555}
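
/* Example (a sketch): the usual pattern for keeping a VALUE stored in C
 * global memory alive. Registering the address before assigning avoids the
 * assignment-then-register window mentioned in rb_gc_register_address()
 * above; `Init_myext` and `cached_path` are hypothetical:
 *
 *   static VALUE cached_path;
 *
 *   void
 *   Init_myext(void)
 *   {
 *       rb_global_variable(&cached_path);       // register the slot first
 *       cached_path = rb_str_new_cstr("/tmp");  // then assign
 *   }
 */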
3556
3557static VALUE
3558gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3559{
3560 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3561
3562 return Qnil;
3563}
3564
3565/*
3566 * rb_objspace_each_objects() is a special C API to walk through the
3567 * Ruby object space. This C API is difficult to use safely; frankly,
3568 * you should not use it unless you have read the source code of this
3569 * function and understand what it does.
3570 *
3571 * 'callback' will be called several times (once per heap page in the
3572 * current implementation) with:
3573 * vstart: a pointer to the first living object of the heap_page.
3574 * vend: a pointer just past the valid heap_page area.
3575 * stride: the distance to the next VALUE.
3576 *
3577 * If callback() returns non-zero, the iteration is stopped.
3578 *
3579 * This is sample callback code to iterate over live objects:
3580 *
3581 *  static int
3582 *  sample_callback(void *vstart, void *vend, int stride, void *data)
3583 *  {
3584 *      VALUE v = (VALUE)vstart;
3585 *      for (; v != (VALUE)vend; v += stride) {
3586 *          if (!rb_objspace_internal_object_p(v)) { // liveness check
3587 *              // do something with live object 'v'
3588 *          }
3589 *      }
3590 *      return 0; // continue the iteration
3591 *  }
3592 *
3593 * Note: 'vstart' is not the top of the heap_page. It points to the
3594 * first living object in order to anchor at least one object and
3595 * avoid GC issues. This means that you cannot walk through every
3596 * Ruby object page, including freed object pages.
3597 *
3598 * Note: in this implementation, 'stride' is the same as sizeof(RVALUE).
3599 * However, other values may be passed as 'stride' for some reason in
3600 * the future, so you must use 'stride' instead of a constant value in
3601 * the iteration.
3602 */
3603void
3604rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3605{
3606 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3607}
3608
3609static void
3610gc_ref_update_array(void *objspace, VALUE v)
3611{
3612 if (ARY_SHARED_P(v)) {
3613 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3614
3615 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3616
3617 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3618 // If the root is embedded and its location has changed
3619 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3620 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3621 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3622 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3623 }
3624 }
3625 else {
3626 long len = RARRAY_LEN(v);
3627
3628 if (len > 0) {
3629 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3630 for (long i = 0; i < len; i++) {
3631 UPDATE_IF_MOVED(objspace, ptr[i]);
3632 }
3633 }
3634
3635 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3636 if (rb_ary_embeddable_p(v)) {
3637 rb_ary_make_embedded(v);
3638 }
3639 }
3640 }
3641}
3642
3643static void
3644gc_ref_update_object(void *objspace, VALUE v)
3645{
3646 VALUE *ptr = ROBJECT_FIELDS(v);
3647
3648 if (FL_TEST_RAW(v, ROBJECT_HEAP)) {
3649 if (rb_shape_obj_too_complex_p(v)) {
3650 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3651 return;
3652 }
3653
3654 size_t slot_size = rb_gc_obj_slot_size(v);
3655 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3656 if (slot_size >= embed_size) {
3657 // Object can be re-embedded
3658 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3659 FL_UNSET_RAW(v, ROBJECT_HEAP);
3660 xfree(ptr);
3661 ptr = ROBJECT(v)->as.ary;
3662 }
3663 }
3664
3665 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3666 UPDATE_IF_MOVED(objspace, ptr[i]);
3667 }
3668}
3669
3670void
3671rb_gc_ref_update_table_values_only(st_table *tbl)
3672{
3673 gc_ref_update_table_values_only(tbl);
3674}
3675
3676/* Update MOVED references in a VALUE=>VALUE st_table */
3677void
3678rb_gc_update_tbl_refs(st_table *ptr)
3679{
3680 gc_update_table_refs(ptr);
3681}
3682
3683static void
3684gc_ref_update_hash(void *objspace, VALUE v)
3685{
3686 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3687}
3688
3689static void
3690gc_update_values(void *objspace, long n, VALUE *values)
3691{
3692 for (long i = 0; i < n; i++) {
3693 UPDATE_IF_MOVED(objspace, values[i]);
3694 }
3695}
3696
3697void
3698rb_gc_update_values(long n, VALUE *values)
3699{
3700 gc_update_values(rb_gc_get_objspace(), n, values);
3701}
3702
3703static enum rb_id_table_iterator_result
3704check_id_table_move(VALUE value, void *data)
3705{
3706 void *objspace = (void *)data;
3707
3708 if (gc_object_moved_p_internal(objspace, (VALUE)value)) {
3709 return ID_TABLE_REPLACE;
3710 }
3711
3712 return ID_TABLE_CONTINUE;
3713}
3714
3715void
3716rb_gc_prepare_heap_process_object(VALUE obj)
3717{
3718 switch (BUILTIN_TYPE(obj)) {
3719 case T_STRING:
3720 // Precompute the string coderange. This both saves time for when it is
3721 // eventually needed and avoids mutating heap pages after a potential fork.
3722 rb_enc_str_coderange(obj);
3723 break;
3724 default:
3725 break;
3726 }
3727}
3728
3729void
3730rb_gc_prepare_heap(void)
3731{
3732 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3733}
3734
3735size_t
3736rb_gc_heap_id_for_size(size_t size)
3737{
3738 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3739}
3740
3741bool
3742rb_gc_size_allocatable_p(size_t size)
3743{
3744 return rb_gc_impl_size_allocatable_p(size);
3745}
3746
3747static enum rb_id_table_iterator_result
3748update_id_table(VALUE *value, void *data, int existing)
3749{
3750 void *objspace = (void *)data;
3751
3752 if (gc_object_moved_p_internal(objspace, (VALUE)*value)) {
3753 *value = gc_location_internal(objspace, (VALUE)*value);
3754 }
3755
3756 return ID_TABLE_CONTINUE;
3757}
3758
3759static void
3760update_m_tbl(void *objspace, struct rb_id_table *tbl)
3761{
3762 if (tbl) {
3763 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3764 }
3765}
3766
3767static enum rb_id_table_iterator_result
3768update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3769{
3770 struct rb_cvar_class_tbl_entry *entry;
3771
3772 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3773
3774 if (entry->cref) {
3775 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3776 }
3777
3778 entry->class_value = gc_location_internal(objspace, entry->class_value);
3779
3780 return ID_TABLE_CONTINUE;
3781}
3782
3783static void
3784update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3785{
3786 if (!tbl) return;
3787 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3788}
3789
3790static enum rb_id_table_iterator_result
3791update_const_tbl_i(VALUE value, void *objspace)
3792{
3793 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3794
3795 if (gc_object_moved_p_internal(objspace, ce->value)) {
3796 ce->value = gc_location_internal(objspace, ce->value);
3797 }
3798
3799 if (gc_object_moved_p_internal(objspace, ce->file)) {
3800 ce->file = gc_location_internal(objspace, ce->file);
3801 }
3802
3803 return ID_TABLE_CONTINUE;
3804}
3805
3806static void
3807update_const_tbl(void *objspace, struct rb_id_table *tbl)
3808{
3809 if (!tbl) return;
3810 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3811}
3812
3813static void
3814update_subclasses(void *objspace, rb_classext_t *ext)
3815{
3816 rb_subclass_entry_t *entry;
3817 rb_subclass_anchor_t *anchor = RCLASSEXT_SUBCLASSES(ext);
3818 if (!anchor) return;
3819 entry = anchor->head;
3820 while (entry) {
3821 if (entry->klass)
3822 UPDATE_IF_MOVED(objspace, entry->klass);
3823 entry = entry->next;
3824 }
3825}
3826
3827static void
3828update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
3829{
3830 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
3831 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
3832 for (size_t i = 0; i < array_size; i++) {
3833 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
3834 }
3835 }
3836}
3837
3838static void
3839update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
3840{
3841 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
3842 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
3843 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
3844 if (is_iclass) {
3845 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
3846 }
3847}
3848
3849static void
3850update_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
3851{
3852 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3853 rb_objspace_t *objspace = args->objspace;
3854
3855 if (RCLASSEXT_SUPER(ext)) {
3856 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3857 }
3858
3859 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3860
3861 UPDATE_IF_MOVED(objspace, ext->fields_obj);
3862 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
3863 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3864 }
3865 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3866 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3867 update_superclasses(objspace, ext);
3868 update_subclasses(objspace, ext);
3869
3870 update_classext_values(objspace, ext, false);
3871}
3872
3873static void
3874update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE box_value, void *arg)
3875{
3876 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3877 rb_objspace_t *objspace = args->objspace;
3878
3879 if (RCLASSEXT_SUPER(ext)) {
3880 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3881 }
3882 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3883 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3884 UPDATE_IF_MOVED(objspace, RCLASSEXT_CC_TBL(ext));
3885 update_subclasses(objspace, ext);
3886
3887 update_classext_values(objspace, ext, true);
3888}
3889
3890struct global_vm_table_foreach_data {
3891 vm_table_foreach_callback_func callback;
3892 vm_table_update_callback_func update_callback;
3893 void *data;
3894 bool weak_only;
3895};
3896
3897static int
3898vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3899{
3900 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3901
3902 int ret = iter_data->callback((VALUE)key, iter_data->data);
3903
3904 if (!iter_data->weak_only) {
3905 if (ret != ST_CONTINUE) return ret;
3906
3907 ret = iter_data->callback((VALUE)value, iter_data->data);
3908 }
3909
3910 return ret;
3911}
3912
3913static int
3914vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3915{
3916 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3917
3918 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3919
3920 if (!iter_data->weak_only) {
3921 if (ret != ST_CONTINUE) return ret;
3922
3923 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3924 }
3925
3926 return ret;
3927}
3928
3929static int
3930vm_weak_table_cc_refinement_foreach(st_data_t key, st_data_t data, int error)
3931{
3932 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3933
3934 return iter_data->callback((VALUE)key, iter_data->data);
3935}
3936
3937static int
3938vm_weak_table_cc_refinement_foreach_update_update(st_data_t *key, st_data_t data, int existing)
3939{
3940 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3941
3942 return iter_data->update_callback((VALUE *)key, iter_data->data);
3943}
3944
3945
3946static int
3947vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
3948{
3949 VALUE sym = *sym_ptr;
3950 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3951
3952 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
3953
3954 int ret = iter_data->callback(sym, iter_data->data);
3955
3956 if (ret == ST_REPLACE) {
3957 ret = iter_data->update_callback(sym_ptr, iter_data->data);
3958 }
3959
3960 return ret;
3961}
3962
3963struct st_table *rb_generic_fields_tbl_get(void);
3964
3965static int
3966vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3967{
3968 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3969
3970 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
3971 int ret = iter_data->callback((VALUE)key, iter_data->data);
3972 if (ret != ST_CONTINUE) return ret;
3973 }
3974
3975 return iter_data->callback((VALUE)value, iter_data->data);
3976}
3977
3978static int
3979vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3980{
3981 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3982
3983 iter_data->update_callback((VALUE *)value, iter_data->data);
3984
3985 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
3986 iter_data->update_callback((VALUE *)key, iter_data->data);
3987 }
3988
3989 return ST_CONTINUE;
3990}
3991
3992static int
3993vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
3994{
3995 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3996
3997 int ret = iter_data->callback((VALUE)key, iter_data->data);
3998
3999 VALUE new_value = (VALUE)value;
4000 VALUE new_key = (VALUE)key;
4001
4002 switch (ret) {
4003 case ST_CONTINUE:
4004 break;
4005
4006 case ST_DELETE:
4007 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
4008 return ST_DELETE;
4009
4010 case ST_REPLACE: {
4011 ret = iter_data->update_callback(&new_key, iter_data->data);
4012 if (key != new_key) {
4013 ret = ST_DELETE;
4014 }
4015 break;
4016 }
4017
4018 default:
4019 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
4020 }
4021
4022 if (!iter_data->weak_only) {
4023 int ivar_ret = iter_data->callback(new_value, iter_data->data);
4024 switch (ivar_ret) {
4025 case ST_CONTINUE:
4026 break;
4027
4028 case ST_REPLACE:
4029 iter_data->update_callback(&new_value, iter_data->data);
4030 break;
4031
4032 default:
4033 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
4034 }
4035 }
4036
4037 if (key != new_key || value != new_value) {
4038 DURING_GC_COULD_MALLOC_REGION_START();
4039 {
4040 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
4041 }
4042 DURING_GC_COULD_MALLOC_REGION_END();
4043 }
4044
4045 return ret;
4046}
4047
4048static int
4049vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
4050{
4052 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4053 int retval = iter_data->callback(*str, iter_data->data);
4054
4055 if (retval == ST_REPLACE) {
4056 retval = iter_data->update_callback(str, iter_data->data);
4057 }
4058
4059 if (retval == ST_DELETE) {
4060 FL_UNSET(*str, RSTRING_FSTR);
4061 }
4062
4063 return retval;
4064}
4065
4066void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
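
/* A note on the callback protocol shared by the weak-table iterators above:
 * `callback` returns ST_CONTINUE to keep an entry, ST_DELETE to drop it, or
 * ST_REPLACE to have `update_callback` rewrite the slot in place (for
 * example, with an object's post-compaction address). */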
4067void
4068rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4069 vm_table_update_callback_func update_callback,
4070 void *data,
4071 bool weak_only,
4072 enum rb_gc_vm_weak_tables table)
4073{
4074 rb_vm_t *vm = GET_VM();
4075
4076 struct global_vm_table_foreach_data foreach_data = {
4077 .callback = callback,
4078 .update_callback = update_callback,
4079 .data = data,
4080 .weak_only = weak_only,
4081 };
4082
4083 switch (table) {
4084 case RB_GC_VM_CI_TABLE: {
4085 if (vm->ci_table) {
4086 st_foreach_with_replace(
4087 vm->ci_table,
4088 vm_weak_table_foreach_weak_key,
4089 vm_weak_table_foreach_update_weak_key,
4090 (st_data_t)&foreach_data
4091 );
4092 }
4093 break;
4094 }
4095 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4096 if (vm->overloaded_cme_table) {
4097 st_foreach_with_replace(
4098 vm->overloaded_cme_table,
4099 vm_weak_table_foreach_weak_key,
4100 vm_weak_table_foreach_update_weak_key,
4101 (st_data_t)&foreach_data
4102 );
4103 }
4104 break;
4105 }
4106 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4107 rb_sym_global_symbol_table_foreach_weak_reference(
4108 vm_weak_table_sym_set_foreach,
4109 &foreach_data
4110 );
4111 break;
4112 }
4113 case RB_GC_VM_ID2REF_TABLE: {
4114 if (id2ref_tbl) {
4115 st_foreach_with_replace(
4116 id2ref_tbl,
4117 vm_weak_table_id2ref_foreach,
4118 vm_weak_table_id2ref_foreach_update,
4119 (st_data_t)&foreach_data
4120 );
4121 }
4122 break;
4123 }
4124 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4125 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4126 if (generic_fields_tbl) {
4127 st_foreach(
4128 generic_fields_tbl,
4129 vm_weak_table_gen_fields_foreach,
4130 (st_data_t)&foreach_data
4131 );
4132 }
4133 break;
4134 }
4135 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4136 rb_fstring_foreach_with_replace(
4137 vm_weak_table_frozen_strings_foreach,
4138 &foreach_data
4139 );
4140 break;
4141 }
4142 case RB_GC_VM_CC_REFINEMENT_TABLE: {
4143 if (vm->cc_refinement_table) {
4144 set_foreach_with_replace(
4145 vm->cc_refinement_table,
4146 vm_weak_table_cc_refinement_foreach,
4147 vm_weak_table_cc_refinement_foreach_update_update,
4148 (st_data_t)&foreach_data
4149 );
4150 }
4151 break;
4152 }
4153 case RB_GC_VM_WEAK_TABLE_COUNT:
4154 rb_bug("Unreachable");
4155 default:
4156 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4157 }
4158}
4159
4160void
4161rb_gc_update_vm_references(void *objspace)
4162{
4163 rb_execution_context_t *ec = GET_EC();
4164 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4165
4166 rb_vm_update_references(vm);
4167 rb_gc_update_global_tbl();
4168 rb_sym_global_symbols_mark_and_move();
4169
4170#if USE_YJIT
4171 void rb_yjit_root_update_references(void); // in Rust
4172
4173 if (rb_yjit_enabled_p) {
4174 rb_yjit_root_update_references();
4175 }
4176#endif
4177
4178#if USE_ZJIT
4179 void rb_zjit_root_update_references(void); // in Rust
4180
4181 if (rb_zjit_enabled_p) {
4182 rb_zjit_root_update_references();
4183 }
4184#endif
4185}
4186
4187void
4188rb_gc_update_object_references(void *objspace, VALUE obj)
4189{
4190 struct classext_foreach_args args;
4191
4192 switch (BUILTIN_TYPE(obj)) {
4193 case T_CLASS:
4194 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4195 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4196 }
        // Fall through to the shared T_CLASS/T_MODULE handling.
4198 case T_MODULE:
4199 args.klass = obj;
4200 args.objspace = objspace;
4201 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4202 break;
4203
4204 case T_ICLASS:
4205 args.objspace = objspace;
4206 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4207 break;
4208
4209 case T_IMEMO:
4210 rb_imemo_mark_and_move(obj, true);
4211 return;
4212
4213 case T_NIL:
4214 case T_FIXNUM:
4215 case T_NODE:
4216 case T_MOVED:
4217 case T_NONE:
4218 /* These can't move */
4219 return;
4220
4221 case T_ARRAY:
4222 gc_ref_update_array(objspace, obj);
4223 break;
4224
4225 case T_HASH:
4226 gc_ref_update_hash(objspace, obj);
4227 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4228 break;
4229
4230 case T_STRING:
4231 {
4232 if (STR_SHARED_P(obj)) {
4233 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4234 }
4235
        /* If, after the move, the string is not embedded but can fit in the
         * slot it has been placed in, then re-embed it. */
4238 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4239 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4240 rb_str_make_embedded(obj);
4241 }
4242 }
4243
4244 break;
4245 }
4246 case T_DATA:
4247 /* Call the compaction callback, if it exists */
4248 {
4249 bool typed_data = RTYPEDDATA_P(obj);
4250 void *const ptr = typed_data ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4251
4252 if (typed_data) {
4253 UPDATE_IF_MOVED(objspace, RTYPEDDATA(obj)->fields_obj);
4254 }
4255
4256 if (ptr) {
4257 if (typed_data && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4258 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4259
4260 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4261 VALUE *ref = (VALUE *)((char *)ptr + offset);
4262 *ref = gc_location_internal(objspace, *ref);
4263 }
4264 }
4265 else if (typed_data) {
4266 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4267 if (compact_func) (*compact_func)(ptr);
4268 }
4269 }
4270 }
4271 break;
4272
4273 case T_OBJECT:
4274 gc_ref_update_object(objspace, obj);
4275 break;
4276
4277 case T_FILE:
4278 if (RFILE(obj)->fptr) {
4279 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4280 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4281 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4282 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4283 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4284 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4285 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4286 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4287 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4288 }
4289 break;
4290 case T_REGEXP:
4291 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4292 break;
4293
4294 case T_SYMBOL:
4295 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4296 break;
4297
4298 case T_FLOAT:
4299 case T_BIGNUM:
4300 break;
4301
4302 case T_MATCH:
4303 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4304
4305 if (RMATCH(obj)->str) {
4306 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4307 }
4308 break;
4309
4310 case T_RATIONAL:
4311 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4312 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4313 break;
4314
4315 case T_COMPLEX:
4316 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4317 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4318
4319 break;
4320
4321 case T_STRUCT:
4322 {
4323 long i, len = RSTRUCT_LEN(obj);
4324 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4325
4326 for (i = 0; i < len; i++) {
4327 UPDATE_IF_MOVED(objspace, ptr[i]);
4328 }
4329
4330 if (RSTRUCT_EMBED_LEN(obj)) {
4331 if (!FL_TEST_RAW(obj, RSTRUCT_GEN_FIELDS)) {
4332 UPDATE_IF_MOVED(objspace, ptr[len]);
4333 }
4334 }
4335 else {
4336 UPDATE_IF_MOVED(objspace, RSTRUCT(obj)->as.heap.fields_obj);
4337 }
4338 }
4339 break;
4340 default:
4341 rb_bug("unreachable");
4342 break;
4343 }
4344
4345 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4346}
4347
4348VALUE
4349rb_gc_start(void)
4350{
4351 rb_gc();
4352 return Qnil;
4353}
4354
4355void
4356rb_gc(void)
4357{
4358 unless_objspace(objspace) { return; }
4359
4360 rb_gc_impl_start(objspace, true, true, true, false);
4361}
4362
4363int
4364rb_during_gc(void)
4365{
4366 unless_objspace(objspace) { return FALSE; }
4367
4368 return rb_gc_impl_during_gc_p(objspace);
4369}
4370
4371size_t
4372rb_gc_count(void)
4373{
4374 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4375}
4376
4377static VALUE
4378gc_count(rb_execution_context_t *ec, VALUE self)
4379{
4380 return SIZET2NUM(rb_gc_count());
4381}
4382
4383VALUE
4384rb_gc_latest_gc_info(VALUE key)
4385{
4386 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4387 rb_raise(rb_eTypeError, "non-hash or symbol given");
4388 }
4389
4390 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4391
4392 if (val == Qundef) {
4393 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4394 }
4395
4396 return val;
4397}
4398
4399static VALUE
4400gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4401{
4402 if (NIL_P(arg)) {
4403 arg = rb_hash_new();
4404 }
4405 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4406 rb_raise(rb_eTypeError, "non-hash or symbol given");
4407 }
4408
4409 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4410
4411 if (ret == Qundef) {
4412 GC_ASSERT(SYMBOL_P(arg));
4413
4414 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4415 }
4416
4417 return ret;
4418}
4419
4420size_t
4421rb_gc_stat(VALUE arg)
4422{
4423 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4424 rb_raise(rb_eTypeError, "non-hash or symbol given");
4425 }
4426
4427 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4428
4429 if (ret == Qundef) {
4430 GC_ASSERT(SYMBOL_P(arg));
4431
4432 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4433 }
4434
4435 if (SYMBOL_P(arg)) {
4436 return NUM2SIZET(ret);
4437 }
4438 else {
4439 return 0;
4440 }
4441}
4442
4443static VALUE
4444gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4445{
4446 if (NIL_P(arg)) {
4447 arg = rb_hash_new();
4448 }
4449
4450 if (NIL_P(heap_name)) {
4451 if (!RB_TYPE_P(arg, T_HASH)) {
4452 rb_raise(rb_eTypeError, "non-hash given");
4453 }
4454 }
4455 else if (FIXNUM_P(heap_name)) {
4456 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4457 rb_raise(rb_eTypeError, "non-hash or symbol given");
4458 }
4459 }
4460 else {
4461 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4462 }
4463
4464 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4465
4466 if (ret == Qundef) {
4467 GC_ASSERT(SYMBOL_P(arg));
4468
4469 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4470 }
4471
4472 return ret;
4473}
4474
4475static VALUE
4476gc_config_get(rb_execution_context_t *ec, VALUE self)
4477{
4478 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4479 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4480
4481 return cfg_hash;
4482}
4483
4484static VALUE
4485gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4486{
4487 void *objspace = rb_gc_get_objspace();
4488
4489 rb_gc_impl_config_set(objspace, hash);
4490
4491 return Qnil;
4492}
4493
4494static VALUE
4495gc_stress_get(rb_execution_context_t *ec, VALUE self)
4496{
4497 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4498}
4499
4500static VALUE
4501gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4502{
4503 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4504
4505 return flag;
4506}
4507
4508void
4509rb_gc_initial_stress_set(VALUE flag)
4510{
4511 initial_stress = flag;
4512}
4513
4514size_t *
4515rb_gc_heap_sizes(void)
4516{
4517 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4518}
4519
4520VALUE
4521rb_gc_enable(void)
4522{
4523 return rb_objspace_gc_enable(rb_gc_get_objspace());
4524}
4525
4526VALUE
4527rb_objspace_gc_enable(void *objspace)
4528{
4529 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4530 rb_gc_impl_gc_enable(objspace);
4531 return RBOOL(disabled);
4532}
4533
4534static VALUE
4535gc_enable(rb_execution_context_t *ec, VALUE _)
4536{
4537 return rb_gc_enable();
4538}
4539
4540static VALUE
4541gc_disable_no_rest(void *objspace)
4542{
4543 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4544 rb_gc_impl_gc_disable(objspace, false);
4545 return RBOOL(disabled);
4546}
4547
4548VALUE
4549rb_gc_disable_no_rest(void)
4550{
4551 return gc_disable_no_rest(rb_gc_get_objspace());
4552}
4553
4554VALUE
4555rb_gc_disable(void)
4556{
4557 return rb_objspace_gc_disable(rb_gc_get_objspace());
4558}
4559
4560VALUE
4561rb_objspace_gc_disable(void *objspace)
4562{
4563 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4564 rb_gc_impl_gc_disable(objspace, true);
4565 return RBOOL(disabled);
4566}
4567
4568static VALUE
4569gc_disable(rb_execution_context_t *ec, VALUE _)
4570{
4571 return rb_gc_disable();
4572}
4573
4574// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4575void
4576ruby_gc_set_params(void)
4577{
4578 rb_gc_impl_set_params(rb_gc_get_objspace());
4579}
4580
4581void
4582rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4583{
4584 RB_VM_LOCKING() {
        if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4586
4587 if (!RB_SPECIAL_CONST_P(obj)) {
4588 rb_vm_t *vm = GET_VM();
4589 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4590 struct gc_mark_func_data_struct mfd = {
4591 .mark_func = func,
4592 .data = data,
4593 };
4594
4595 vm->gc.mark_func_data = &mfd;
4596 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4597 vm->gc.mark_func_data = prev_mfd;
4598 }
4599 }
4600}
4601
struct root_objects_data {
    const char *category;
4604 void (*func)(const char *category, VALUE, void *);
4605 void *data;
4606};
4607
4608static void
4609root_objects_from(VALUE obj, void *ptr)
4610{
4611 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4612 (*data->func)(data->category, obj, data->data);
4613}
4614
4615void
4616rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4617{
    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4619
4620 rb_vm_t *vm = GET_VM();
4621
4622 struct root_objects_data data = {
4623 .func = func,
4624 .data = passing_data,
4625 };
4626
4627 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4628 struct gc_mark_func_data_struct mfd = {
4629 .mark_func = root_objects_from,
4630 .data = &data,
4631 };
4632
4633 vm->gc.mark_func_data = &mfd;
4634 rb_gc_save_machine_context();
4635 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4636 vm->gc.mark_func_data = prev_mfd;
4637}
4638
4639/*
4640 ------------------------------ DEBUG ------------------------------
4641*/
4642
4643static const char *
4644type_name(int type, VALUE obj)
4645{
4646 switch (type) {
4647#define TYPE_NAME(t) case (t): return #t;
4648 TYPE_NAME(T_NONE);
4649 TYPE_NAME(T_OBJECT);
4650 TYPE_NAME(T_CLASS);
4651 TYPE_NAME(T_MODULE);
4652 TYPE_NAME(T_FLOAT);
4653 TYPE_NAME(T_STRING);
4654 TYPE_NAME(T_REGEXP);
4655 TYPE_NAME(T_ARRAY);
4656 TYPE_NAME(T_HASH);
4657 TYPE_NAME(T_STRUCT);
4658 TYPE_NAME(T_BIGNUM);
4659 TYPE_NAME(T_FILE);
4660 TYPE_NAME(T_MATCH);
4661 TYPE_NAME(T_COMPLEX);
4662 TYPE_NAME(T_RATIONAL);
4663 TYPE_NAME(T_NIL);
4664 TYPE_NAME(T_TRUE);
4665 TYPE_NAME(T_FALSE);
4666 TYPE_NAME(T_SYMBOL);
4667 TYPE_NAME(T_FIXNUM);
4668 TYPE_NAME(T_UNDEF);
4669 TYPE_NAME(T_IMEMO);
4670 TYPE_NAME(T_ICLASS);
4671 TYPE_NAME(T_MOVED);
4672 TYPE_NAME(T_ZOMBIE);
4673 case T_DATA:
4674 if (obj && rb_objspace_data_type_name(obj)) {
4675 return rb_objspace_data_type_name(obj);
4676 }
4677 return "T_DATA";
4678#undef TYPE_NAME
4679 }
4680 return "unknown";
4681}
4682
4683static const char *
4684obj_type_name(VALUE obj)
4685{
4686 return type_name(TYPE(obj), obj);
4687}
4688
4689const char *
4690rb_method_type_name(rb_method_type_t type)
4691{
4692 switch (type) {
4693 case VM_METHOD_TYPE_ISEQ: return "iseq";
      case VM_METHOD_TYPE_ATTRSET: return "attrset";
4695 case VM_METHOD_TYPE_IVAR: return "ivar";
4696 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4697 case VM_METHOD_TYPE_ALIAS: return "alias";
4698 case VM_METHOD_TYPE_REFINED: return "refined";
4699 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4700 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4701 case VM_METHOD_TYPE_MISSING: return "missing";
4702 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4703 case VM_METHOD_TYPE_UNDEF: return "undef";
4704 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4705 }
4706 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4707}
4708
4709static void
4710rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4711{
4712 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4713 VALUE path = rb_iseq_path(iseq);
4714 int n = ISEQ_BODY(iseq)->location.first_lineno;
4715 snprintf(buff, buff_size, " %s@%s:%d",
4716 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4717 RSTRING_PTR(path), n);
4718 }
4719}
4720
4721static int
4722str_len_no_raise(VALUE str)
4723{
4724 long len = RSTRING_LEN(str);
4725 if (len < 0) return 0;
4726 if (len > INT_MAX) return INT_MAX;
4727 return (int)len;
4728}
4729
4730#define BUFF_ARGS buff + pos, buff_size - pos
4731#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4732#define APPEND_S(s) do { \
4733 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4734 goto end; \
4735 } \
4736 else { \
4737 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4738 } \
4739 } while (0)
4740#define C(c, s) ((c) != 0 ? (s) : " ")
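/* APPEND_F()/APPEND_S() append formatted text to `buff` at `pos` and jump to
 * the local `end` label once the buffer is exhausted; C(c, s) yields the flag
 * string `s` when `c` is nonzero and a space otherwise. */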
4741
4742static size_t
4743rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4744{
4745 size_t pos = 0;
4746
4747 if (SPECIAL_CONST_P(obj)) {
4748 APPEND_F("%s", obj_type_name(obj));
4749
4750 if (FIXNUM_P(obj)) {
4751 APPEND_F(" %ld", FIX2LONG(obj));
4752 }
4753 else if (SYMBOL_P(obj)) {
4754 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4755 }
4756 }
4757 else {
4758 // const int age = RVALUE_AGE_GET(obj);
4759
4760 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4761 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4762 // TODO: fixme
4763 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4764 // (void *)obj, age,
4765 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4766 // C(RVALUE_MARK_BITMAP(obj), "M"),
4767 // C(RVALUE_PIN_BITMAP(obj), "P"),
4768 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4769 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4770 // C(rb_objspace_garbage_object_p(obj), "G"),
4771 // obj_type_name(obj));
4772 }
4773 else {
4774 /* fake */
4775 // APPEND_F("%p [%dXXXX] %s",
4776 // (void *)obj, age,
4777 // obj_type_name(obj));
4778 }
4779
4780 if (internal_object_p(obj)) {
4781 /* ignore */
4782 }
4783 else if (RBASIC(obj)->klass == 0) {
4784 APPEND_S("(temporary internal)");
4785 }
4786 else if (RTEST(RBASIC(obj)->klass)) {
4787 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4788 if (!NIL_P(class_path)) {
4789 APPEND_F("%s ", RSTRING_PTR(class_path));
4790 }
4791 }
4792 }
4793 end:
4794
4795 return pos;
4796}
4797
4798const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4799
4800static size_t
rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4802{
4803 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4804 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4805
4806 switch (type) {
4807 case T_NODE:
4808 UNEXPECTED_NODE(rb_raw_obj_info);
4809 break;
4810 case T_ARRAY:
4811 if (ARY_SHARED_P(obj)) {
4812 APPEND_S("shared -> ");
4813 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4814 }
4815 else {
4816 APPEND_F("[%s%s%s] ",
4817 C(ARY_EMBED_P(obj), "E"),
4818 C(ARY_SHARED_P(obj), "S"),
4819 C(ARY_SHARED_ROOT_P(obj), "R"));
4820
4821 if (ARY_EMBED_P(obj)) {
4822 APPEND_F("len: %ld (embed)",
4823 RARRAY_LEN(obj));
4824 }
4825 else {
4826 APPEND_F("len: %ld, capa:%ld ptr:%p",
4827 RARRAY_LEN(obj),
4828 RARRAY(obj)->as.heap.aux.capa,
4829 (void *)RARRAY_CONST_PTR(obj));
4830 }
4831 }
4832 break;
4833 case T_STRING: {
4834 if (STR_SHARED_P(obj)) {
4835 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4836 }
4837 else {
4838 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4839
4840 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4841 }
4842 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4843 break;
4844 }
4845 case T_SYMBOL: {
4846 VALUE fstr = RSYMBOL(obj)->fstr;
4847 ID id = RSYMBOL(obj)->id;
4848 if (RB_TYPE_P(fstr, T_STRING)) {
4849 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4850 }
4851 else {
4852 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4853 }
4854 break;
4855 }
4856 case T_MOVED: {
4857 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4858 break;
4859 }
4860 case T_HASH: {
4861 APPEND_F("[%c] %"PRIdSIZE,
4862 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4863 RHASH_SIZE(obj));
4864 break;
4865 }
4866 case T_CLASS:
4867 case T_MODULE:
4868 {
4869 VALUE class_path = rb_class_path_cached(obj);
4870 if (!NIL_P(class_path)) {
4871 APPEND_F("%s", RSTRING_PTR(class_path));
4872 }
4873 else {
4874 APPEND_S("(anon)");
4875 }
4876 break;
4877 }
4878 case T_ICLASS:
4879 {
4880 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4881 if (!NIL_P(class_path)) {
4882 APPEND_F("src:%s", RSTRING_PTR(class_path));
4883 }
4884 break;
4885 }
4886 case T_OBJECT:
4887 {
4888 if (FL_TEST_RAW(obj, ROBJECT_HEAP)) {
4889 if (rb_shape_obj_too_complex_p(obj)) {
4890 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
4891 APPEND_F("(too_complex) len:%zu", hash_len);
4892 }
4893 else {
4894 APPEND_F("(embed) len:%d capa:%d", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj));
4895 }
4896 }
4897 else {
4898 APPEND_F("len:%d capa:%d ptr:%p", RSHAPE_LEN(RBASIC_SHAPE_ID(obj)), ROBJECT_FIELDS_CAPACITY(obj), (void *)ROBJECT_FIELDS(obj));
4899 }
4900 }
4901 break;
4902 case T_DATA: {
4903 const struct rb_block *block;
4904 const rb_iseq_t *iseq;
4905 if (rb_obj_is_proc(obj) &&
4906 (block = vm_proc_block(obj)) != NULL &&
4907 (vm_block_type(block) == block_type_iseq) &&
4908 (iseq = vm_block_iseq(block)) != NULL) {
4909 rb_raw_iseq_info(BUFF_ARGS, iseq);
4910 }
4911 else if (rb_ractor_p(obj)) {
4912 rb_ractor_t *r = (void *)DATA_PTR(obj);
4913 if (r) {
4914 APPEND_F("r:%d", r->pub.id);
4915 }
4916 }
4917 else {
4918 const char * const type_name = rb_objspace_data_type_name(obj);
4919 if (type_name) {
4920 APPEND_F("%s", type_name);
4921 }
4922 }
4923 break;
4924 }
4925 case T_IMEMO: {
4926 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4927
4928 switch (imemo_type(obj)) {
4929 case imemo_ment:
4930 {
4931 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4932
4933 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4934 rb_id2name(me->called_id),
4935 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4936 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4937 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4938 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4939 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4940 me->def ? rb_method_type_name(me->def->type) : "NULL",
4941 me->def ? me->def->aliased : -1,
4942 (void *)me->owner, // obj_info(me->owner),
4943 (void *)me->defined_class); //obj_info(me->defined_class)));
4944
4945 if (me->def) {
4946 switch (me->def->type) {
4947 case VM_METHOD_TYPE_ISEQ:
4948 APPEND_S(" (iseq:");
4949 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4950 APPEND_S(")");
4951 break;
4952 default:
4953 break;
4954 }
4955 }
4956
4957 break;
4958 }
4959 case imemo_iseq: {
4960 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4961 rb_raw_iseq_info(BUFF_ARGS, iseq);
4962 break;
4963 }
4964 case imemo_callinfo:
4965 {
4966 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4967 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4968 rb_id2name(vm_ci_mid(ci)),
4969 vm_ci_flag(ci),
4970 vm_ci_argc(ci),
4971 vm_ci_kwarg(ci) ? "available" : "NULL");
4972 break;
4973 }
4974 case imemo_callcache:
4975 {
4976 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4977 VALUE class_path = vm_cc_valid(cc) ? rb_class_path_cached(cc->klass) : Qnil;
4978 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4979
4980 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4981 NIL_P(class_path) ? (vm_cc_valid(cc) ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4982 cme ? rb_id2name(cme->called_id) : "<NULL>",
4983 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4984 (void *)cme,
4985 (void *)(uintptr_t)vm_cc_call(cc));
4986 break;
4987 }
4988 default:
4989 break;
4990 }
4991 }
4992 default:
4993 break;
4994 }
4995 }
4996 end:
4997
4998 return pos;
4999}
5000
5001#undef C
5002
5003#ifdef RUBY_ASAN_ENABLED
5004void
5005rb_asan_poison_object(VALUE obj)
5006{
5007 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5008 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
5009}
5010
5011void
5012rb_asan_unpoison_object(VALUE obj, bool newobj_p)
5013{
5014 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5015 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
5016}
5017
5018void *
5019rb_asan_poisoned_object_p(VALUE obj)
5020{
5021 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5022 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
5023}
5024#endif
5025
5026static void
5027raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5028{
5029 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
    pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
5031 if (pos >= buff_size) {} // truncated
5032}
5033
5034const char *
5035rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5036{
5037 void *objspace = rb_gc_get_objspace();
5038
5039 if (SPECIAL_CONST_P(obj)) {
5040 raw_obj_info(buff, buff_size, obj);
5041 }
5042 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
5043 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
5044 }
5045#if 0 // maybe no need to check it?
5046 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
5047 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
5048 }
5049#endif
5050 else {
5051 asan_unpoisoning_object(obj) {
5052 raw_obj_info(buff, buff_size, obj);
5053 }
5054 }
5055 return buff;
5056}
5057
5058#undef APPEND_S
5059#undef APPEND_F
5060#undef BUFF_ARGS
5061
/* Atomically increments *var, resetting it to 0 once maxval is reached.
 * Returns the old *var value, wrapped into the range (0...maxval). */
5064static rb_atomic_t
5065atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5066{
5067 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5068 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5069 const rb_atomic_t newval = oldval + 1;
5070 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5071 oldval %= maxval;
5072 }
5073 return oldval;
5074}
5075
5076static const char *
5077obj_info(VALUE obj)
5078{
5079 if (RGENGC_OBJ_INFO) {
5080 static struct {
5081 rb_atomic_t index;
5082 char buffers[10][0x100];
5083 } info = {0};
5084
5085 rb_atomic_t index = atomic_inc_wraparound(&info.index, numberof(info.buffers));
5086 char *const buff = info.buffers[index];
5087 return rb_raw_obj_info(buff, sizeof(info.buffers[0]), obj);
5088 }
5089 return obj_type_name(obj);
5090}
5091
5092/*
5093 ------------------------ Extended allocator ------------------------
5094*/
5095
struct gc_raise_tag {
    VALUE exc;
5098 const char *fmt;
5099 va_list *ap;
5100};
5101
5102static void *
5103gc_vraise(void *ptr)
5104{
5105 struct gc_raise_tag *argv = ptr;
5106 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5107 UNREACHABLE_RETURN(NULL);
5108}
5109
5110static void
5111gc_raise(VALUE exc, const char *fmt, ...)
5112{
5113 va_list ap;
5114 va_start(ap, fmt);
5115 struct gc_raise_tag argv = {
5116 exc, fmt, &ap,
5117 };
5118
5119 if (ruby_native_thread_p()) {
        rb_thread_call_with_gvl(gc_vraise, &argv);
        UNREACHABLE;
5122 }
5123 else {
5124 /* Not in a ruby thread */
5125 fprintf(stderr, "%s", "[FATAL] ");
5126 vfprintf(stderr, fmt, ap);
5127 }
5128
5129 va_end(ap);
5130 abort();
5131}
5132
5133NORETURN(static void negative_size_allocation_error(const char *));
5134static void
5135negative_size_allocation_error(const char *msg)
5136{
5137 gc_raise(rb_eNoMemError, "%s", msg);
5138}
5139
5140static void *
5141ruby_memerror_body(void *dummy)
5142{
5143 rb_memerror();
5144 return 0;
5145}
5146
5147NORETURN(static void ruby_memerror(void));
RBIMPL_ATTR_MAYBE_UNUSED()
static void
5150ruby_memerror(void)
5151{
5152 if (ruby_thread_has_gvl_p()) {
5153 rb_memerror();
5154 }
5155 else {
5156 if (ruby_native_thread_p()) {
5157 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5158 }
5159 else {
5160 /* no ruby thread */
5161 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5162 }
5163 }
5164
    /* There is ongoing discussion about whether we should die here; */
    /* we might rethink it later. */
5167 exit(EXIT_FAILURE);
5168}
5169
5170void
5171rb_memerror(void)
5172{
5173 /* the `GET_VM()->special_exceptions` below assumes that
5174 * the VM is reachable from the current thread. We should
5175 * definitely make sure of that. */
5176 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5177
5178 rb_execution_context_t *ec = GET_EC();
5179 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5180
5181 if (!exc ||
5182 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5183 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5184 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5185 exit(EXIT_FAILURE);
5186 }
5187 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5188 rb_ec_raised_clear(ec);
5189 }
5190 else {
5191 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5192 exc = ruby_vm_special_exception_copy(exc);
5193 }
5194 ec->errinfo = exc;
5195 EC_JUMP_TAG(ec, TAG_RAISE);
5196}
5197
5198bool
5199rb_memerror_reentered(void)
5200{
5201 rb_execution_context_t *ec = GET_EC();
5202 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5203}
5204
5205static void *
5206handle_malloc_failure(void *ptr)
5207{
5208 if (LIKELY(ptr)) {
5209 return ptr;
5210 }
5211 else {
5212 ruby_memerror();
5213 UNREACHABLE_RETURN(ptr);
5214 }
5215}
5216
5217static void *ruby_xmalloc_body(size_t size);
5218
5219void *
5220ruby_xmalloc(size_t size)
5221{
5222 return handle_malloc_failure(ruby_xmalloc_body(size));
5223}
5224
5225static bool
5226malloc_gc_allowed(void)
5227{
5228 rb_ractor_t *r = rb_current_ractor_raw(false);
5229
5230 return r == NULL || !r->malloc_gc_disabled;
5231}
5232
5233static void *
5234ruby_xmalloc_body(size_t size)
5235{
5236 if ((ssize_t)size < 0) {
5237 negative_size_allocation_error("too large allocation size");
5238 }
5239
5240 return rb_gc_impl_malloc(rb_gc_get_objspace(), size, malloc_gc_allowed());
5241}
5242
5243void
5244ruby_malloc_size_overflow(size_t count, size_t elsize)
5245{
5246 rb_raise(rb_eArgError,
5247 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5248 count, elsize);
5249}
5250
5251void
5252ruby_malloc_add_size_overflow(size_t x, size_t y)
5253{
5254 rb_raise(rb_eArgError,
5255 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5256 x, y);
5257}
5258
5259static void *ruby_xmalloc2_body(size_t n, size_t size);
5260
5261void *
5262ruby_xmalloc2(size_t n, size_t size)
5263{
5264 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5265}
5266
5267static void *
5268ruby_xmalloc2_body(size_t n, size_t size)
5269{
5270 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5271}
5272
5273static void *ruby_xcalloc_body(size_t n, size_t size);
5274
5275void *
5276ruby_xcalloc(size_t n, size_t size)
5277{
5278 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5279}
5280
5281static void *
5282ruby_xcalloc_body(size_t n, size_t size)
5283{
5284 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size), malloc_gc_allowed());
5285}
5286
5287static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
5288
5289#ifdef ruby_sized_xrealloc
5290#undef ruby_sized_xrealloc
5291#endif
5292void *
5293ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
5294{
5295 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
5296}
5297
5298static void *
5299ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
5300{
5301 if ((ssize_t)new_size < 0) {
5302 negative_size_allocation_error("too large allocation size");
5303 }
5304
5305 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size, malloc_gc_allowed());
5306}
5307
5308void *
5309ruby_xrealloc(void *ptr, size_t new_size)
5310{
5311 return ruby_sized_xrealloc(ptr, new_size, 0);
5312}
5313
5314static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
5315
5316#ifdef ruby_sized_xrealloc2
5317#undef ruby_sized_xrealloc2
5318#endif
5319void *
5320ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
5321{
5322 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
5323}
5324
5325static void *
5326ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
5327{
5328 size_t len = xmalloc2_size(n, size);
5329 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size, malloc_gc_allowed());
5330}
5331
5332void *
5333ruby_xrealloc2(void *ptr, size_t n, size_t size)
5334{
5335 return ruby_sized_xrealloc2(ptr, n, size, 0);
5336}
5337
5338#ifdef ruby_sized_xfree
5339#undef ruby_sized_xfree
5340#endif
5341void
5342ruby_sized_xfree(void *x, size_t size)
5343{
5344 if (LIKELY(x)) {
5345 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5346 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5347 * that case. */
5348 if (LIKELY(GET_VM())) {
5349 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5350 }
5351 else {
5352 ruby_mimfree(x);
5353 }
5354 }
5355}
5356
5357void
5358ruby_xfree(void *x)
5359{
5360 ruby_sized_xfree(x, 0);
5361}
5362
5363void *
5364rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5365{
5366 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5367 return ruby_xmalloc(w);
5368}
5369
5370void *
5371rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5372{
5373 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5374 return ruby_xcalloc(w, 1);
5375}
5376
5377void *
5378rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5379{
5380 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5381 return ruby_xrealloc((void *)p, w);
5382}
5383
5384void *
5385rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5386{
5387 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5388 return ruby_xmalloc(u);
5389}
5390
5391void *
5392rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5393{
5394 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5395 return ruby_xcalloc(u, 1);
5396}
5397
/* Mimics ruby_xmalloc, but does not require rb_objspace.
 * Returns a pointer suitable for passing to ruby_xfree.
 */
5401void *
5402ruby_mimmalloc(size_t size)
5403{
5404 void *mem;
5405#if CALC_EXACT_MALLOC_SIZE
5406 size += sizeof(struct malloc_obj_info);
5407#endif
5408 mem = malloc(size);
5409#if CALC_EXACT_MALLOC_SIZE
5410 if (!mem) {
5411 return NULL;
5412 }
5413 else
5414 /* set 0 for consistency of allocated_size/allocations */
5415 {
5416 struct malloc_obj_info *info = mem;
5417 info->size = 0;
5418 mem = info + 1;
5419 }
5420#endif
5421 return mem;
5422}
5423
5424void *
5425ruby_mimcalloc(size_t num, size_t size)
5426{
5427 void *mem;
5428#if CALC_EXACT_MALLOC_SIZE
5429 struct rbimpl_size_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5430 if (UNLIKELY(t.overflowed)) {
5431 return NULL;
5432 }
5433 size = t.result + sizeof(struct malloc_obj_info);
5434 mem = calloc1(size);
5435 if (!mem) {
5436 return NULL;
5437 }
5438 else
5439 /* set 0 for consistency of allocated_size/allocations */
5440 {
5441 struct malloc_obj_info *info = mem;
5442 info->size = 0;
5443 mem = info + 1;
5444 }
5445#else
5446 mem = calloc(num, size);
5447#endif
5448 return mem;
5449}
5450
5451void
5452ruby_mimfree(void *ptr)
5453{
5454#if CALC_EXACT_MALLOC_SIZE
5455 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5456 ptr = info;
5457#endif
5458 free(ptr);
5459}
5460
5461void
5462rb_gc_adjust_memory_usage(ssize_t diff)
5463{
5464 unless_objspace(objspace) { return; }
5465
5466 rb_gc_impl_adjust_memory_usage(objspace, diff);
5467}
5468
5469const char *
5470rb_obj_info(VALUE obj)
5471{
5472 return obj_info(obj);
5473}
5474
5475void
5476rb_obj_info_dump(VALUE obj)
5477{
5478 char buff[0x100];
5479 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5480}
5481
5482void
5483rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5484{
5485 char buff[0x100];
5486 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5487}
5488
5489void
5490rb_gc_before_fork(void)
5491{
5492 rb_gc_impl_before_fork(rb_gc_get_objspace());
5493}
5494
5495void
5496rb_gc_after_fork(rb_pid_t pid)
5497{
5498 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5499}
5500
5501bool
5502rb_gc_obj_shareable_p(VALUE obj)
5503{
5504 return RB_OBJ_SHAREABLE_P(obj);
5505}
5506
5507void
5508rb_gc_rp(VALUE obj)
5509{
5510 rp(obj);
5511}
5512
struct check_shareable_data {
    VALUE parent;
5515 long err_count;
5516};
5517
5518static void
5519check_shareable_i(const VALUE child, void *ptr)
5520{
5521 struct check_shareable_data *data = (struct check_shareable_data *)ptr;
5522
5523 if (!rb_gc_obj_shareable_p(child)) {
5524 fprintf(stderr, "(a) ");
5525 rb_gc_rp(data->parent);
5526 fprintf(stderr, "(b) ");
5527 rb_gc_rp(child);
5528 fprintf(stderr, "check_shareable_i: shareable (a) -> unshareable (b)\n");
5529
5530 data->err_count++;
5531 rb_bug("!! violate shareable constraint !!");
5532 }
5533}
5534
5535static bool gc_checking_shareable = false;
5536
5537static void
5538gc_verify_shareable(void *objspace, VALUE obj, void *data)
5539{
    // While gc_checking_shareable is true, other Ractors must not run the GC,
    // because the flag is global rather than Ractor-local.
    // TODO: remove the VM locking once the flag is made Ractor-local.
5543
5544 unsigned int lev = RB_GC_VM_LOCK();
5545 {
5546 gc_checking_shareable = true;
5547 rb_objspace_reachable_objects_from(obj, check_shareable_i, (void *)data);
5548 gc_checking_shareable = false;
5549 }
5550 RB_GC_VM_UNLOCK(lev);
5551}
5552
5553// TODO: only one level (non-recursive)
5554void
5555rb_gc_verify_shareable(VALUE obj)
5556{
5557 rb_objspace_t *objspace = rb_gc_get_objspace();
5558 struct check_shareable_data data = {
5559 .parent = obj,
5560 .err_count = 0,
5561 };
5562 gc_verify_shareable(objspace, obj, &data);
5563
5564 if (data.err_count > 0) {
5565 rb_bug("rb_gc_verify_shareable");
5566 }
5567}
5568
5569bool
5570rb_gc_checking_shareable(void)
5571{
5572 return gc_checking_shareable;
5573}
5574
5575/*
5576 * Document-module: ObjectSpace
5577 *
5578 * The ObjectSpace module contains a number of routines
5579 * that interact with the garbage collection facility and allow you to
5580 * traverse all living objects with an iterator.
5581 *
5582 * ObjectSpace also provides support for object finalizers, procs that will be
5583 * called after a specific object was destroyed by garbage collection. See
5584 * the documentation for +ObjectSpace.define_finalizer+ for important
5585 * information on how to use this method correctly.
5586 *
5587 * a = "A"
5588 * b = "B"
5589 *
5590 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5591 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5592 *
5593 * a = nil
5594 * b = nil
5595 *
5596 * _produces:_
5597 *
5598 * Finalizer two on 537763470
5599 * Finalizer one on 537763480
5600 */
5601
5602/* Document-class: GC::Profiler
5603 *
5604 * The GC profiler provides access to information on GC runs including time,
5605 * length and object space size.
5606 *
5607 * Example:
5608 *
5609 * GC::Profiler.enable
5610 *
5611 * require 'rdoc/rdoc'
5612 *
5613 * GC::Profiler.report
5614 *
5615 * GC::Profiler.disable
5616 *
5617 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5618 */
5619
5620#include "gc.rbinc"
5621
5622void
5623Init_GC(void)
5624{
5625#undef rb_intern
5626 rb_gc_register_address(&id2ref_value);
5627
5628 malloc_offset = gc_compute_malloc_offset();
5629
5630 rb_mGC = rb_define_module("GC");
5631
5632 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5633
5634 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5635
5636 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5637 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5638
5639 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5640
5641 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5642
5643 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5644 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5645
5646 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5647
5648 rb_gc_impl_init();
5649}
5650
5651// Set a name for the anonymous virtual memory area. `addr` is the starting
5652// address of the area and `size` is its length in bytes. `name` is a
5653// NUL-terminated human-readable string.
5654//
5655// This function is usually called after calling `mmap()`. The human-readable
5656// annotation helps developers identify the call site of `mmap()` that created
5657// the memory mapping.
5658//
5659// This function currently only works on Linux 5.17 or higher. After calling
5660// this function, we can see annotations in the form of "[anon:...]" in
5661// `/proc/self/maps`, where `...` is the content of `name`. This function has
5662// no effect when called on other platforms.
5663void
5664ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5665{
5666#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5667 // The name length cannot exceed 80 (including the '\0').
5668 RUBY_ASSERT(strlen(name) < 80);
5669 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5670 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5671 // reasons.
5672 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5673 // 2. addr is an invalid address.
5674 // 3. The string pointed by name is too long.
5675 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5676 // happen if we run the compiled binary on an old kernel. In theory, all
5677 // other errors should result in a failure. But since EINVAL cannot tell
5678 // the first error from others, and this function is mainly used for
5679 // debugging, we silently ignore the error.
5680 errno = 0;
5681#endif
5682}