Ruby 3.5.0dev (2025-07-25 revision 9e105a503705b33294da649c1d36051b78008df6)
gc.c (9e105a503705b33294da649c1d36051b78008df6)
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
23# include "wasm/setjmp.h"
24# include "wasm/machine.h"
25#else
26# include <setjmp.h>
27#endif
28#include <stdarg.h>
29#include <stdio.h>
30
31/* MALLOC_HEADERS_BEGIN */
32#ifndef HAVE_MALLOC_USABLE_SIZE
33# ifdef _WIN32
34# define HAVE_MALLOC_USABLE_SIZE
35# define malloc_usable_size(a) _msize(a)
36# elif defined HAVE_MALLOC_SIZE
37# define HAVE_MALLOC_USABLE_SIZE
38# define malloc_usable_size(a) malloc_size(a)
39# endif
40#endif
41
42#ifdef HAVE_MALLOC_USABLE_SIZE
43# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
44/* Alternative malloc header is included in ruby/missing.h */
45# elif defined(HAVE_MALLOC_H)
46# include <malloc.h>
47# elif defined(HAVE_MALLOC_NP_H)
48# include <malloc_np.h>
49# elif defined(HAVE_MALLOC_MALLOC_H)
50# include <malloc/malloc.h>
51# endif
52#endif
53
54/* MALLOC_HEADERS_END */
55
56#ifdef HAVE_SYS_TIME_H
57# include <sys/time.h>
58#endif
59
60#ifdef HAVE_SYS_RESOURCE_H
61# include <sys/resource.h>
62#endif
63
64#if defined _WIN32 || defined __CYGWIN__
65# include <windows.h>
66#elif defined(HAVE_POSIX_MEMALIGN)
67#elif defined(HAVE_MEMALIGN)
68# include <malloc.h>
69#endif
70
71#include <sys/types.h>
72
73#ifdef __EMSCRIPTEN__
74#include <emscripten.h>
75#endif
76
77/* For ruby_annotate_mmap */
78#ifdef HAVE_SYS_PRCTL_H
79#include <sys/prctl.h>
80#endif
81
82#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
83
84#include "constant.h"
85#include "darray.h"
86#include "debug_counter.h"
87#include "eval_intern.h"
88#include "gc/gc.h"
89#include "id_table.h"
90#include "internal.h"
91#include "internal/class.h"
92#include "internal/compile.h"
93#include "internal/complex.h"
94#include "internal/concurrent_set.h"
95#include "internal/cont.h"
96#include "internal/error.h"
97#include "internal/eval.h"
98#include "internal/gc.h"
99#include "internal/hash.h"
100#include "internal/imemo.h"
101#include "internal/io.h"
102#include "internal/numeric.h"
103#include "internal/object.h"
104#include "internal/proc.h"
105#include "internal/rational.h"
106#include "internal/sanitizers.h"
107#include "internal/struct.h"
108#include "internal/symbol.h"
109#include "internal/thread.h"
110#include "internal/variable.h"
111#include "internal/warnings.h"
112#include "probes.h"
113#include "regint.h"
114#include "ruby/debug.h"
115#include "ruby/io.h"
116#include "ruby/re.h"
117#include "ruby/st.h"
118#include "ruby/thread.h"
119#include "ruby/util.h"
120#include "ruby/vm.h"
121#include "ruby_assert.h"
122#include "ruby_atomic.h"
123#include "symbol.h"
124#include "variable.h"
125#include "vm_core.h"
126#include "vm_sync.h"
127#include "vm_callinfo.h"
128#include "ractor_core.h"
129#include "yjit.h"
130
131#include "builtin.h"
132#include "shape.h"
133
134unsigned int
135rb_gc_vm_lock(const char *file, int line)
136{
137 unsigned int lev = 0;
138 rb_vm_lock_enter(&lev, file, line);
139 return lev;
140}
141
142void
143rb_gc_vm_unlock(unsigned int lev, const char *file, int line)
144{
145 rb_vm_lock_leave(&lev, file, line);
146}
147
148unsigned int
149rb_gc_cr_lock(const char *file, int line)
150{
151 unsigned int lev;
152 rb_vm_lock_enter_cr(GET_RACTOR(), &lev, file, line);
153 return lev;
154}
155
156void
157rb_gc_cr_unlock(unsigned int lev, const char *file, int line)
158{
159 rb_vm_lock_leave_cr(GET_RACTOR(), &lev, file, line);
160}
161
162unsigned int
163rb_gc_vm_lock_no_barrier(const char *file, int line)
164{
165 unsigned int lev = 0;
166 rb_vm_lock_enter_nb(&lev, file, line);
167 return lev;
168}
169
170void
171rb_gc_vm_unlock_no_barrier(unsigned int lev, const char *file, int line)
172{
173 rb_vm_lock_leave_nb(&lev, file, line);
174}
175
176void
177rb_gc_vm_barrier(void)
178{
179 rb_vm_barrier();
180}
181
182#if USE_MODULAR_GC
183void *
184rb_gc_get_ractor_newobj_cache(void)
185{
186 return GET_RACTOR()->newobj_cache;
187}
188
189void
190rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
191{
192 rb_native_mutex_initialize(&context->lock);
193 context->ec = GET_EC();
194}
195
196void
197rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
198{
199 rb_native_mutex_lock(&context->lock);
200
201 GC_ASSERT(rb_current_execution_context(false) == NULL);
202
203#ifdef RB_THREAD_LOCAL_SPECIFIER
204 rb_current_ec_set(context->ec);
205#else
206 native_tls_set(ruby_current_ec_key, context->ec);
207#endif
208}
209
210void
211rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
212{
213 rb_native_mutex_unlock(&context->lock);
214
215 GC_ASSERT(rb_current_execution_context(true) == context->ec);
216
217#ifdef RB_THREAD_LOCAL_SPECIFIER
218 rb_current_ec_set(NULL);
219#else
220 native_tls_set(ruby_current_ec_key, NULL);
221#endif
222}
223#endif
224
225bool
226rb_gc_event_hook_required_p(rb_event_flag_t event)
227{
228 return ruby_vm_event_flags & event;
229}
230
231void
232rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
233{
234 if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
235
236 rb_execution_context_t *ec = GET_EC();
237 if (!ec->cfp) return;
238
239 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
240}
241
242void *
243rb_gc_get_objspace(void)
244{
245 return GET_VM()->gc.objspace;
246}
247
248
249void
250rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
251{
252 rb_ractor_t *r = NULL;
253 if (RB_LIKELY(ruby_single_main_ractor)) {
254 GC_ASSERT(
255 ccan_list_empty(&GET_VM()->ractor.set) ||
256 (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
257 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
258 );
259
260 func(ruby_single_main_ractor->newobj_cache, data);
261 }
262 else {
263 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
264 func(r->newobj_cache, data);
265 }
266 }
267}
268
269void
270rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
271{
272 volatile struct {
273 VALUE errinfo;
274 VALUE final;
275 const rb_control_frame_t *cfp;
276 VALUE *sp;
277 long finished;
278 } saved;
279
280 rb_execution_context_t * volatile ec = GET_EC();
281#define RESTORE_FINALIZER() (\
282 ec->cfp = saved.cfp, \
283 ec->cfp->sp = saved.sp, \
284 ec->errinfo = saved.errinfo)
285
286 saved.errinfo = ec->errinfo;
287 saved.cfp = ec->cfp;
288 saved.sp = ec->cfp->sp;
289 saved.finished = 0;
290 saved.final = Qundef;
291
292 rb_ractor_ignore_belonging(true);
293 EC_PUSH_TAG(ec);
294 enum ruby_tag_type state = EC_EXEC_TAG();
295 if (state != TAG_NONE) {
296 ++saved.finished; /* skip failed finalizer */
297
298 VALUE failed_final = saved.final;
299 saved.final = Qundef;
300 if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
301 rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
302 rb_ec_error_print(ec, ec->errinfo);
303 }
304 }
305
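    /* Each pass through the loop below first runs RESTORE_FINALIZER() to
     * rewind the saved frame, stack pointer, and errinfo, so a finalizer
     * that raised (handled by the EC_EXEC_TAG branch above) is skipped
     * and iteration resumes cleanly at the next one. */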
306 for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
307 saved.final = callback(i, data);
308 rb_check_funcall(saved.final, idCall, 1, &objid);
309 }
310 EC_POP_TAG();
311 rb_ractor_ignore_belonging(false);
312#undef RESTORE_FINALIZER
313}
314
315void
316rb_gc_set_pending_interrupt(void)
317{
318 rb_execution_context_t *ec = GET_EC();
319 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
320}
321
322void
323rb_gc_unset_pending_interrupt(void)
324{
325 rb_execution_context_t *ec = GET_EC();
326 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
327}
328
329bool
330rb_gc_multi_ractor_p(void)
331{
332 return rb_multi_ractor_p();
333}
334
335bool rb_obj_is_main_ractor(VALUE gv);
336
337bool
338rb_gc_shutdown_call_finalizer_p(VALUE obj)
339{
340 switch (BUILTIN_TYPE(obj)) {
341 case T_DATA:
342 if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
343 if (rb_obj_is_thread(obj)) return false;
344 if (rb_obj_is_mutex(obj)) return false;
345 if (rb_obj_is_fiber(obj)) return false;
346 if (rb_obj_is_main_ractor(obj)) return false;
347 if (rb_obj_is_fstring_table(obj)) return false;
348 if (rb_obj_is_symbol_table(obj)) return false;
349
350 return true;
351
352 case T_FILE:
353 return true;
354
355 case T_SYMBOL:
356 if (RSYMBOL(obj)->fstr &&
357 (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
358 BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
359 RSYMBOL(obj)->fstr = 0;
360 }
361 return true;
362
363 case T_NONE:
364 return false;
365
366 default:
367 return ruby_free_at_exit_p();
368 }
369}
370
371uint32_t
372rb_gc_get_shape(VALUE obj)
373{
374 return (uint32_t)rb_obj_shape_id(obj);
375}
376
377void
378rb_gc_set_shape(VALUE obj, uint32_t shape_id)
379{
380 rb_obj_set_shape_id(obj, (uint32_t)shape_id);
381}
382
383uint32_t
384rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
385{
386 RUBY_ASSERT(RB_TYPE_P(obj, T_OBJECT));
387
388 return (uint32_t)rb_shape_transition_heap(obj, heap_id);
389}
390
391void rb_vm_update_references(void *ptr);
392
393#define rb_setjmp(env) RUBY_SETJMP(env)
394#define rb_jmp_buf rb_jmpbuf_t
395#undef rb_data_object_wrap
396
397#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
398#define MAP_ANONYMOUS MAP_ANON
399#endif
400
401#define unless_objspace(objspace) \
402 void *objspace; \
403 rb_vm_t *unless_objspace_vm = GET_VM(); \
404 if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
405 else /* return; or objspace will be warned uninitialized */
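/* Usage sketch (illustrative, not code from this file): callers expand
 * unless_objspace() into a guarded early return for the case where the
 * VM is not initialized yet:
 *
 *     void
 *     some_gc_entry_point(void)   // hypothetical caller
 *     {
 *         unless_objspace(objspace) { return; }
 *         // ... use objspace ...
 *     }
 */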
406
407#define RMOVED(obj) ((struct RMoved *)(obj))
408
409#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
410 if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
411 *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
412 } \
413} while (0)
414
415#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
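/* Usage sketch (illustrative; the slots shown are hypothetical):
 * compaction reference-updating code rewrites a slot in place only when
 * its referent has been moved:
 *
 *     UPDATE_IF_MOVED(objspace, entry->value);                  // a VALUE slot
 *     TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, cref->next); // a typed slot
 */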
416
417#if RUBY_MARK_FREE_DEBUG
418int ruby_gc_debug_indent = 0;
419#endif
420
421#ifndef RGENGC_OBJ_INFO
422# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
423#endif
424
425#ifndef CALC_EXACT_MALLOC_SIZE
426# define CALC_EXACT_MALLOC_SIZE 0
427#endif
428
428
429VALUE rb_mGC;
430
431static size_t malloc_offset = 0;
432#if defined(HAVE_MALLOC_USABLE_SIZE)
433static size_t
434gc_compute_malloc_offset(void)
435{
436 // Different allocators use different metadata storage strategies which result in different
437 // ideal sizes.
438 // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
439 // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
440 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
441 // waste memory.
442 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
443 // no wasted memory.
444 size_t offset = 0;
445 for (offset = 0; offset <= 16; offset += 8) {
446 size_t allocated = (64 - offset);
447 void *test_ptr = malloc(allocated);
448 size_t wasted = malloc_usable_size(test_ptr) - allocated;
449 free(test_ptr);
450
451 if (wasted == 0) {
452 return offset;
453 }
454 }
455 return 0;
456}
457#else
458static size_t
459gc_compute_malloc_offset(void)
460{
461 // If we don't have malloc_usable_size, we use powers of 2.
462 return 0;
463}
464#endif
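/* Worked example (illustrative; exact sizes depend on the allocator
 * version): glibc serves a 64B request from a chunk with 72 usable bytes
 * (8B wasted) but satisfies a 56B request exactly, so the probe loop
 * above returns 8. jemalloc has a 64B size class, so malloc(64) wastes
 * nothing and the loop returns 0 on its first iteration. */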
465
466size_t
467rb_malloc_grow_capa(size_t current, size_t type_size)
468{
469 size_t current_capacity = current;
470 if (current_capacity < 4) {
471 current_capacity = 4;
472 }
473 current_capacity *= type_size;
474
475 // We double the current capacity.
476 size_t new_capacity = (current_capacity * 2);
477
478 // And round up to the next power of 2 if it's not already one.
479 if (rb_popcount64(new_capacity) != 1) {
480 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
481 }
482
483 new_capacity -= malloc_offset;
484 new_capacity /= type_size;
485 if (current > new_capacity) {
486 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
487 }
488 RUBY_ASSERT(new_capacity > current);
489 return new_capacity;
490}
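/* Worked example (illustrative, assuming malloc_offset == 8 as measured
 * on glibc): for current == 10 elements of type_size == 8, the in-use
 * size is 80B, doubled to 160B, rounded up to the 256B power of two,
 * minus the 8B offset = 248B, i.e. a new capacity of 31 elements. The
 * eventual malloc(31 * 8) then fills a 256B allocator chunk exactly. */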
491
492static inline struct rbimpl_size_mul_overflow_tag
493size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
494{
495 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
496 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
497 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
498}
499
500static inline struct rbimpl_size_mul_overflow_tag
501size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
502{
503 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
504 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
505 struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
506 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
507}
508
509PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
510
511static inline size_t
512size_mul_or_raise(size_t x, size_t y, VALUE exc)
513{
514 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
515 if (LIKELY(!t.left)) {
516 return t.right;
517 }
518 else if (rb_during_gc()) {
519 rb_memerror(); /* or...? */
520 }
521 else {
522 gc_raise(
523 exc,
524 "integer overflow: %"PRIuSIZE
525 " * %"PRIuSIZE
526 " > %"PRIuSIZE,
527 x, y, (size_t)SIZE_MAX);
528 }
529}
530
531size_t
532rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
533{
534 return size_mul_or_raise(x, y, exc);
535}
536
537static inline size_t
538size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
539{
540 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
541 if (LIKELY(!t.left)) {
542 return t.right;
543 }
544 else if (rb_during_gc()) {
545 rb_memerror(); /* or...? */
546 }
547 else {
548 gc_raise(
549 exc,
550 "integer overflow: %"PRIuSIZE
551 " * %"PRIuSIZE
552 " + %"PRIuSIZE
553 " > %"PRIuSIZE,
554 x, y, z, (size_t)SIZE_MAX);
555 }
556}
557
558size_t
559rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
560{
561 return size_mul_add_or_raise(x, y, z, exc);
562}
563
564static inline size_t
565size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
566{
567 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
568 if (LIKELY(!t.left)) {
569 return t.right;
570 }
571 else if (rb_during_gc()) {
572 rb_memerror(); /* or...? */
573 }
574 else {
575 gc_raise(
576 exc,
577 "integer overflow: %"PRIdSIZE
578 " * %"PRIdSIZE
579 " + %"PRIdSIZE
580 " * %"PRIdSIZE
581 " > %"PRIdSIZE,
582 x, y, z, w, (size_t)SIZE_MAX);
583 }
584}
585
586#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
587/* trick the compiler into thinking an external signal handler uses this */
588volatile VALUE rb_gc_guarded_val;
589volatile VALUE *
590rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
591{
592 rb_gc_guarded_val = val;
593
594 return ptr;
595}
596#endif
597
598static const char *obj_type_name(VALUE obj);
599#include "gc/default/default.c"
600
601#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
602# error "Modular GC requires dlopen"
603#elif USE_MODULAR_GC
604#include <dlfcn.h>
605
606typedef struct gc_function_map {
607 // Bootup
608 void *(*objspace_alloc)(void);
609 void (*objspace_init)(void *objspace_ptr);
610 void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
611 void (*set_params)(void *objspace_ptr);
612 void (*init)(void);
613 size_t *(*heap_sizes)(void *objspace_ptr);
614 // Shutdown
615 void (*shutdown_free_objects)(void *objspace_ptr);
616 void (*objspace_free)(void *objspace_ptr);
617 void (*ractor_cache_free)(void *objspace_ptr, void *cache);
618 // GC
619 void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
620 bool (*during_gc_p)(void *objspace_ptr);
621 void (*prepare_heap)(void *objspace_ptr);
622 void (*gc_enable)(void *objspace_ptr);
623 void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
624 bool (*gc_enabled_p)(void *objspace_ptr);
625 VALUE (*config_get)(void *objspace_ptr);
626 void (*config_set)(void *objspace_ptr, VALUE hash);
627 void (*stress_set)(void *objspace_ptr, VALUE flag);
628 VALUE (*stress_get)(void *objspace_ptr);
629 // Object allocation
630 VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
631 size_t (*obj_slot_size)(VALUE obj);
632 size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
633 bool (*size_allocatable_p)(size_t size);
634 // Malloc
635 void *(*malloc)(void *objspace_ptr, size_t size);
636 void *(*calloc)(void *objspace_ptr, size_t size);
637 void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
638 void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
639 void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
640 // Marking
641 void (*mark)(void *objspace_ptr, VALUE obj);
642 void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
643 void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
644 void (*mark_maybe)(void *objspace_ptr, VALUE obj);
645 void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
646 void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
647 // Compaction
648 bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
649 VALUE (*location)(void *objspace_ptr, VALUE value);
650 // Write barriers
651 void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
652 void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
653 void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
654 // Heap walking
655 void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
656 void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
657 // Finalizers
658 void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
659 VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
660 void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
661 void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
662 void (*shutdown_call_finalizer)(void *objspace_ptr);
663 // Forking
664 void (*before_fork)(void *objspace_ptr);
665 void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
666 // Statistics
667 void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
668 bool (*get_measure_total_time)(void *objspace_ptr);
669 unsigned long long (*get_total_time)(void *objspace_ptr);
670 size_t (*gc_count)(void *objspace_ptr);
671 VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
672 VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
673 VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
674 const char *(*active_gc_name)(void);
675 // Miscellaneous
676 struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
677 bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
678 bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
679 void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
680 void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
681
682 bool modular_gc_loaded_p;
683} rb_gc_function_map_t;
684
685static rb_gc_function_map_t rb_gc_functions;
686
687# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
688# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)
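/* Usage sketch (illustrative; "mmtk" is only an example library name):
 *
 *     $ RUBY_GC_LIBRARY=mmtk ruby app.rb
 *
 * makes ruby_modular_gc_init() below dlopen() the shared object
 * MODULAR_GC_DIR "librubygc.mmtk" DLEXT and dispatch every rb_gc_impl_*
 * call through the loaded function table; when the variable is unset,
 * the statically linked default GC is used. */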
689
690static void
691ruby_modular_gc_init(void)
692{
693 // Assert that the directory path ends with a /
694 RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
695
696 const char *gc_so_file = getenv(RUBY_GC_LIBRARY);
697
698 rb_gc_function_map_t gc_functions = { 0 };
699
700 char *gc_so_path = NULL;
701 void *handle = NULL;
702 if (gc_so_file) {
703 /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
704 * not load a shared object outside of the directory. */
705 for (size_t i = 0; i < strlen(gc_so_file); i++) {
706 char c = gc_so_file[i];
707 if (isalnum(c)) continue;
708 switch (c) {
709 case '-':
710 case '_':
711 break;
712 default:
713 fprintf(stderr, "Only alphanumeric, dash, and underscore are allowed in "RUBY_GC_LIBRARY"\n");
714 exit(1);
715 }
716 }
717
718 size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
719#ifdef LOAD_RELATIVE
720 Dl_info dli;
721 size_t prefix_len = 0;
722 if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
723 const char *base = strrchr(dli.dli_fname, '/');
724 if (base) {
725 size_t tail = 0;
726# define end_with_p(lit) \
727 (prefix_len >= (tail = rb_strlen_lit(lit)) && \
728 memcmp(base - tail, lit, tail) == 0)
729
730 prefix_len = base - dli.dli_fname;
731 if (end_with_p("/bin") || end_with_p("/lib")) {
732 prefix_len -= tail;
733 }
734 prefix_len += MODULAR_GC_DIR[0] != '/';
735 gc_so_path_size += prefix_len;
736 }
737 }
738#endif
739 gc_so_path = alloca(gc_so_path_size);
740 {
741 size_t gc_so_path_idx = 0;
742#define GC_SO_PATH_APPEND(str) do { \
743 gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
744} while (0)
745#ifdef LOAD_RELATIVE
746 if (prefix_len > 0) {
747 memcpy(gc_so_path, dli.dli_fname, prefix_len);
748 gc_so_path_idx = prefix_len;
749 }
750#endif
751 GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
752 GC_SO_PATH_APPEND(gc_so_file);
753 GC_SO_PATH_APPEND(DLEXT);
754 GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
755#undef GC_SO_PATH_APPEND
756 }
757
758 handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
759 if (!handle) {
760 fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
761 exit(1);
762 }
763
764 gc_functions.modular_gc_loaded_p = true;
765 }
766
767# define load_modular_gc_func(name) do { \
768 if (handle) { \
769 const char *func_name = "rb_gc_impl_" #name; \
770 gc_functions.name = dlsym(handle, func_name); \
771 if (!gc_functions.name) { \
772 fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
773 exit(1); \
774 } \
775 } \
776 else { \
777 gc_functions.name = rb_gc_impl_##name; \
778 } \
779} while (0)
780
781 // Bootup
782 load_modular_gc_func(objspace_alloc);
783 load_modular_gc_func(objspace_init);
784 load_modular_gc_func(ractor_cache_alloc);
785 load_modular_gc_func(set_params);
786 load_modular_gc_func(init);
787 load_modular_gc_func(heap_sizes);
788 // Shutdown
789 load_modular_gc_func(shutdown_free_objects);
790 load_modular_gc_func(objspace_free);
791 load_modular_gc_func(ractor_cache_free);
792 // GC
793 load_modular_gc_func(start);
794 load_modular_gc_func(during_gc_p);
795 load_modular_gc_func(prepare_heap);
796 load_modular_gc_func(gc_enable);
797 load_modular_gc_func(gc_disable);
798 load_modular_gc_func(gc_enabled_p);
799 load_modular_gc_func(config_set);
800 load_modular_gc_func(config_get);
801 load_modular_gc_func(stress_set);
802 load_modular_gc_func(stress_get);
803 // Object allocation
804 load_modular_gc_func(new_obj);
805 load_modular_gc_func(obj_slot_size);
806 load_modular_gc_func(heap_id_for_size);
807 load_modular_gc_func(size_allocatable_p);
808 // Malloc
809 load_modular_gc_func(malloc);
810 load_modular_gc_func(calloc);
811 load_modular_gc_func(realloc);
812 load_modular_gc_func(free);
813 load_modular_gc_func(adjust_memory_usage);
814 // Marking
815 load_modular_gc_func(mark);
816 load_modular_gc_func(mark_and_move);
817 load_modular_gc_func(mark_and_pin);
818 load_modular_gc_func(mark_maybe);
819 load_modular_gc_func(mark_weak);
820 load_modular_gc_func(remove_weak);
821 // Compaction
822 load_modular_gc_func(object_moved_p);
823 load_modular_gc_func(location);
824 // Write barriers
825 load_modular_gc_func(writebarrier);
826 load_modular_gc_func(writebarrier_unprotect);
827 load_modular_gc_func(writebarrier_remember);
828 // Heap walking
829 load_modular_gc_func(each_objects);
830 load_modular_gc_func(each_object);
831 // Finalizers
832 load_modular_gc_func(make_zombie);
833 load_modular_gc_func(define_finalizer);
834 load_modular_gc_func(undefine_finalizer);
835 load_modular_gc_func(copy_finalizer);
836 load_modular_gc_func(shutdown_call_finalizer);
837 // Forking
838 load_modular_gc_func(before_fork);
839 load_modular_gc_func(after_fork);
840 // Statistics
841 load_modular_gc_func(set_measure_total_time);
842 load_modular_gc_func(get_measure_total_time);
843 load_modular_gc_func(get_total_time);
844 load_modular_gc_func(gc_count);
845 load_modular_gc_func(latest_gc_info);
846 load_modular_gc_func(stat);
847 load_modular_gc_func(stat_heap);
848 load_modular_gc_func(active_gc_name);
849 // Miscellaneous
850 load_modular_gc_func(object_metadata);
851 load_modular_gc_func(pointer_to_heap_p);
852 load_modular_gc_func(garbage_object_p);
853 load_modular_gc_func(set_event_hook);
854 load_modular_gc_func(copy_attributes);
855
856# undef load_modular_gc_func
857
858 rb_gc_functions = gc_functions;
859}
860
861// Bootup
862# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
863# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
864# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
865# define rb_gc_impl_set_params rb_gc_functions.set_params
866# define rb_gc_impl_init rb_gc_functions.init
867# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
868// Shutdown
869# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
870# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
871# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
872// GC
873# define rb_gc_impl_start rb_gc_functions.start
874# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
875# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
876# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
877# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
878# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
879# define rb_gc_impl_config_get rb_gc_functions.config_get
880# define rb_gc_impl_config_set rb_gc_functions.config_set
881# define rb_gc_impl_stress_set rb_gc_functions.stress_set
882# define rb_gc_impl_stress_get rb_gc_functions.stress_get
883// Object allocation
884# define rb_gc_impl_new_obj rb_gc_functions.new_obj
885# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
886# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
887# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
888// Malloc
889# define rb_gc_impl_malloc rb_gc_functions.malloc
890# define rb_gc_impl_calloc rb_gc_functions.calloc
891# define rb_gc_impl_realloc rb_gc_functions.realloc
892# define rb_gc_impl_free rb_gc_functions.free
893# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
894// Marking
895# define rb_gc_impl_mark rb_gc_functions.mark
896# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
897# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
898# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
899# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
900# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
901// Compaction
902# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
903# define rb_gc_impl_location rb_gc_functions.location
904// Write barriers
905# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
906# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
907# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
908// Heap walking
909# define rb_gc_impl_each_objects rb_gc_functions.each_objects
910# define rb_gc_impl_each_object rb_gc_functions.each_object
911// Finalizers
912# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
913# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
914# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
915# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
916# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
917// Forking
918# define rb_gc_impl_before_fork rb_gc_functions.before_fork
919# define rb_gc_impl_after_fork rb_gc_functions.after_fork
920// Statistics
921# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
922# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
923# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
924# define rb_gc_impl_gc_count rb_gc_functions.gc_count
925# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
926# define rb_gc_impl_stat rb_gc_functions.stat
927# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
928# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
929// Miscellaneous
930# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
931# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
932# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
933# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
934# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
935#endif
936
937#ifdef RUBY_ASAN_ENABLED
938static void
939asan_death_callback(void)
940{
941 if (GET_VM()) {
942 rb_bug_without_die("ASAN error");
943 }
944}
945#endif
946
947static VALUE initial_stress = Qfalse;
948
949void *
950rb_objspace_alloc(void)
951{
952#if USE_MODULAR_GC
953 ruby_modular_gc_init();
954#endif
955
956 void *objspace = rb_gc_impl_objspace_alloc();
957 ruby_current_vm_ptr->gc.objspace = objspace;
958 rb_gc_impl_objspace_init(objspace);
959 rb_gc_impl_stress_set(objspace, initial_stress);
960
961#ifdef RUBY_ASAN_ENABLED
962 __sanitizer_set_death_callback(asan_death_callback);
963#endif
964
965 return objspace;
966}
967
968void
969rb_objspace_free(void *objspace)
970{
971 rb_gc_impl_objspace_free(objspace);
972}
973
974size_t
975rb_gc_obj_slot_size(VALUE obj)
976{
977 return rb_gc_impl_obj_slot_size(obj);
978}
979
980static inline void
981gc_validate_pc(void)
982{
983#if RUBY_DEBUG
984 rb_execution_context_t *ec = GET_EC();
985 const rb_control_frame_t *cfp = ec->cfp;
986 if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
987 RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
988 RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
989 }
990#endif
991}
992
993static inline VALUE
994newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
995{
996 VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);
997
998 gc_validate_pc();
999
1000 if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
1001 unsigned int lev;
1002 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
1003 {
1004 memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);
1005
1006 /* We must disable GC here because the callback could call xmalloc
1007 * which could potentially trigger a GC, and a lot of code is unsafe
1008 * to trigger a GC right after an object has been allocated because
1009 * they perform initialization for the object and assume that the
1010 * GC does not trigger before then. */
1011 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1012 {
1013 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
1014 }
1015 if (!gc_disabled) rb_gc_enable();
1016 }
1017 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
1018 }
1019
1020 return obj;
1021}
1022
1023VALUE
1024rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
1025{
1026 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1027 return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
1028}
1029
1030VALUE
1031rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
1032{
1033 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1034 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
1035}
1036
1037#define UNEXPECTED_NODE(func) \
1038 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1039 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1040
1041static inline void
1042rb_data_object_check(VALUE klass)
1043{
1044 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1045 rb_undef_alloc_func(klass);
1046 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1047 }
1048}
1049
1050VALUE
1051rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1052{
1053 RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
1054 if (klass) rb_data_object_check(klass);
1055 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)datap, (VALUE)dfree, !dmark, sizeof(struct RTypedData));
1056}
1057
1058VALUE
1059rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1060{
1061 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1062 DATA_PTR(obj) = xcalloc(1, size);
1063 return obj;
1064}
1065
1066static VALUE
1067typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1068{
1069 RBIMPL_NONNULL_ARG(type);
1070 if (klass) rb_data_object_check(klass);
1071 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1072 return newobj_of(GET_RACTOR(), klass, T_DATA, ((VALUE)type) | IS_TYPED_DATA | typed_flag, (VALUE)datap, 0, wb_protected, size);
1073}
1074
1075VALUE
1076rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1077{
1078 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
1079 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1080 }
1081
1082 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1083}
1084
1085VALUE
1086rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1087{
1088 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
1089 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1090 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1091 }
1092
1093 size_t embed_size = offsetof(struct RTypedData, data) + size;
1094 if (rb_gc_size_allocatable_p(embed_size)) {
1095 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1096 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1097 return obj;
1098 }
1099 }
1100
1101 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1102 DATA_PTR(obj) = xcalloc(1, size);
1103 return obj;
1104}
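/* Extension-side sketch (illustrative; struct point and point_type are
 * hypothetical): a type flagged RUBY_TYPED_EMBEDDABLE and
 * RUBY_TYPED_FREE_IMMEDIATELY lets a small payload live inside the
 * RTypedData slot itself, taking the embedded branch above instead of
 * the separate xcalloc:
 *
 *     struct point { double x, y; };
 *     static const rb_data_type_t point_type = {
 *         "point",
 *         { 0, RUBY_DEFAULT_FREE, 0 },
 *         0, 0,
 *         RUBY_TYPED_EMBEDDABLE | RUBY_TYPED_FREE_IMMEDIATELY
 *     };
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct point), &point_type);
 */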
1105
1106static size_t
1107rb_objspace_data_type_memsize(VALUE obj)
1108{
1109 size_t size = 0;
1110 if (RTYPEDDATA_P(obj)) {
1111 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1112 const void *ptr = RTYPEDDATA_GET_DATA(obj);
1113
1114 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1115#ifdef HAVE_MALLOC_USABLE_SIZE
1116 size += malloc_usable_size((void *)ptr);
1117#endif
1118 }
1119
1120 if (ptr && type->function.dsize) {
1121 size += type->function.dsize(ptr);
1122 }
1123 }
1124
1125 return size;
1126}
1127
1128const char *
1129rb_objspace_data_type_name(VALUE obj)
1130{
1131 if (RTYPEDDATA_P(obj)) {
1132 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1133 }
1134 else {
1135 return 0;
1136 }
1137}
1138
1139static enum rb_id_table_iterator_result
1140cvar_table_free_i(VALUE value, void *ctx)
1141{
1142 xfree((void *)value);
1143 return ID_TABLE_CONTINUE;
1144}
1145
1146static void
1147io_fptr_finalize(void *fptr)
1148{
1149 rb_io_fptr_finalize((struct rb_io *)fptr);
1150}
1151
1152static inline void
1153make_io_zombie(void *objspace, VALUE obj)
1154{
1155 rb_io_t *fptr = RFILE(obj)->fptr;
1156 rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
1157}
1158
1159static bool
1160rb_data_free(void *objspace, VALUE obj)
1161{
1162 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1163 if (data) {
1164 int free_immediately = false;
1165 void (*dfree)(void *);
1166
1167 if (RTYPEDDATA_P(obj)) {
1168 free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1169 dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
1170 }
1171 else {
1172 dfree = RDATA(obj)->dfree;
1173 }
1174
1175 if (dfree) {
1176 if (dfree == RUBY_DEFAULT_FREE) {
1177 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1178 xfree(data);
1179 RB_DEBUG_COUNTER_INC(obj_data_xfree);
1180 }
1181 }
1182 else if (free_immediately) {
1183 (*dfree)(data);
1184 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1185 xfree(data);
1186 }
1187
1188 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1189 }
1190 else {
1191 rb_gc_impl_make_zombie(objspace, obj, dfree, data);
1192 RB_DEBUG_COUNTER_INC(obj_data_zombie);
1193 return FALSE;
1194 }
1195 }
1196 else {
1197 RB_DEBUG_COUNTER_INC(obj_data_empty);
1198 }
1199 }
1200
1201 return true;
1202}
1203
1204struct classext_foreach_args {
1205 VALUE klass;
1206 rb_objspace_t *objspace; // used for update_*
1207};
1208
1209static void
1210classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
1211{
1212 struct rb_id_table *tbl;
1213 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1214
1215 rb_id_table_free(RCLASSEXT_M_TBL(ext));
1216 rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass);
1217 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) {
1218 rb_free_const_table(tbl);
1219 }
1220 if ((tbl = RCLASSEXT_CVC_TBL(ext)) != NULL) {
1221 rb_id_table_foreach_values(tbl, cvar_table_free_i, NULL);
1222 rb_id_table_free(tbl);
1223 }
1224 rb_class_classext_free_subclasses(ext, args->klass);
1225 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
1226 RUBY_ASSERT(is_prime); // superclasses should only be used on prime
1227 xfree(RCLASSEXT_SUPERCLASSES(ext));
1228 }
1229 if (!is_prime) { // the prime classext will be freed with RClass
1230 xfree(ext);
1231 }
1232}
1233
1234static void
1235classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
1236{
1237 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1238
1239 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
1240 /* Method table is not shared for origin iclasses of classes */
1241 rb_id_table_free(RCLASSEXT_M_TBL(ext));
1242 }
1243 if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) {
1244 rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext));
1245 }
1246 rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass);
1247
1248 rb_class_classext_free_subclasses(ext, args->klass);
1249
1250 if (!is_prime) { // the prime classext will be freed with RClass
1251 xfree(ext);
1252 }
1253}
1254
1255bool
1256rb_gc_obj_free(void *objspace, VALUE obj)
1257{
1258 struct classext_foreach_args args;
1259
1260 RB_DEBUG_COUNTER_INC(obj_free);
1261
1262 switch (BUILTIN_TYPE(obj)) {
1263 case T_NIL:
1264 case T_FIXNUM:
1265 case T_TRUE:
1266 case T_FALSE:
1267 rb_bug("obj_free() called for broken object");
1268 break;
1269 default:
1270 break;
1271 }
1272
1273 switch (BUILTIN_TYPE(obj)) {
1274 case T_OBJECT:
1275 if (rb_shape_obj_too_complex_p(obj)) {
1276 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
1277 st_free_table(ROBJECT_FIELDS_HASH(obj));
1278 }
1279 else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
1280 RB_DEBUG_COUNTER_INC(obj_obj_embed);
1281 }
1282 else {
1283 xfree(ROBJECT(obj)->as.heap.fields);
1284 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1285 }
1286 break;
1287 case T_MODULE:
1288 case T_CLASS:
1289 args.klass = obj;
1290 rb_class_classext_foreach(obj, classext_free, (void *)&args);
1291 if (RCLASS_CLASSEXT_TBL(obj)) {
1292 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1293 }
1294 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1295 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1296 break;
1297 case T_STRING:
1298 rb_str_free(obj);
1299 break;
1300 case T_ARRAY:
1301 rb_ary_free(obj);
1302 break;
1303 case T_HASH:
1304#if USE_DEBUG_COUNTER
1305 switch (RHASH_SIZE(obj)) {
1306 case 0:
1307 RB_DEBUG_COUNTER_INC(obj_hash_empty);
1308 break;
1309 case 1:
1310 RB_DEBUG_COUNTER_INC(obj_hash_1);
1311 break;
1312 case 2:
1313 RB_DEBUG_COUNTER_INC(obj_hash_2);
1314 break;
1315 case 3:
1316 RB_DEBUG_COUNTER_INC(obj_hash_3);
1317 break;
1318 case 4:
1319 RB_DEBUG_COUNTER_INC(obj_hash_4);
1320 break;
1321 case 5:
1322 case 6:
1323 case 7:
1324 case 8:
1325 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1326 break;
1327 default:
1328 GC_ASSERT(RHASH_SIZE(obj) > 8);
1329 RB_DEBUG_COUNTER_INC(obj_hash_g8);
1330 }
1331
1332 if (RHASH_AR_TABLE_P(obj)) {
1333 if (RHASH_AR_TABLE(obj) == NULL) {
1334 RB_DEBUG_COUNTER_INC(obj_hash_null);
1335 }
1336 else {
1337 RB_DEBUG_COUNTER_INC(obj_hash_ar);
1338 }
1339 }
1340 else {
1341 RB_DEBUG_COUNTER_INC(obj_hash_st);
1342 }
1343#endif
1344
1345 rb_hash_free(obj);
1346 break;
1347 case T_REGEXP:
1348 if (RREGEXP(obj)->ptr) {
1349 onig_free(RREGEXP(obj)->ptr);
1350 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1351 }
1352 break;
1353 case T_DATA:
1354 if (!rb_data_free(objspace, obj)) return false;
1355 break;
1356 case T_MATCH:
1357 {
1358 rb_matchext_t *rm = RMATCH_EXT(obj);
1359#if USE_DEBUG_COUNTER
1360 if (rm->regs.num_regs >= 8) {
1361 RB_DEBUG_COUNTER_INC(obj_match_ge8);
1362 }
1363 else if (rm->regs.num_regs >= 4) {
1364 RB_DEBUG_COUNTER_INC(obj_match_ge4);
1365 }
1366 else if (rm->regs.num_regs >= 1) {
1367 RB_DEBUG_COUNTER_INC(obj_match_under4);
1368 }
1369#endif
1370 onig_region_free(&rm->regs, 0);
1371 xfree(rm->char_offset);
1372
1373 RB_DEBUG_COUNTER_INC(obj_match_ptr);
1374 }
1375 break;
1376 case T_FILE:
1377 if (RFILE(obj)->fptr) {
1378 make_io_zombie(objspace, obj);
1379 RB_DEBUG_COUNTER_INC(obj_file_ptr);
1380 return FALSE;
1381 }
1382 break;
1383 case T_RATIONAL:
1384 RB_DEBUG_COUNTER_INC(obj_rational);
1385 break;
1386 case T_COMPLEX:
1387 RB_DEBUG_COUNTER_INC(obj_complex);
1388 break;
1389 case T_MOVED:
1390 break;
1391 case T_ICLASS:
1392 args.klass = obj;
1393
1394 rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
1395 if (RCLASS_CLASSEXT_TBL(obj)) {
1396 st_free_table(RCLASS_CLASSEXT_TBL(obj));
1397 }
1398
1399 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1400 break;
1401
1402 case T_FLOAT:
1403 RB_DEBUG_COUNTER_INC(obj_float);
1404 break;
1405
1406 case T_BIGNUM:
1407 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1408 xfree(BIGNUM_DIGITS(obj));
1409 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1410 }
1411 else {
1412 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1413 }
1414 break;
1415
1416 case T_NODE:
1417 UNEXPECTED_NODE(obj_free);
1418 break;
1419
1420 case T_STRUCT:
1421 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1422 RSTRUCT(obj)->as.heap.ptr == NULL) {
1423 RB_DEBUG_COUNTER_INC(obj_struct_embed);
1424 }
1425 else {
1426 xfree((void *)RSTRUCT(obj)->as.heap.ptr);
1427 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1428 }
1429 break;
1430
1431 case T_SYMBOL:
1432 RB_DEBUG_COUNTER_INC(obj_symbol);
1433 break;
1434
1435 case T_IMEMO:
1436 rb_imemo_free((VALUE)obj);
1437 break;
1438
1439 default:
1440 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1441 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1442 }
1443
1444 if (FL_TEST_RAW(obj, FL_FINALIZE)) {
1445 rb_gc_impl_make_zombie(objspace, obj, 0, 0);
1446 return FALSE;
1447 }
1448 else {
1449 return TRUE;
1450 }
1451}
1452
1453void
1454rb_objspace_set_event_hook(const rb_event_flag_t event)
1455{
1456 rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1457}
1458
1459static int
1460internal_object_p(VALUE obj)
1461{
1462 void *ptr = asan_unpoison_object_temporary(obj);
1463
1464 if (RBASIC(obj)->flags) {
1465 switch (BUILTIN_TYPE(obj)) {
1466 case T_NODE:
1467 UNEXPECTED_NODE(internal_object_p);
1468 break;
1469 case T_NONE:
1470 case T_MOVED:
1471 case T_IMEMO:
1472 case T_ICLASS:
1473 case T_ZOMBIE:
1474 break;
1475 case T_CLASS:
1476 if (obj == rb_mRubyVMFrozenCore)
1477 return 1;
1478
1479 if (!RBASIC_CLASS(obj)) break;
1480 if (RCLASS_SINGLETON_P(obj)) {
1481 return rb_singleton_class_internal_p(obj);
1482 }
1483 return 0;
1484 default:
1485 if (!RBASIC(obj)->klass) break;
1486 return 0;
1487 }
1488 }
1489 if (ptr || !RBASIC(obj)->flags) {
1490 rb_asan_poison_object(obj);
1491 }
1492 return 1;
1493}
1494
1495int
1496rb_objspace_internal_object_p(VALUE obj)
1497{
1498 return internal_object_p(obj);
1499}
1500
1501struct os_each_struct {
1502 size_t num;
1503 VALUE of;
1504};
1505
1506static int
1507os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1508{
1509 struct os_each_struct *oes = (struct os_each_struct *)data;
1510
1511 VALUE v = (VALUE)vstart;
1512 for (; v != (VALUE)vend; v += stride) {
1513 if (!internal_object_p(v)) {
1514 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1515 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1516 rb_yield(v);
1517 oes->num++;
1518 }
1519 }
1520 }
1521 }
1522
1523 return 0;
1524}
1525
1526static VALUE
1527os_obj_of(VALUE of)
1528{
1529 struct os_each_struct oes;
1530
1531 oes.num = 0;
1532 oes.of = of;
1533 rb_objspace_each_objects(os_obj_of_i, &oes);
1534 return SIZET2NUM(oes.num);
1535}
1536
1537/*
1538 * call-seq:
1539 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1540 * ObjectSpace.each_object([module]) -> an_enumerator
1541 *
1542 * Calls the block once for each living, nonimmediate object in this
1543 * Ruby process. If <i>module</i> is specified, calls the block
1544 * for only those classes or modules that match (or are a subclass of)
1545 * <i>module</i>. Returns the number of objects found. Immediate
1546 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
1547 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
1548 * never returned. In the example below, #each_object returns both
1549 * the numbers we defined and several constants defined in the Math
1550 * module.
1551 *
1552 * If no block is given, an enumerator is returned instead.
1553 *
1554 * a = 102.7
1555 * b = 95 # Won't be returned
1556 * c = 12345678987654321
1557 * count = ObjectSpace.each_object(Numeric) {|x| p x }
1558 * puts "Total count: #{count}"
1559 *
1560 * <em>produces:</em>
1561 *
1562 * 12345678987654321
1563 * 102.7
1564 * 2.71828182845905
1565 * 3.14159265358979
1566 * 2.22044604925031e-16
1567 * 1.7976931348623157e+308
1568 * 2.2250738585072e-308
1569 * Total count: 7
1570 *
1571 * Due to a current known Ractor implementation issue, this method will not yield
1572 * Ractor-unshareable objects in multi-Ractor mode (when
1573 * <code>Ractor.new</code> has been called within the process at least once).
1574 * See https://bugs.ruby-lang.org/issues/19387 for more information.
1575 *
1576 * a = 12345678987654321 # shareable
1577 * b = [].freeze # shareable
1578 * c = {} # not shareable
1579 * ObjectSpace.each_object {|x| x } # yields a, b, and c
1580 * Ractor.new {} # enter multi-Ractor mode
1581 * ObjectSpace.each_object {|x| x } # does not yield c
1582 *
1583 */
1584
1585static VALUE
1586os_each_obj(int argc, VALUE *argv, VALUE os)
1587{
1588 VALUE of;
1589
1590 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1591 RETURN_ENUMERATOR(os, 1, &of);
1592 return os_obj_of(of);
1593}
1594
1595/*
1596 * call-seq:
1597 * ObjectSpace.undefine_finalizer(obj)
1598 *
1599 * Removes all finalizers for <i>obj</i>.
1600 *
1601 */
1602
1603static VALUE
1604undefine_final(VALUE os, VALUE obj)
1605{
1606 return rb_undefine_finalizer(obj);
1607}
1608
1609VALUE
1610rb_undefine_finalizer(VALUE obj)
1611{
1612 rb_check_frozen(obj);
1613
1614 rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1615
1616 return obj;
1617}
1618
1619static void
1620should_be_callable(VALUE block)
1621{
1622 if (!rb_obj_respond_to(block, idCall, TRUE)) {
1623 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1624 rb_obj_class(block));
1625 }
1626}
1627
1628static void
1629should_be_finalizable(VALUE obj)
1630{
1631 if (!FL_ABLE(obj)) {
1632 rb_raise(rb_eArgError, "cannot define finalizer for %s",
1633 rb_obj_classname(obj));
1634 }
1635 rb_check_frozen(obj);
1636}
1637
1638void
1639rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1640{
1641 rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1642}
1643
1644/*
1645 * call-seq:
1646 * ObjectSpace.define_finalizer(obj, aProc=proc())
1647 *
1648 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1649 * was destroyed. The object ID of the <i>obj</i> will be passed
1650 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
1651 * method, make sure it can be called with a single argument.
1652 *
1653 * The return value is an array <code>[0, aProc]</code>.
1654 *
1655 * The two recommended patterns are to either create the finalizer proc
1656 * in a non-instance method where it can safely capture the needed state,
1657 * or to use a custom callable object that stores the needed state
1658 * explicitly as instance variables.
1659 *
1660 * class Foo
1661 * def initialize(data_needed_for_finalization)
1662 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
1663 * end
1664 *
1665 * def self.create_finalizer(data_needed_for_finalization)
1666 * proc {
1667 * puts "finalizing #{data_needed_for_finalization}"
1668 * }
1669 * end
1670 * end
1671 *
1672 * class Bar
1673 * class Remover
1674 * def initialize(data_needed_for_finalization)
1675 * @data_needed_for_finalization = data_needed_for_finalization
1676 * end
1677 *
1678 * def call(id)
1679 * puts "finalizing #{@data_needed_for_finalization}"
1680 * end
1681 * end
1682 *
1683 * def initialize(data_needed_for_finalization)
1684 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
1685 * end
1686 * end
1687 *
1688 * Note that if your finalizer references the object to be
1689 * finalized it will never be run on GC, although it will still be
1690 * run at exit. You will get a warning if you capture the object
1691 * to be finalized as the receiver of the finalizer.
1692 *
1693 * class CapturesSelf
1694 * def initialize(name)
1695 * ObjectSpace.define_finalizer(self, proc {
1696 * # this finalizer will only be run on exit
1697 * puts "finalizing #{name}"
1698 * })
1699 * end
1700 * end
1701 *
1702 * Also note that finalization can be unpredictable and is never guaranteed
1703 * to be run except on exit.
1704 */
1705
1706static VALUE
1707define_final(int argc, VALUE *argv, VALUE os)
1708{
1709 VALUE obj, block;
1710
1711 rb_scan_args(argc, argv, "11", &obj, &block);
1712 if (argc == 1) {
1713 block = rb_block_proc();
1714 }
1715
1716 if (rb_callable_receiver(block) == obj) {
1717 rb_warn("finalizer references object to be finalized");
1718 }
1719
1720 return rb_define_finalizer(obj, block);
1721}
1722
1723VALUE
1724rb_define_finalizer(VALUE obj, VALUE block)
1725{
1726 should_be_finalizable(obj);
1727 should_be_callable(block);
1728
1729 block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
1730
1731 block = rb_ary_new3(2, INT2FIX(0), block);
1732 OBJ_FREEZE(block);
1733 return block;
1734}
1735
1736void
1737rb_objspace_call_finalizer(void)
1738{
1739 rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
1740}
1741
1742void
1743rb_objspace_free_objects(void *objspace)
1744{
1745 rb_gc_impl_shutdown_free_objects(objspace);
1746}
1747
1748int
1749rb_objspace_garbage_object_p(VALUE obj)
1750{
1751 return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
1752}
1753
1754bool
1755rb_gc_pointer_to_heap_p(VALUE obj)
1756{
1757 return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
1758}
1759
1760#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
1761#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
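/* Object IDs advance in OBJ_ID_INCREMENT steps so an ID can never equal
 * a tagged immediate VALUE. Illustrative: with RUBY_IMMEDIATE_MASK ==
 * 0x07 on 64-bit flonum builds, IDs are handed out as 8, 16, 24, ... */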
1762static VALUE id2ref_value = 0;
1763static st_table *id2ref_tbl = NULL;
1764static bool id2ref_tbl_built = false;
1765
1766#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
1767static size_t object_id_counter = 1;
1768#else
1769static unsigned long long object_id_counter = 1;
1770#endif
1771
1772static inline VALUE
1773generate_next_object_id(void)
1774{
1775#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
1776 // 64bit atomics are available
1777 return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
1778#else
1779 unsigned int lock_lev = RB_GC_VM_LOCK();
1780 VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
1781 RB_GC_VM_UNLOCK(lock_lev);
1782 return id;
1783#endif
1784}
1785
1786void
1787rb_gc_obj_id_moved(VALUE obj)
1788{
1789 if (UNLIKELY(id2ref_tbl)) {
1790 st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
1791 }
1792}
1793
1794static int
1795object_id_cmp(st_data_t x, st_data_t y)
1796{
1797 if (RB_TYPE_P(x, T_BIGNUM)) {
1798 return !rb_big_eql(x, y);
1799 }
1800 else {
1801 return x != y;
1802 }
1803}
1804
1805static st_index_t
1806object_id_hash(st_data_t n)
1807{
1808 return FIX2LONG(rb_hash((VALUE)n));
1809}
1810
1811static const struct st_hash_type object_id_hash_type = {
1812 object_id_cmp,
1813 object_id_hash,
1814};
1815
1816static void gc_mark_tbl_no_pin(st_table *table);
1817
1818static void
1819id2ref_tbl_mark(void *data)
1820{
1821 st_table *table = (st_table *)data;
1822 if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
1823 // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
1824 rb_mark_set(table);
1825 }
1826 // We purposely don't mark values, as they are weak references.
1827 // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
1828}
1829
1830static size_t
1831id2ref_tbl_memsize(const void *data)
1832{
1833 return rb_st_memsize(data);
1834}
1835
1836static void
1837id2ref_tbl_free(void *data)
1838{
1839 id2ref_tbl = NULL; // clear global ref
1840 st_table *table = (st_table *)data;
1841 st_free_table(table);
1842}
1843
1844static const rb_data_type_t id2ref_tbl_type = {
1845 .wrap_struct_name = "VM/_id2ref_table",
1846 .function = {
1847 .dmark = id2ref_tbl_mark,
1848 .dfree = id2ref_tbl_free,
1849 .dsize = id2ref_tbl_memsize,
1850 // dcompact function not required because the table is reference updated
1851 // in rb_gc_vm_weak_table_foreach
1852 },
1853 .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
1854};
1855
1856static VALUE
1857class_object_id(VALUE klass)
1858{
1859 VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
1860 if (!id) {
1861 unsigned int lock_lev = RB_GC_VM_LOCK();
1862 id = generate_next_object_id();
1863 VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
1864 if (existing_id) {
1865 id = existing_id;
1866 }
1867 else if (RB_UNLIKELY(id2ref_tbl)) {
1868 st_insert(id2ref_tbl, id, klass);
1869 }
1870 RB_GC_VM_UNLOCK(lock_lev);
1871 }
1872 return id;
1873}
1874
1875static inline VALUE
1876object_id_get(VALUE obj, shape_id_t shape_id)
1877{
1878 VALUE id;
1879 if (rb_shape_too_complex_p(shape_id)) {
1880 id = rb_obj_field_get(obj, ROOT_TOO_COMPLEX_WITH_OBJ_ID);
1881 }
1882 else {
1883 id = rb_obj_field_get(obj, rb_shape_object_id(shape_id));
1884 }
1885
1886#if RUBY_DEBUG
1887 if (!(FIXNUM_P(id) || RB_TYPE_P(id, T_BIGNUM))) {
1888 rb_p(obj);
1889 rb_bug("Object's shape includes object_id, but it's missing %s", rb_obj_info(obj));
1890 }
1891#endif
1892
1893 return id;
1894}
1895
1896static VALUE
1897object_id0(VALUE obj)
1898{
1899 VALUE id = Qfalse;
1900 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
1901
1902 if (rb_shape_has_object_id(shape_id)) {
1903 return object_id_get(obj, shape_id);
1904 }
1905
1906 // rb_shape_object_id_shape may lock if the current shape has
1907 // multiple children.
1908 shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);
1909
1910 id = generate_next_object_id();
1911 rb_obj_field_set(obj, object_id_shape_id, 0, id);
1912
1913 RUBY_ASSERT(RBASIC_SHAPE_ID(obj) == object_id_shape_id);
1914 RUBY_ASSERT(rb_shape_obj_has_id(obj));
1915
1916 if (RB_UNLIKELY(id2ref_tbl)) {
1917 st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
1918 }
1919 return id;
1920}
1921
1922static VALUE
1923object_id(VALUE obj)
1924{
1925 switch (BUILTIN_TYPE(obj)) {
1926 case T_CLASS:
1927 case T_MODULE:
1928 // With namespaces, classes and modules have different fields
1929 // in different namespaces, so we cannot store the object id
1930 // in fields.
1931 return class_object_id(obj);
1932 case T_IMEMO:
1933 rb_bug("T_IMEMO can't have an object_id");
1934 break;
1935 default:
1936 break;
1937 }
1938
1939 if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
1940 unsigned int lock_lev = RB_GC_VM_LOCK();
1941 VALUE id = object_id0(obj);
1942 RB_GC_VM_UNLOCK(lock_lev);
1943 return id;
1944 }
1945
1946 return object_id0(obj);
1947}
1948
1949static void
1950build_id2ref_i(VALUE obj, void *data)
1951{
1952 st_table *id2ref_tbl = (st_table *)data;
1953
1954 switch (BUILTIN_TYPE(obj)) {
1955 case T_CLASS:
1956 case T_MODULE:
1957 if (RCLASS(obj)->object_id) {
1958 st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
1959 }
1960 break;
1961 case T_IMEMO:
1962 case T_NONE:
1963 break;
1964 default:
1965 if (rb_shape_obj_has_id(obj)) {
1966 st_insert(id2ref_tbl, rb_obj_id(obj), obj);
1967 }
1968 break;
1969 }
1970}
1971
1972static VALUE
1973object_id_to_ref(void *objspace_ptr, VALUE object_id)
1974{
1975 rb_objspace_t *objspace = objspace_ptr;
1976
1977 unsigned int lev = RB_GC_VM_LOCK();
1978
1979 if (!id2ref_tbl) {
1980 rb_gc_vm_barrier(); // stop other ractors
1981
1982        // GC must not trigger while we build the table; otherwise, if we end
1983 // up freeing an object that had an ID, we might try to delete it from
1984 // the table even though it wasn't inserted yet.
1985 id2ref_tbl = st_init_table(&object_id_hash_type);
1986 id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, id2ref_tbl);
1987
1988 // build_id2ref_i will most certainly malloc, which could trigger GC and sweep
1989 // objects we just added to the table.
1990 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1991 {
1992 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
1993 }
1994 if (!gc_disabled) rb_gc_enable();
1995 id2ref_tbl_built = true;
1996 }
1997
1998 VALUE obj;
1999 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2000
2001 RB_GC_VM_UNLOCK(lev);
2002
2003 if (found) {
2004 return obj;
2005 }
2006
2007 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2008 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2009 }
2010 else {
2011 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2012 }
2013}
2014
2015static inline void
2016obj_free_object_id(VALUE obj)
2017{
2018 VALUE obj_id = 0;
2019 if (RB_UNLIKELY(id2ref_tbl)) {
2020 switch (BUILTIN_TYPE(obj)) {
2021 case T_CLASS:
2022 case T_MODULE:
2023 obj_id = RCLASS(obj)->object_id;
2024 break;
2025 case T_IMEMO:
2026 if (!IMEMO_TYPE_P(obj, imemo_fields)) {
2027 return;
2028 }
2029 // fallthrough
2030 case T_OBJECT:
2031 {
2032 shape_id_t shape_id = RBASIC_SHAPE_ID(obj);
2033 if (rb_shape_has_object_id(shape_id)) {
2034 obj_id = object_id_get(obj, shape_id);
2035 }
2036 break;
2037 }
2038 default:
2039 // For generic_fields, the T_IMEMO/fields is responsible for freeing the id.
2040 return;
2041 }
2042
2043 if (RB_UNLIKELY(obj_id)) {
2044 RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2045
2046 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2047            // If we're currently building the table, then it's not a bug.
2048            // If the object is a T_IMEMO/fields, then it's possible the actual object
2049            // has already been garbage collected.
2050 if (id2ref_tbl_built && !RB_TYPE_P(obj, T_IMEMO)) {
2051 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2052 }
2053 }
2054 }
2055 }
2056}
2057
2058void
2059rb_gc_obj_free_vm_weak_references(VALUE obj)
2060{
2061 obj_free_object_id(obj);
2062
2063    if (rb_obj_exivar_p(obj)) {
2064        rb_free_generic_ivar(obj);
2065    }
2066
2067 switch (BUILTIN_TYPE(obj)) {
2068 case T_STRING:
2069 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2070 rb_gc_free_fstring(obj);
2071 }
2072 break;
2073 case T_SYMBOL:
2074 rb_gc_free_dsymbol(obj);
2075 break;
2076 case T_IMEMO:
2077 switch (imemo_type(obj)) {
2078 case imemo_callcache: {
2079 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
2080
2081 if (vm_cc_refinement_p(cc)) {
2082 rb_vm_delete_cc_refinement(cc);
2083 }
2084
2085 break;
2086 }
2087 case imemo_callinfo:
2088 rb_vm_ci_free((const struct rb_callinfo *)obj);
2089 break;
2090 case imemo_ment:
2091 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2092 break;
2093 default:
2094 break;
2095 }
2096 break;
2097 default:
2098 break;
2099 }
2100}
2101
2102/*
2103 * call-seq:
2104 * ObjectSpace._id2ref(object_id) -> an_object
2105 *
2106 * Converts an object id to a reference to the object. May not be
2107 * called on an object id passed as a parameter to a finalizer.
2108 *
2109 * s = "I am a string" #=> "I am a string"
2110 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2111 * r == s #=> true
2112 *
2113 * In multi-ractor mode, if the object is not shareable, it raises
2114 * RangeError.
2115 *
2116 * This method is deprecated and should no longer be used.
2117 */
2118
2119static VALUE
2120id2ref(VALUE objid)
2121{
2122#if SIZEOF_LONG == SIZEOF_VOIDP
2123#define NUM2PTR(x) NUM2ULONG(x)
2124#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2125#define NUM2PTR(x) NUM2ULL(x)
2126#endif
2127 objid = rb_to_int(objid);
2128 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2129 VALUE ptr = NUM2PTR(objid);
2130 if (SPECIAL_CONST_P(ptr)) {
2131 if (ptr == Qtrue) return Qtrue;
2132 if (ptr == Qfalse) return Qfalse;
2133 if (NIL_P(ptr)) return Qnil;
2134 if (FIXNUM_P(ptr)) return ptr;
2135 if (FLONUM_P(ptr)) return ptr;
2136
2137 if (SYMBOL_P(ptr)) {
2138 // Check that the symbol is valid
2139 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2140 return ptr;
2141 }
2142 else {
2143 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2144 }
2145 }
2146
2147 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2148 }
2149 }
2150
2151 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2152 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2153 return obj;
2154 }
2155 else {
2156 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2157 }
2158}
2159
2160/* :nodoc: */
2161static VALUE
2162os_id2ref(VALUE os, VALUE objid)
2163{
2164 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2165 return id2ref(objid);
2166}
2167
2168static VALUE
2169rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2170{
2171 if (SPECIAL_CONST_P(obj)) {
2172#if SIZEOF_LONG == SIZEOF_VOIDP
2173 return LONG2NUM((SIGNED_VALUE)obj);
2174#else
2175 return LL2NUM((SIGNED_VALUE)obj);
2176#endif
2177 }
2178
2179 return get_heap_object_id(obj);
2180}
2181
2182static VALUE
2183nonspecial_obj_id(VALUE obj)
2184{
2185#if SIZEOF_LONG == SIZEOF_VOIDP
2186 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2187#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2188 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2189#else
2190# error not supported
2191#endif
2192}
2193
2194VALUE
2195rb_memory_id(VALUE obj)
2196{
2197 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2198}
2199
2200/*
2201 * Document-method: __id__
2202 * Document-method: object_id
2203 *
2204 * call-seq:
2205 * obj.__id__ -> integer
2206 * obj.object_id -> integer
2207 *
2208 * Returns an integer identifier for +obj+.
2209 *
2210 * The same number will be returned on all calls to +object_id+ for a given
2211 * object, and no two active objects will share an id.
2212 *
2213 * Note that some objects of builtin classes are reused for optimization.
2214 * This is the case for immediate values and frozen string literals.
2215 *
2216 * BasicObject implements +__id__+, Kernel implements +object_id+.
2217 *
2218 * Immediate values are not passed by reference but are passed by value:
2219 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2220 *
2221 * Object.new.object_id == Object.new.object_id # => false
2222 * (21 * 2).object_id == (21 * 2).object_id # => true
2223 * "hello".object_id == "hello".object_id # => false
2224 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2225 */
2226
2227VALUE
2228rb_obj_id(VALUE obj)
2229{
2230 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2231 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2232 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2233 * any immediates. */
2234 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2235}
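/*
 * Worked example (illustrative, assuming a 64-bit build where
 * RUBY_IMMEDIATE_MASK is 0x07): heap object IDs are then non-zero multiples
 * of 8 (8, 16, 24, ...), while e.g. the Fixnum 1 is represented as
 * (1 << 1) | 1, so 1.object_id == 3. Every immediate VALUE is either zero
 * (Qfalse) or has one of its low three bits set, so the two ranges never
 * overlap.
 */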
2236
2237bool
2238rb_obj_id_p(VALUE obj)
2239{
2240 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2241}
2242
2243/*
2244 * GC implementations should call this function before the GC phase that updates references
2245 * embedded in the machine code generated by JIT compilers. JIT compilers usually enforce the
2246 * "W^X" policy and protect the code memory from being modified during execution. This function
2247 * makes the code memory writeable.
2248 */
2249void
2250rb_gc_before_updating_jit_code(void)
2251{
2252#if USE_YJIT
2253 rb_yjit_mark_all_writeable();
2254#endif
2255}
2256
2257/*
2258 * GC implementations should call this function after the GC phase that updates references
2259 * embedded in the machine code generated by JIT compilers. This function makes the code memory
2260 * executable again.
2261 */
2262void
2263rb_gc_after_updating_jit_code(void)
2264{
2265#if USE_YJIT
2266 rb_yjit_mark_all_executable();
2267#endif
2268}
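/*
 * Illustrative usage (a sketch, not code from this file): a compacting GC
 * implementation would bracket its patching of JIT-generated code like this;
 * update_jit_embedded_references() is a hypothetical name for the
 * implementation's own update pass.
 *
 *   rb_gc_before_updating_jit_code();  // make code pages writable (W^X off)
 *   update_jit_embedded_references();  // rewrite moved VALUEs in code memory
 *   rb_gc_after_updating_jit_code();   // make code pages executable again
 */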
2269
2270static enum rb_id_table_iterator_result
2271cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
2272{
2273 size_t *total_size = data_ptr;
2274 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2275 *total_size += sizeof(*ccs);
2276 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
2277 return ID_TABLE_CONTINUE;
2278}
2279
2280static size_t
2281cc_table_memsize(struct rb_id_table *cc_table)
2282{
2283 size_t total = rb_id_table_memsize(cc_table);
2284 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
2285 return total;
2286}
2287
2288static void
2289classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2290{
2291 size_t *size = (size_t *)arg;
2292 size_t s = 0;
2293
2294 if (RCLASSEXT_M_TBL(ext)) {
2295 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2296 }
2297 if (RCLASSEXT_CVC_TBL(ext)) {
2298 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2299 }
2300 if (RCLASSEXT_CONST_TBL(ext)) {
2301 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2302 }
2303 if (RCLASSEXT_CC_TBL(ext)) {
2304 s += cc_table_memsize(RCLASSEXT_CC_TBL(ext));
2305 }
2306 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2307 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2308 }
2309 if (!prime) {
2310 s += sizeof(rb_classext_t);
2311 }
2312 *size += s;
2313}
2314
2315static void
2316classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2317{
2318 size_t *size = (size_t *)arg;
2319 size_t array_size;
2320 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2321 RUBY_ASSERT(prime);
2322 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
2323 *size += array_size * sizeof(VALUE);
2324 }
2325}
2326
2327size_t
2328rb_obj_memsize_of(VALUE obj)
2329{
2330 size_t size = 0;
2331
2332 if (SPECIAL_CONST_P(obj)) {
2333 return 0;
2334 }
2335
2336 switch (BUILTIN_TYPE(obj)) {
2337 case T_OBJECT:
2338 if (rb_shape_obj_too_complex_p(obj)) {
2339 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2340 }
2341 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
2342 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2343 }
2344 break;
2345 case T_MODULE:
2346 case T_CLASS:
2347 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2348 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2349 break;
2350 case T_ICLASS:
2351 if (RICLASS_OWNS_M_TBL_P(obj)) {
2352 if (RCLASS_M_TBL(obj)) {
2353 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2354 }
2355 }
2356 if (RCLASS_WRITABLE_CC_TBL(obj)) {
2357 size += cc_table_memsize(RCLASS_WRITABLE_CC_TBL(obj));
2358 }
2359 break;
2360 case T_STRING:
2361 size += rb_str_memsize(obj);
2362 break;
2363 case T_ARRAY:
2364 size += rb_ary_memsize(obj);
2365 break;
2366 case T_HASH:
2367 if (RHASH_ST_TABLE_P(obj)) {
2368 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2369 /* st_table is in the slot */
2370 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2371 }
2372 break;
2373 case T_REGEXP:
2374 if (RREGEXP_PTR(obj)) {
2375 size += onig_memsize(RREGEXP_PTR(obj));
2376 }
2377 break;
2378 case T_DATA:
2379 size += rb_objspace_data_type_memsize(obj);
2380 break;
2381 case T_MATCH:
2382 {
2383 rb_matchext_t *rm = RMATCH_EXT(obj);
2384 size += onig_region_memsize(&rm->regs);
2385 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2386 }
2387 break;
2388 case T_FILE:
2389 if (RFILE(obj)->fptr) {
2390 size += rb_io_memsize(RFILE(obj)->fptr);
2391 }
2392 break;
2393 case T_RATIONAL:
2394 case T_COMPLEX:
2395 break;
2396 case T_IMEMO:
2397 size += rb_imemo_memsize(obj);
2398 break;
2399
2400 case T_FLOAT:
2401 case T_SYMBOL:
2402 break;
2403
2404 case T_BIGNUM:
2405 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2406 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2407 }
2408 break;
2409
2410 case T_NODE:
2411 UNEXPECTED_NODE(obj_memsize_of);
2412 break;
2413
2414 case T_STRUCT:
2415 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2416 RSTRUCT(obj)->as.heap.ptr) {
2417 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2418 }
2419 break;
2420
2421 case T_ZOMBIE:
2422 case T_MOVED:
2423 break;
2424
2425 default:
2426 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2427 BUILTIN_TYPE(obj), (void*)obj);
2428 }
2429
2430 return size + rb_gc_obj_slot_size(obj);
2431}
2432
2433static int
2434set_zero(st_data_t key, st_data_t val, st_data_t arg)
2435{
2436 VALUE k = (VALUE)key;
2437 VALUE hash = (VALUE)arg;
2438 rb_hash_aset(hash, k, INT2FIX(0));
2439 return ST_CONTINUE;
2440}
2441
2442struct count_objects_data {
2443    size_t counts[T_MASK+1];
2444 size_t freed;
2445 size_t total;
2446};
2447
2448static void
2449count_objects_i(VALUE obj, void *d)
2450{
2451 struct count_objects_data *data = (struct count_objects_data *)d;
2452
2453 if (RBASIC(obj)->flags) {
2454 data->counts[BUILTIN_TYPE(obj)]++;
2455 }
2456 else {
2457 data->freed++;
2458 }
2459
2460 data->total++;
2461}
2462
2463/*
2464 * call-seq:
2465 * ObjectSpace.count_objects([result_hash]) -> hash
2466 *
2467 * Counts all objects grouped by type.
2468 *
2469 * It returns a hash, such as:
2470 * {
2471 * :TOTAL=>10000,
2472 * :FREE=>3011,
2473 * :T_OBJECT=>6,
2474 * :T_CLASS=>404,
2475 * # ...
2476 * }
2477 *
2478 * The contents of the returned hash are implementation specific
2479 * and may change in the future.
2480 *
2481 * Keys starting with +:T_+ denote live objects.
2482 * For example, +:T_ARRAY+ is the number of arrays.
2483 * +:FREE+ is the number of object slots that are not currently in use,
2484 * and +:TOTAL+ is the sum of the above.
2485 *
2486 * If the optional argument +result_hash+ is given,
2487 * it is overwritten and returned. This is intended to avoid probe effect.
2488 *
2489 * h = {}
2490 * ObjectSpace.count_objects(h)
2491 * puts h
2492 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2493 *
2494 * This method is only expected to work on C Ruby.
2495 *
2496 */
2497
2498static VALUE
2499count_objects(int argc, VALUE *argv, VALUE os)
2500{
2501 struct count_objects_data data = { 0 };
2502 VALUE hash = Qnil;
2503 VALUE types[T_MASK + 1];
2504
2505 if (rb_check_arity(argc, 0, 1) == 1) {
2506 hash = argv[0];
2507 if (!RB_TYPE_P(hash, T_HASH))
2508 rb_raise(rb_eTypeError, "non-hash given");
2509 }
2510
2511 for (size_t i = 0; i <= T_MASK; i++) {
2512        // type_sym can allocate an object,
2513        // so we need to create all key symbols in advance
2514        // so as not to disturb the result
2515 types[i] = type_sym(i);
2516 }
2517
2518 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2519
2520 if (NIL_P(hash)) {
2521 hash = rb_hash_new();
2522 }
2523 else if (!RHASH_EMPTY_P(hash)) {
2524 rb_hash_stlike_foreach(hash, set_zero, hash);
2525 }
2526 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2527 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2528
2529 for (size_t i = 0; i <= T_MASK; i++) {
2530 if (data.counts[i]) {
2531 rb_hash_aset(hash, types[i], SIZET2NUM(data.counts[i]));
2532 }
2533 }
2534
2535 return hash;
2536}
2537
2538#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2539
2540#define STACK_START (ec->machine.stack_start)
2541#define STACK_END (ec->machine.stack_end)
2542#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2543
2544#if STACK_GROW_DIRECTION < 0
2545# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2546#elif STACK_GROW_DIRECTION > 0
2547# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2548#else
2549# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2550 : (size_t)(STACK_END - STACK_START + 1))
2551#endif
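/*
 * Illustrative example: on a downward-growing machine stack
 * (STACK_GROW_DIRECTION < 0) where STACK_START sits 1000 VALUEs above
 * STACK_END, STACK_LENGTH evaluates to 1000. On an upward-growing stack the
 * difference is taken the other way around, inclusive of the end slot.
 */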
2552#if !STACK_GROW_DIRECTION
2553int ruby_stack_grow_direction;
2554int
2555ruby_get_stack_grow_direction(volatile VALUE *addr)
2556{
2557 VALUE *end;
2558 SET_MACHINE_STACK_END(&end);
2559
2560 if (end > addr) return ruby_stack_grow_direction = 1;
2561 return ruby_stack_grow_direction = -1;
2562}
2563#endif
2564
2565size_t
2566ruby_stack_length(VALUE **p)
2567{
2568 rb_execution_context_t *ec = GET_EC();
2569 SET_STACK_END;
2570 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2571 return STACK_LENGTH;
2572}
2573
2574#define PREVENT_STACK_OVERFLOW 1
2575#ifndef PREVENT_STACK_OVERFLOW
2576#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2577# define PREVENT_STACK_OVERFLOW 1
2578#else
2579# define PREVENT_STACK_OVERFLOW 0
2580#endif
2581#endif
2582#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2583static int
2584stack_check(rb_execution_context_t *ec, int water_mark)
2585{
2586 SET_STACK_END;
2587
2588 size_t length = STACK_LENGTH;
2589 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2590
2591 return length > maximum_length;
2592}
2593#else
2594#define stack_check(ec, water_mark) FALSE
2595#endif
2596
2597#define STACKFRAME_FOR_CALL_CFUNC 2048
2598
2599int
2600rb_ec_stack_check(rb_execution_context_t *ec)
2601{
2602 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2603}
2604
2605int
2606ruby_stack_check(void)
2607{
2608 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2609}
2610
2611/* ==================== Marking ==================== */
2612
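/*
 * Dispatch helper for the marking entry points below: during an actual GC it
 * forwards to the GC implementation's marking function; when
 * vm->gc.mark_func_data is set (e.g. for a reachability traversal such as
 * ObjectSpace.reachable_objects_from), it invokes the user-supplied callback
 * instead of marking.
 */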
2613#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2614 if (!RB_SPECIAL_CONST_P(obj)) { \
2615 rb_vm_t *vm = GET_VM(); \
2616 void *objspace = vm->gc.objspace; \
2617 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2618 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2619 (func)(objspace, (obj_or_ptr)); \
2620 } \
2621 else if (check_obj ? \
2622 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2623 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2624 true) { \
2625 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2626 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2627 vm->gc.mark_func_data = NULL; \
2628 mark_func_data->mark_func((obj), mark_func_data->data); \
2629 vm->gc.mark_func_data = mark_func_data; \
2630 } \
2631 } \
2632} while (0)
2633
2634static inline void
2635gc_mark_internal(VALUE obj)
2636{
2637 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2638}
2639
2640void
2641rb_gc_mark_movable(VALUE obj)
2642{
2643 gc_mark_internal(obj);
2644}
2645
2646void
2647rb_gc_mark_and_move(VALUE *ptr)
2648{
2649 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2650}
2651
2652static inline void
2653gc_mark_and_pin_internal(VALUE obj)
2654{
2655 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2656}
2657
2658void
2659rb_gc_mark(VALUE obj)
2660{
2661 gc_mark_and_pin_internal(obj);
2662}
2663
2664static inline void
2665gc_mark_maybe_internal(VALUE obj)
2666{
2667 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2668}
2669
2670void
2671rb_gc_mark_maybe(VALUE obj)
2672{
2673 gc_mark_maybe_internal(obj);
2674}
2675
2676void
2677rb_gc_mark_weak(VALUE *ptr)
2678{
2679 if (RB_SPECIAL_CONST_P(*ptr)) return;
2680
2681 rb_vm_t *vm = GET_VM();
2682 void *objspace = vm->gc.objspace;
2683 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2684 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2685
2686 rb_gc_impl_mark_weak(objspace, ptr);
2687 }
2688 else {
2689 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2690 }
2691}
2692
2693void
2694rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2695{
2696 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2697}
2698
2699ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2700static void
2701each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2702{
2703 VALUE v;
2704 while (n--) {
2705 v = *x;
2706 cb(v, data);
2707 x++;
2708 }
2709}
2710
2711static void
2712each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2713{
2714 if (end <= start) return;
2715 each_location(start, end - start, cb, data);
2716}
2717
2718static void
2719gc_mark_maybe_each_location(VALUE obj, void *data)
2720{
2721 gc_mark_maybe_internal(obj);
2722}
2723
2724void
2725rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2726{
2727 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2728}
2729
2730void
2731rb_gc_mark_values(long n, const VALUE *values)
2732{
2733 for (long i = 0; i < n; i++) {
2734 gc_mark_internal(values[i]);
2735 }
2736}
2737
2738void
2739rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2740{
2741 for (long i = 0; i < n; i++) {
2742 gc_mark_and_pin_internal(values[i]);
2743 }
2744}
2745
2746static int
2747mark_key(st_data_t key, st_data_t value, st_data_t data)
2748{
2749 gc_mark_and_pin_internal((VALUE)key);
2750
2751 return ST_CONTINUE;
2752}
2753
2754void
2755rb_mark_set(st_table *tbl)
2756{
2757 if (!tbl) return;
2758
2759 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2760}
2761
2762static int
2763mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2764{
2765 gc_mark_internal((VALUE)key);
2766 gc_mark_internal((VALUE)value);
2767
2768 return ST_CONTINUE;
2769}
2770
2771static int
2772pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2773{
2774 gc_mark_and_pin_internal((VALUE)key);
2775 gc_mark_and_pin_internal((VALUE)value);
2776
2777 return ST_CONTINUE;
2778}
2779
2780static int
2781pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2782{
2783 gc_mark_and_pin_internal((VALUE)key);
2784 gc_mark_internal((VALUE)value);
2785
2786 return ST_CONTINUE;
2787}
2788
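/*
 * Identity hashes (compare_by_identity) hash their keys by address, so keys
 * must be pinned to stay valid across compaction; ordinary hashes let both
 * keys and values move.
 */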
2789static void
2790mark_hash(VALUE hash)
2791{
2792 if (rb_hash_compare_by_id_p(hash)) {
2793 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2794 }
2795 else {
2796 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2797 }
2798
2799 gc_mark_internal(RHASH(hash)->ifnone);
2800}
2801
2802void
2803rb_mark_hash(st_table *tbl)
2804{
2805 if (!tbl) return;
2806
2807 st_foreach(tbl, pin_key_pin_value, 0);
2808}
2809
2810static enum rb_id_table_iterator_result
2811mark_method_entry_i(VALUE me, void *objspace)
2812{
2813 gc_mark_internal(me);
2814
2815 return ID_TABLE_CONTINUE;
2816}
2817
2818static void
2819mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2820{
2821 if (tbl) {
2822 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2823 }
2824}
2825
2826static enum rb_id_table_iterator_result
2827mark_const_entry_i(VALUE value, void *objspace)
2828{
2829 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2830
2831 gc_mark_internal(ce->value);
2832 gc_mark_internal(ce->file);
2833 return ID_TABLE_CONTINUE;
2834}
2835
2836static void
2837mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2838{
2839 if (!tbl) return;
2840 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
2841}
2842
2843struct mark_cc_entry_args {
2844    rb_objspace_t *objspace;
2845    VALUE klass;
2846};
2847
2848static enum rb_id_table_iterator_result
2849mark_cc_entry_i(VALUE ccs_ptr, void *data)
2850{
2851 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2852
2853 VM_ASSERT(vm_ccs_p(ccs));
2854
2855 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2856 rb_vm_ccs_free(ccs);
2857 return ID_TABLE_DELETE;
2858 }
2859 else {
2860 gc_mark_internal((VALUE)ccs->cme);
2861
2862 for (int i=0; i<ccs->len; i++) {
2863 VM_ASSERT(((struct mark_cc_entry_args *)data)->klass == ccs->entries[i].cc->klass);
2864 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
2865
2866 gc_mark_internal((VALUE)ccs->entries[i].cc);
2867 }
2868 return ID_TABLE_CONTINUE;
2869 }
2870}
2871
2872static void
2873mark_cc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl, VALUE klass)
2874{
2875 struct mark_cc_entry_args args;
2876
2877 if (!tbl) return;
2878
2879 args.objspace = objspace;
2880 args.klass = klass;
2881 rb_id_table_foreach_values(tbl, mark_cc_entry_i, (void *)&args);
2882}
2883
2884static enum rb_id_table_iterator_result
2885mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2886{
2887 struct rb_cvar_class_tbl_entry *entry;
2888
2889 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2890
2891 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2892 gc_mark_internal((VALUE)entry->cref);
2893
2894 return ID_TABLE_CONTINUE;
2895}
2896
2897static void
2898mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2899{
2900 if (!tbl) return;
2901 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2902}
2903
2904#if STACK_GROW_DIRECTION < 0
2905#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2906#elif STACK_GROW_DIRECTION > 0
2907#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2908#else
2909#define GET_STACK_BOUNDS(start, end, appendix) \
2910 ((STACK_END < STACK_START) ? \
2911 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2912#endif
2913
2914static void
2915gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2916{
2917 gc_mark_maybe_internal(obj);
2918
2919#ifdef RUBY_ASAN_ENABLED
2920 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2921 void *fake_frame_start;
2922 void *fake_frame_end;
2923 bool is_fake_frame = asan_get_fake_stack_extents(
2924 ec->machine.asan_fake_stack_handle, obj,
2925 ec->machine.stack_start, ec->machine.stack_end,
2926 &fake_frame_start, &fake_frame_end
2927 );
2928 if (is_fake_frame) {
2929 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2930 }
2931#endif
2932}
2933
2934static VALUE
2935gc_location_internal(void *objspace, VALUE value)
2936{
2937 if (SPECIAL_CONST_P(value)) {
2938 return value;
2939 }
2940
2941 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2942
2943 return rb_gc_impl_location(objspace, value);
2944}
2945
2946VALUE
2947rb_gc_location(VALUE value)
2948{
2949 return gc_location_internal(rb_gc_get_objspace(), value);
2950}
2951
2952#if defined(__wasm__)
2953
2954
2955static VALUE *rb_stack_range_tmp[2];
2956
2957static void
2958rb_mark_locations(void *begin, void *end)
2959{
2960 rb_stack_range_tmp[0] = begin;
2961 rb_stack_range_tmp[1] = end;
2962}
2963
2964void
2965rb_gc_save_machine_context(void)
2966{
2967 // no-op
2968}
2969
2970# if defined(__EMSCRIPTEN__)
2971
2972static void
2973mark_current_machine_context(const rb_execution_context_t *ec)
2974{
2975 emscripten_scan_stack(rb_mark_locations);
2976 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2977
2978 emscripten_scan_registers(rb_mark_locations);
2979 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2980}
2981# else // use Asyncify version
2982
2983static void
2984mark_current_machine_context(rb_execution_context_t *ec)
2985{
2986 VALUE *stack_start, *stack_end;
2987 SET_STACK_END;
2988 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2989 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2990
2991 rb_wasm_scan_locals(rb_mark_locations);
2992 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2993}
2994
2995# endif
2996
2997#else // !defined(__wasm__)
2998
2999void
3000rb_gc_save_machine_context(void)
3001{
3002 rb_thread_t *thread = GET_THREAD();
3003
3004 RB_VM_SAVE_MACHINE_CONTEXT(thread);
3005}
3006
3007
3008static void
3009mark_current_machine_context(const rb_execution_context_t *ec)
3010{
3011 rb_gc_mark_machine_context(ec);
3012}
3013#endif
3014
3015void
3016rb_gc_mark_machine_context(const rb_execution_context_t *ec)
3017{
3018 VALUE *stack_start, *stack_end;
3019
3020 GET_STACK_BOUNDS(stack_start, stack_end, 0);
3021 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
3022
3023 void *data =
3024#ifdef RUBY_ASAN_ENABLED
3025        /* gc_mark_machine_stack_location_maybe() uses data as const */
3026        (rb_execution_context_t *)ec;
3027#else
3028 NULL;
3029#endif
3030
3031 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3032 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3033 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3034}
3035
3036static int
3037rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3038{
3039 gc_mark_and_pin_internal((VALUE)value);
3040
3041 return ST_CONTINUE;
3042}
3043
3044void
3045rb_mark_tbl(st_table *tbl)
3046{
3047 if (!tbl || tbl->num_entries == 0) return;
3048
3049 st_foreach(tbl, rb_mark_tbl_i, 0);
3050}
3051
3052static void
3053gc_mark_tbl_no_pin(st_table *tbl)
3054{
3055 if (!tbl || tbl->num_entries == 0) return;
3056
3057 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3058}
3059
3060void
3061rb_mark_tbl_no_pin(st_table *tbl)
3062{
3063 gc_mark_tbl_no_pin(tbl);
3064}
3065
3066static bool
3067gc_declarative_marking_p(const rb_data_type_t *type)
3068{
3069 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3070}
3071
3072void
3073rb_gc_mark_roots(void *objspace, const char **categoryp)
3074{
3075 rb_execution_context_t *ec = GET_EC();
3076 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3077
3078#define MARK_CHECKPOINT(category) do { \
3079 if (categoryp) *categoryp = category; \
3080} while (0)
3081
3082 MARK_CHECKPOINT("vm");
3083 rb_vm_mark(vm);
3084
3085 MARK_CHECKPOINT("end_proc");
3086 rb_mark_end_proc();
3087
3088 MARK_CHECKPOINT("global_tbl");
3089 rb_gc_mark_global_tbl();
3090
3091#if USE_YJIT
3092 void rb_yjit_root_mark(void); // in Rust
3093
3094 if (rb_yjit_enabled_p) {
3095 MARK_CHECKPOINT("YJIT");
3096 rb_yjit_root_mark();
3097 }
3098#endif
3099
3100 MARK_CHECKPOINT("machine_context");
3101 mark_current_machine_context(ec);
3102
3103 MARK_CHECKPOINT("global_symbols");
3104 rb_sym_global_symbols_mark();
3105
3106 MARK_CHECKPOINT("finish");
3107
3108#undef MARK_CHECKPOINT
3109}
3110
3111struct gc_mark_classext_foreach_arg {
3112    rb_objspace_t *objspace;
3113    VALUE obj;
3114};
3115
3116static void
3117gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3118{
3119    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3120    rb_objspace_t *objspace = foreach_arg->objspace;
3121 VALUE obj = foreach_arg->obj;
3122
3123 if (RCLASSEXT_SUPER(ext)) {
3124 gc_mark_internal(RCLASSEXT_SUPER(ext));
3125 }
3126 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3127 gc_mark_internal(RCLASSEXT_FIELDS_OBJ(ext));
3128 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3129 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3130 }
3131 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3132 mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), obj);
3133 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3134 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3135}
3136
3137static void
3138gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3139{
3140    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3141    rb_objspace_t *objspace = foreach_arg->objspace;
3142 VALUE iclass = foreach_arg->obj;
3143
3144 if (RCLASSEXT_SUPER(ext)) {
3145 gc_mark_internal(RCLASSEXT_SUPER(ext));
3146 }
3147 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3148 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3149 }
3150 if (RCLASSEXT_INCLUDER(ext)) {
3151 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3152 }
3153 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3154 mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), iclass);
3155}
3156
3157#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
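/*
 * With RUBY_TYPED_DECL_MARKING, dmark is not a function but a
 * RUBY_REF_END-terminated array of member offsets, which the T_DATA case in
 * rb_gc_mark_children() walks directly. Illustrative sketch (the struct and
 * names are hypothetical):
 *
 *   struct foo { VALUE a; long n; VALUE b; };
 *   static const size_t foo_refs[] = {
 *       RUBY_REF_EDGE(struct foo, a),
 *       RUBY_REF_EDGE(struct foo, b),
 *       RUBY_REF_END,
 *   };
 *   static const rb_data_type_t foo_type = {
 *       "foo",
 *       { RUBY_REFS_LIST_PTR(foo_refs), RUBY_TYPED_DEFAULT_FREE, NULL, },
 *       0, 0, RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *   };
 */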
3158
3159void
3160rb_gc_mark_children(void *objspace, VALUE obj)
3161{
3162 struct gc_mark_classext_foreach_arg foreach_args;
3163
3164 if (rb_obj_exivar_p(obj)) {
3165 rb_mark_generic_ivar(obj);
3166 }
3167
3168 switch (BUILTIN_TYPE(obj)) {
3169 case T_FLOAT:
3170 case T_BIGNUM:
3171 return;
3172
3173 case T_NIL:
3174 case T_FIXNUM:
3175 rb_bug("rb_gc_mark() called for broken object");
3176 break;
3177
3178 case T_NODE:
3179 UNEXPECTED_NODE(rb_gc_mark);
3180 break;
3181
3182 case T_IMEMO:
3183 rb_imemo_mark_and_move(obj, false);
3184 return;
3185
3186 default:
3187 break;
3188 }
3189
3190 gc_mark_internal(RBASIC(obj)->klass);
3191
3192 switch (BUILTIN_TYPE(obj)) {
3193 case T_CLASS:
3194 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
3195 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3196 }
3197 // Continue to the shared T_CLASS/T_MODULE
3198 case T_MODULE:
3199 foreach_args.objspace = objspace;
3200 foreach_args.obj = obj;
3201 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3202 break;
3203
3204 case T_ICLASS:
3205 foreach_args.objspace = objspace;
3206 foreach_args.obj = obj;
3207 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3208 break;
3209
3210 case T_ARRAY:
3211 if (ARY_SHARED_P(obj)) {
3212 VALUE root = ARY_SHARED_ROOT(obj);
3213 gc_mark_internal(root);
3214 }
3215 else {
3216 long len = RARRAY_LEN(obj);
3217 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3218 for (long i = 0; i < len; i++) {
3219 gc_mark_internal(ptr[i]);
3220 }
3221 }
3222 break;
3223
3224 case T_HASH:
3225 mark_hash(obj);
3226 break;
3227
3228 case T_SYMBOL:
3229 gc_mark_internal(RSYMBOL(obj)->fstr);
3230 break;
3231
3232 case T_STRING:
3233 if (STR_SHARED_P(obj)) {
3234 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3235 /* Embedded shared strings cannot be moved because this string
3236 * points into the slot of the shared string. There may be code
3237 * using the RSTRING_PTR on the stack, which would pin this
3238 * string but not pin the shared string, causing it to move. */
3239 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3240 }
3241 else {
3242 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3243 }
3244 }
3245 break;
3246
3247 case T_DATA: {
3248 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3249
3250 if (ptr) {
3251 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3252 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3253
3254 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3255 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3256 }
3257 }
3258 else {
3259            RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
3260                RTYPEDDATA_TYPE(obj)->function.dmark :
3261                RDATA(obj)->dmark;
3262 if (mark_func) (*mark_func)(ptr);
3263 }
3264 }
3265
3266 break;
3267 }
3268
3269 case T_OBJECT: {
3270 if (rb_shape_obj_too_complex_p(obj)) {
3271 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3272 }
3273 else {
3274 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3275
3276 uint32_t len = ROBJECT_FIELDS_COUNT(obj);
3277 for (uint32_t i = 0; i < len; i++) {
3278 gc_mark_internal(ptr[i]);
3279 }
3280 }
3281
3282 attr_index_t fields_count = ROBJECT_FIELDS_COUNT(obj);
3283 if (fields_count) {
3284 VALUE klass = RBASIC_CLASS(obj);
3285
3286 // Increment max_iv_count if applicable, used to determine size pool allocation
3287 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3288 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3289 }
3290 }
3291
3292 break;
3293 }
3294
3295 case T_FILE:
3296 if (RFILE(obj)->fptr) {
3297 gc_mark_internal(RFILE(obj)->fptr->self);
3298 gc_mark_internal(RFILE(obj)->fptr->pathv);
3299 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3300 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3301 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3302 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3303 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3304 gc_mark_internal(RFILE(obj)->fptr->timeout);
3305 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3306 }
3307 break;
3308
3309 case T_REGEXP:
3310 gc_mark_internal(RREGEXP(obj)->src);
3311 break;
3312
3313 case T_MATCH:
3314 gc_mark_internal(RMATCH(obj)->regexp);
3315 if (RMATCH(obj)->str) {
3316 gc_mark_internal(RMATCH(obj)->str);
3317 }
3318 break;
3319
3320 case T_RATIONAL:
3321 gc_mark_internal(RRATIONAL(obj)->num);
3322 gc_mark_internal(RRATIONAL(obj)->den);
3323 break;
3324
3325 case T_COMPLEX:
3326 gc_mark_internal(RCOMPLEX(obj)->real);
3327 gc_mark_internal(RCOMPLEX(obj)->imag);
3328 break;
3329
3330 case T_STRUCT: {
3331 const long len = RSTRUCT_LEN(obj);
3332 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3333
3334 for (long i = 0; i < len; i++) {
3335 gc_mark_internal(ptr[i]);
3336 }
3337
3338 break;
3339 }
3340
3341 default:
3342 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3343 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3344 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3345 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3346 BUILTIN_TYPE(obj), (void *)obj,
3347 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3348 }
3349}
3350
3351size_t
3352rb_gc_obj_optimal_size(VALUE obj)
3353{
3354 switch (BUILTIN_TYPE(obj)) {
3355 case T_ARRAY:
3356 return rb_ary_size_as_embedded(obj);
3357
3358 case T_OBJECT:
3359 if (rb_shape_obj_too_complex_p(obj)) {
3360 return sizeof(struct RObject);
3361 }
3362 else {
3363 return rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3364 }
3365
3366 case T_STRING:
3367 return rb_str_size_as_embedded(obj);
3368
3369 case T_HASH:
3370 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3371
3372 default:
3373 return 0;
3374 }
3375}
3376
3377void
3378rb_gc_writebarrier(VALUE a, VALUE b)
3379{
3380 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3381}
3382
3383void
3384rb_gc_writebarrier_unprotect(VALUE obj)
3385{
3386 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3387}
3388
3389/*
3390 * remember `obj' if needed.
3391 */
3392void
3393rb_gc_writebarrier_remember(VALUE obj)
3394{
3395 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3396}
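/*
 * Illustrative usage (a sketch; dst, src and n are hypothetical): callers
 * that bulk-write VALUE fields without issuing a write barrier per slot can
 * re-register the whole object in one shot.
 *
 *   MEMCPY(dst->values, src, VALUE, n);  // no per-slot barriers
 *   rb_gc_writebarrier_remember(obj);    // remember obj once afterwards
 */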
3397
3398void
3399rb_gc_copy_attributes(VALUE dest, VALUE obj)
3400{
3401 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3402}
3403
3404int
3405rb_gc_modular_gc_loaded_p(void)
3406{
3407#if USE_MODULAR_GC
3408 return rb_gc_functions.modular_gc_loaded_p;
3409#else
3410 return false;
3411#endif
3412}
3413
3414const char *
3415rb_gc_active_gc_name(void)
3416{
3417 const char *gc_name = rb_gc_impl_active_gc_name();
3418
3419 const size_t len = strlen(gc_name);
3420 if (len > RB_GC_MAX_NAME_LEN) {
3421 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3422 RB_GC_MAX_NAME_LEN, len, gc_name);
3423 }
3424
3425 return gc_name;
3426}
3427
3428struct rb_gc_object_metadata_entry *
3429rb_gc_object_metadata(VALUE obj)
3430{
3431 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3432}
3433
3434/* GC */
3435
3436void *
3437rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3438{
3439 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3440}
3441
3442void
3443rb_gc_ractor_cache_free(void *cache)
3444{
3445 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3446}
3447
3448void
3449rb_gc_register_mark_object(VALUE obj)
3450{
3451 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3452 return;
3453
3454 rb_vm_register_global_object(obj);
3455}
3456
3457void
3458rb_gc_register_address(VALUE *addr)
3459{
3460 rb_vm_t *vm = GET_VM();
3461
3462 VALUE obj = *addr;
3463
3464 struct global_object_list *tmp = ALLOC(struct global_object_list);
3465 tmp->next = vm->global_object_list;
3466 tmp->varptr = addr;
3467 vm->global_object_list = tmp;
3468
3469 /*
3470 * Because some C extensions have assignment-then-register bugs,
3471     * we defensively guard `obj` here so that it will not get swept.
3472 */
3473 RB_GC_GUARD(obj);
3474 if (0 && !SPECIAL_CONST_P(obj)) {
3475 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3476 rb_obj_class(obj));
3477 rb_print_backtrace(stderr);
3478 }
3479}
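/*
 * Illustrative sketch of the hazard guarded against above: ALLOC() may
 * trigger GC before `addr` is appended to global_object_list, at which point
 * nothing else roots the already-assigned object.
 *
 *   VALUE v = rb_str_new_cstr("x");  // (1) assignment first...
 *   rb_gc_register_address(&v);      // (2) ...registration may itself GC,
 *                                    //     hence the RB_GC_GUARD on the copy
 */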
3480
3481void
3482rb_gc_unregister_address(VALUE *addr)
3483{
3484 rb_vm_t *vm = GET_VM();
3485 struct global_object_list *tmp = vm->global_object_list;
3486
3487 if (tmp->varptr == addr) {
3488 vm->global_object_list = tmp->next;
3489 xfree(tmp);
3490 return;
3491 }
3492 while (tmp->next) {
3493 if (tmp->next->varptr == addr) {
3494 struct global_object_list *t = tmp->next;
3495
3496 tmp->next = tmp->next->next;
3497 xfree(t);
3498 break;
3499 }
3500 tmp = tmp->next;
3501 }
3502}
3503
3504void
3505rb_global_variable(VALUE *var)
3506{
3507 rb_gc_register_address(var);
3508}
3509
3510static VALUE
3511gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3512{
3513 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3514
3515 return Qnil;
3516}
3517
3518/*
3519 * rb_objspace_each_objects() is a special C API to walk through the
3520 * Ruby object space. This C API is difficult to use. Frankly, you
3521 * should not use it unless you have read the source code of this
3522 * function and understand what it does.
3523 *
3524 * 'callback' will be called several times (once per heap page, in the
3525 * current implementation) with:
3526 * vstart: a pointer to the first living object of the heap_page.
3527 *       vend: a pointer to just past the valid heap_page area.
3528 *       stride: the distance to the next VALUE.
3529 *
3530 * If callback() returns non-zero, the iteration will be stopped.
3531 *
3532 * This is sample callback code to iterate over live objects:
3533 *
3534 * static int
3535 * sample_callback(void *vstart, void *vend, int stride, void *data)
3536 * {
3537 * VALUE v = (VALUE)vstart;
3538 * for (; v != (VALUE)vend; v += stride) {
3539 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3540 * // do something with live object 'v'
3541 * }
3542 * }
3543 * return 0; // continue to iteration
3544 * }
3545 *
3546 * Note: 'vstart' is not the top of the heap_page. It points to the
3547 * first living object, in order to grasp at least one object and
3548 * avoid GC issues. This means that you cannot walk through all Ruby
3549 * object pages, including freed object pages.
3550 *
3551 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3552 * However, variable values may be passed as 'stride' for various
3553 * reasons, so you must use 'stride' instead of a constant value in
3554 * the iteration.
3555 */
3556void
3557rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3558{
3559 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3560}
3561
3562static void
3563gc_ref_update_array(void *objspace, VALUE v)
3564{
3565 if (ARY_SHARED_P(v)) {
3566 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3567
3568 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3569
3570 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3571 // If the root is embedded and its location has changed
3572 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3573 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3574 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3575 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3576 }
3577 }
3578 else {
3579 long len = RARRAY_LEN(v);
3580
3581 if (len > 0) {
3582 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3583 for (long i = 0; i < len; i++) {
3584 UPDATE_IF_MOVED(objspace, ptr[i]);
3585 }
3586 }
3587
3588 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3589 if (rb_ary_embeddable_p(v)) {
3590 rb_ary_make_embedded(v);
3591 }
3592 }
3593 }
3594}
3595
3596static void
3597gc_ref_update_object(void *objspace, VALUE v)
3598{
3599 VALUE *ptr = ROBJECT_FIELDS(v);
3600
3601 if (rb_shape_obj_too_complex_p(v)) {
3602 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3603 return;
3604 }
3605
3606 size_t slot_size = rb_gc_obj_slot_size(v);
3607 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3608 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3609 // Object can be re-embedded
3610 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3611 RB_FL_SET_RAW(v, ROBJECT_EMBED);
3612 xfree(ptr);
3613 ptr = ROBJECT(v)->as.ary;
3614 }
3615
3616 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3617 UPDATE_IF_MOVED(objspace, ptr[i]);
3618 }
3619}
3620
3621void
3622rb_gc_ref_update_table_values_only(st_table *tbl)
3623{
3624 gc_ref_update_table_values_only(tbl);
3625}
3626
3627/* Update MOVED references in a VALUE=>VALUE st_table */
3628void
3629rb_gc_update_tbl_refs(st_table *ptr)
3630{
3631 gc_update_table_refs(ptr);
3632}
3633
3634static void
3635gc_ref_update_hash(void *objspace, VALUE v)
3636{
3637 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3638}
3639
3640static void
3641gc_update_values(void *objspace, long n, VALUE *values)
3642{
3643 for (long i = 0; i < n; i++) {
3644 UPDATE_IF_MOVED(objspace, values[i]);
3645 }
3646}
3647
3648void
3649rb_gc_update_values(long n, VALUE *values)
3650{
3651 gc_update_values(rb_gc_get_objspace(), n, values);
3652}
3653
3654static enum rb_id_table_iterator_result
3655check_id_table_move(VALUE value, void *data)
3656{
3657 void *objspace = (void *)data;
3658
3659 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3660 return ID_TABLE_REPLACE;
3661 }
3662
3663 return ID_TABLE_CONTINUE;
3664}
3665
3666void
3667rb_gc_prepare_heap_process_object(VALUE obj)
3668{
3669 switch (BUILTIN_TYPE(obj)) {
3670 case T_STRING:
3671        // Precompute the string coderange. This both saves time for when it is
3672        // eventually needed, and avoids mutating heap pages after a potential fork.
3673        rb_enc_str_coderange(obj);
3674        break;
3675 default:
3676 break;
3677 }
3678}
3679
3680void
3681rb_gc_prepare_heap(void)
3682{
3683 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3684}
3685
3686size_t
3687rb_gc_heap_id_for_size(size_t size)
3688{
3689 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3690}
3691
3692bool
3693rb_gc_size_allocatable_p(size_t size)
3694{
3695 return rb_gc_impl_size_allocatable_p(size);
3696}
3697
3698static enum rb_id_table_iterator_result
3699update_id_table(VALUE *value, void *data, int existing)
3700{
3701 void *objspace = (void *)data;
3702
3703 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3704 *value = gc_location_internal(objspace, (VALUE)*value);
3705 }
3706
3707 return ID_TABLE_CONTINUE;
3708}
3709
3710static void
3711update_m_tbl(void *objspace, struct rb_id_table *tbl)
3712{
3713 if (tbl) {
3714 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3715 }
3716}
3717
3718static enum rb_id_table_iterator_result
3719update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3720{
3721 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3722 VM_ASSERT(vm_ccs_p(ccs));
3723
3724 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3725 ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
3726 }
3727
3728 for (int i=0; i<ccs->len; i++) {
3729 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3730 ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
3731 }
3732 }
3733
3734 // do not replace
3735 return ID_TABLE_CONTINUE;
3736}
3737
3738static void
3739update_cc_tbl(void *objspace, struct rb_id_table *tbl)
3740{
3741 if (!tbl) return;
3742 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3743}
3744
3745static enum rb_id_table_iterator_result
3746update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3747{
3748 struct rb_cvar_class_tbl_entry *entry;
3749
3750 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3751
3752 if (entry->cref) {
3753 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3754 }
3755
3756 entry->class_value = gc_location_internal(objspace, entry->class_value);
3757
3758 return ID_TABLE_CONTINUE;
3759}
3760
3761static void
3762update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3763{
3764 if (!tbl) return;
3765 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3766}
3767
3768static enum rb_id_table_iterator_result
3769update_const_tbl_i(VALUE value, void *objspace)
3770{
3771 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3772
3773 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3774 ce->value = gc_location_internal(objspace, ce->value);
3775 }
3776
3777 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3778 ce->file = gc_location_internal(objspace, ce->file);
3779 }
3780
3781 return ID_TABLE_CONTINUE;
3782}
3783
3784static void
3785update_const_tbl(void *objspace, struct rb_id_table *tbl)
3786{
3787 if (!tbl) return;
3788 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3789}
3790
3791static void
3792update_subclasses(void *objspace, rb_classext_t *ext)
3793{
3794 rb_subclass_entry_t *entry;
3795 rb_subclass_anchor_t *anchor = RCLASSEXT_SUBCLASSES(ext);
3796 if (!anchor) return;
3797 entry = anchor->head;
3798 while (entry) {
3799 if (entry->klass)
3800 UPDATE_IF_MOVED(objspace, entry->klass);
3801 entry = entry->next;
3802 }
3803}
3804
3805static void
3806update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
3807{
3808 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
3809 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1;
3810 for (size_t i = 0; i < array_size; i++) {
3811 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
3812 }
3813 }
3814}
3815
3816static void
3817update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
3818{
3819 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
3820 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
3821 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
3822 if (is_iclass) {
3823 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
3824 }
3825}
3826
3827static void
3828update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3829{
3830 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3831 rb_objspace_t *objspace = args->objspace;
3832
3833 if (RCLASSEXT_SUPER(ext)) {
3834 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3835 }
3836
3837 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3838
3839 UPDATE_IF_MOVED(objspace, ext->fields_obj);
3840 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
3841 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3842 }
3843 update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext));
3844 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3845 update_superclasses(objspace, ext);
3846 update_subclasses(objspace, ext);
3847
3848 update_classext_values(objspace, ext, false);
3849}
3850
3851static void
3852update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3853{
3854 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3855 rb_objspace_t *objspace = args->objspace;
3856
3857 if (RCLASSEXT_SUPER(ext)) {
3858 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3859 }
3860 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3861 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3862 update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext));
3863 update_subclasses(objspace, ext);
3864
3865 update_classext_values(objspace, ext, true);
3866}
3867
3868struct global_vm_table_foreach_data {
3869    vm_table_foreach_callback_func callback;
3870 vm_table_update_callback_func update_callback;
3871 void *data;
3872 bool weak_only;
3873};
3874
3875static int
3876vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3877{
3878 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3879
3880 int ret = iter_data->callback((VALUE)key, iter_data->data);
3881
3882 if (!iter_data->weak_only) {
3883 if (ret != ST_CONTINUE) return ret;
3884
3885 ret = iter_data->callback((VALUE)value, iter_data->data);
3886 }
3887
3888 return ret;
3889}
3890
3891static int
3892vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3893{
3894 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3895
3896 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3897
3898 if (!iter_data->weak_only) {
3899 if (ret != ST_CONTINUE) return ret;
3900
3901 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3902 }
3903
3904 return ret;
3905}
3906
3907static int
3908vm_weak_table_cc_refinement_foreach(st_data_t key, st_data_t data, int error)
3909{
3910 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3911
3912 return iter_data->callback((VALUE)key, iter_data->data);
3913}
3914
3915static int
3916vm_weak_table_cc_refinement_foreach_update_update(st_data_t *key, st_data_t data, int existing)
3917{
3918 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3919
3920 return iter_data->update_callback((VALUE *)key, iter_data->data);
3921}
3922
3923
3924static int
3925vm_weak_table_sym_set_foreach(VALUE *sym_ptr, void *data)
3926{
3927 VALUE sym = *sym_ptr;
3928 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3929
3930 if (RB_SPECIAL_CONST_P(sym)) return ST_CONTINUE;
3931
3932 int ret = iter_data->callback(sym, iter_data->data);
3933
3934 if (ret == ST_REPLACE) {
3935 ret = iter_data->update_callback(sym_ptr, iter_data->data);
3936 }
3937
3938 return ret;
3939}
3940
3941struct st_table *rb_generic_fields_tbl_get(void);
3942
3943static int
3944vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3945{
3946 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3947
3948 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
3949 int ret = iter_data->callback((VALUE)key, iter_data->data);
3950 if (ret != ST_CONTINUE) return ret;
3951 }
3952
3953 return iter_data->callback((VALUE)value, iter_data->data);
3954}
3955
3956static int
3957vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3958{
3959 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3960
3961 iter_data->update_callback((VALUE *)value, iter_data->data);
3962
3963 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
3964 iter_data->update_callback((VALUE *)key, iter_data->data);
3965 }
3966
3967 return ST_CONTINUE;
3968}
3969
3970static int
3971vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
3972{
3973 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3974
3975 int ret = iter_data->callback((VALUE)key, iter_data->data);
3976
3977 VALUE new_value = (VALUE)value;
3978 VALUE new_key = (VALUE)key;
3979
3980 switch (ret) {
3981 case ST_CONTINUE:
3982 break;
3983
3984 case ST_DELETE:
3985 RBASIC_SET_SHAPE_ID((VALUE)key, ROOT_SHAPE_ID);
3986 return ST_DELETE;
3987
3988 case ST_REPLACE: {
3989 ret = iter_data->update_callback(&new_key, iter_data->data);
3990 if (key != new_key) {
3991 ret = ST_DELETE;
3992 }
3993 break;
3994 }
3995
3996 default:
3997 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ret);
3998 }
3999
4000 if (!iter_data->weak_only) {
4001 int ivar_ret = iter_data->callback(new_value, iter_data->data);
4002 switch (ivar_ret) {
4003 case ST_CONTINUE:
4004 break;
4005
4006 case ST_REPLACE:
4007 iter_data->update_callback(&new_value, iter_data->data);
4008 break;
4009
4010 default:
4011 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
4012 }
4013 }
4014
4015 if (key != new_key || value != new_value) {
4016 DURING_GC_COULD_MALLOC_REGION_START();
4017 {
4018 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, new_value);
4019 }
4020 DURING_GC_COULD_MALLOC_REGION_END();
4021 }
4022
4023 return ret;
4024}
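/*
 * Note on the ST_REPLACE branch above: an entry whose key object moved
 * during compaction cannot be fixed up in place, because its st_table
 * bucket was computed from the old address. The stale entry is therefore
 * deleted (ret == ST_DELETE) and the (new_key, new_value) pair is
 * re-inserted, inside a region where malloc is permitted during GC.
 */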
4025
4026static int
4027vm_weak_table_frozen_strings_foreach(VALUE *str, void *data)
4028{
4030 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4031 int retval = iter_data->callback(*str, iter_data->data);
4032
4033 if (retval == ST_REPLACE) {
4034 retval = iter_data->update_callback(str, iter_data->data);
4035 }
4036
4037 if (retval == ST_DELETE) {
4038 FL_UNSET(*str, RSTRING_FSTR);
4039 }
4040
4041 return retval;
4042}
4043
4044void rb_fstring_foreach_with_replace(int (*callback)(VALUE *str, void *data), void *data);
4045void
4046rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4047 vm_table_update_callback_func update_callback,
4048 void *data,
4049 bool weak_only,
4050 enum rb_gc_vm_weak_tables table)
4051{
4052 rb_vm_t *vm = GET_VM();
4053
4054 struct global_vm_table_foreach_data foreach_data = {
4055 .callback = callback,
4056 .update_callback = update_callback,
4057 .data = data,
4058 .weak_only = weak_only,
4059 };
4060
4061 switch (table) {
4062 case RB_GC_VM_CI_TABLE: {
4063 if (vm->ci_table) {
4064 st_foreach_with_replace(
4065 vm->ci_table,
4066 vm_weak_table_foreach_weak_key,
4067 vm_weak_table_foreach_update_weak_key,
4068 (st_data_t)&foreach_data
4069 );
4070 }
4071 break;
4072 }
4073 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4074 if (vm->overloaded_cme_table) {
4075 st_foreach_with_replace(
4076 vm->overloaded_cme_table,
4077 vm_weak_table_foreach_weak_key,
4078 vm_weak_table_foreach_update_weak_key,
4079 (st_data_t)&foreach_data
4080 );
4081 }
4082 break;
4083 }
4084 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4085 rb_sym_global_symbol_table_foreach_weak_reference(
4086 vm_weak_table_sym_set_foreach,
4087 &foreach_data
4088 );
4089 break;
4090 }
4091 case RB_GC_VM_ID2REF_TABLE: {
4092 if (id2ref_tbl) {
4093 st_foreach_with_replace(
4094 id2ref_tbl,
4095 vm_weak_table_id2ref_foreach,
4096 vm_weak_table_id2ref_foreach_update,
4097 (st_data_t)&foreach_data
4098 );
4099 }
4100 break;
4101 }
4102 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4103 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4104 if (generic_fields_tbl) {
4105 st_foreach(
4106 generic_fields_tbl,
4107 vm_weak_table_gen_fields_foreach,
4108 (st_data_t)&foreach_data
4109 );
4110 }
4111 break;
4112 }
4113 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4114 rb_fstring_foreach_with_replace(
4115 vm_weak_table_frozen_strings_foreach,
4116 &foreach_data
4117 );
4118 break;
4119 }
4120 case RB_GC_VM_CC_REFINEMENT_TABLE: {
4121 if (vm->cc_refinement_table) {
4122 set_foreach_with_replace(
4123 vm->cc_refinement_table,
4124 vm_weak_table_cc_refinement_foreach,
4125            vm_weak_table_cc_refinement_foreach_update,
4126 (st_data_t)&foreach_data
4127 );
4128 }
4129 break;
4130 }
4131 case RB_GC_VM_WEAK_TABLE_COUNT:
4132 rb_bug("Unreachable");
4133 default:
4134 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
4135 }
4136}
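/*
 * A minimal sketch (not part of the VM) of the callback pair that
 * rb_gc_vm_weak_table_foreach expects. The foreach callback inspects an
 * entry and answers ST_CONTINUE, ST_DELETE, or ST_REPLACE; when it answers
 * ST_REPLACE, the update callback is handed a pointer to the slot so it can
 * be rewritten in place. Both functions here are hypothetical.
 */
static int
example_weak_entry_check(VALUE obj, void *data)
{
    size_t *seen = data;
    (*seen)++;
    /* Ask for the slot to be rewritten; a compacting GC would do this for
     * entries whose referent may have moved. */
    return ST_REPLACE;
}

static int
example_weak_entry_update(VALUE *slot, void *data)
{
    *slot = rb_gc_location(*slot); /* no-op unless the object moved */
    return ST_CONTINUE;
}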
4137
4138void
4139rb_gc_update_vm_references(void *objspace)
4140{
4141 rb_execution_context_t *ec = GET_EC();
4142 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4143
4144 rb_vm_update_references(vm);
4145 rb_gc_update_global_tbl();
4146 rb_sym_global_symbols_update_references();
4147
4148#if USE_YJIT
4149 void rb_yjit_root_update_references(void); // in Rust
4150
4151 if (rb_yjit_enabled_p) {
4152 rb_yjit_root_update_references();
4153 }
4154#endif
4155}
4156
4157void
4158rb_gc_update_object_references(void *objspace, VALUE obj)
4159{
4160 struct classext_foreach_args args;
4161
4162 switch (BUILTIN_TYPE(obj)) {
4163 case T_CLASS:
4164 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4165 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4166 }
4167        // fall through to the shared T_CLASS/T_MODULE handling
4168 case T_MODULE:
4169 args.klass = obj;
4170 args.objspace = objspace;
4171 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4172 break;
4173
4174 case T_ICLASS:
4175 args.objspace = objspace;
4176 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4177 break;
4178
4179 case T_IMEMO:
4180 rb_imemo_mark_and_move(obj, true);
4181 return;
4182
4183 case T_NIL:
4184 case T_FIXNUM:
4185 case T_NODE:
4186 case T_MOVED:
4187 case T_NONE:
4188 /* These can't move */
4189 return;
4190
4191 case T_ARRAY:
4192 gc_ref_update_array(objspace, obj);
4193 break;
4194
4195 case T_HASH:
4196 gc_ref_update_hash(objspace, obj);
4197 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4198 break;
4199
4200 case T_STRING:
4201 {
4202 if (STR_SHARED_P(obj)) {
4203 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4204 }
4205
4206        /* If the string is no longer embedded after the move, but can fit in
4207         * the slot it has been placed in, then re-embed it. */
4208 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4209 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4210 rb_str_make_embedded(obj);
4211 }
4212 }
4213
4214 break;
4215 }
4216 case T_DATA:
4217 /* Call the compaction callback, if it exists */
4218 {
4219 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4220 if (ptr) {
4221 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4222 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4223
4224 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4225 VALUE *ref = (VALUE *)((char *)ptr + offset);
4226 *ref = gc_location_internal(objspace, *ref);
4227 }
4228 }
4229 else if (RTYPEDDATA_P(obj)) {
4230 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4231 if (compact_func) (*compact_func)(ptr);
4232 }
4233 }
4234 }
4235 break;
4236
4237 case T_OBJECT:
4238 gc_ref_update_object(objspace, obj);
4239 break;
4240
4241 case T_FILE:
4242 if (RFILE(obj)->fptr) {
4243 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4244 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4245 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4246 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4247 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4248 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4249 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4250 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4251 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4252 }
4253 break;
4254 case T_REGEXP:
4255 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4256 break;
4257
4258 case T_SYMBOL:
4259 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4260 break;
4261
4262 case T_FLOAT:
4263 case T_BIGNUM:
4264 break;
4265
4266 case T_MATCH:
4267 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4268
4269 if (RMATCH(obj)->str) {
4270 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4271 }
4272 break;
4273
4274 case T_RATIONAL:
4275 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4276 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4277 break;
4278
4279 case T_COMPLEX:
4280 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4281 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4282
4283 break;
4284
4285 case T_STRUCT:
4286 {
4287 long i, len = RSTRUCT_LEN(obj);
4288 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4289
4290 for (i = 0; i < len; i++) {
4291 UPDATE_IF_MOVED(objspace, ptr[i]);
4292 }
4293 }
4294 break;
4295 default:
4296 rb_bug("unreachable");
4297 break;
4298 }
4299
4300 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4301}
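/*
 * For the T_DATA case above: a minimal sketch of an extension-side type
 * that cooperates with compaction. The struct, functions, and type name are
 * hypothetical; the pattern is to mark held objects with
 * rb_gc_mark_movable() so they may move, and to re-fetch their addresses in
 * the dcompact callback.
 */
struct example_wrapper {
    VALUE held;
};

static void
example_wrapper_dmark(void *ptr)
{
    struct example_wrapper *w = ptr;
    rb_gc_mark_movable(w->held); /* mark without pinning, so it may move */
}

static void
example_wrapper_dcompact(void *ptr)
{
    struct example_wrapper *w = ptr;
    w->held = rb_gc_location(w->held); /* pick up the new address, if any */
}

static const rb_data_type_t example_wrapper_type = {
    .wrap_struct_name = "example_wrapper",
    .function = {
        .dmark = example_wrapper_dmark,
        .dfree = RUBY_DEFAULT_FREE,
        .dcompact = example_wrapper_dcompact,
    },
};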
4302
4303VALUE
4304rb_gc_start(void)
4305{
4306 rb_gc();
4307 return Qnil;
4308}
4309
4310void
4311rb_gc(void)
4312{
4313 unless_objspace(objspace) { return; }
4314
4315 rb_gc_impl_start(objspace, true, true, true, false);
4316}
4317
4318int
4319rb_during_gc(void)
4320{
4321 unless_objspace(objspace) { return FALSE; }
4322
4323 return rb_gc_impl_during_gc_p(objspace);
4324}
4325
4326size_t
4327rb_gc_count(void)
4328{
4329 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4330}
4331
4332static VALUE
4333gc_count(rb_execution_context_t *ec, VALUE self)
4334{
4335 return SIZET2NUM(rb_gc_count());
4336}
4337
4338VALUE
4339rb_gc_latest_gc_info(VALUE key)
4340{
4341 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4342 rb_raise(rb_eTypeError, "non-hash or symbol given");
4343 }
4344
4345 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4346
4347 if (val == Qundef) {
4348 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4349 }
4350
4351 return val;
4352}
4353
4354static VALUE
4355gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4356{
4357 if (NIL_P(arg)) {
4358 arg = rb_hash_new();
4359 }
4360 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4361 rb_raise(rb_eTypeError, "non-hash or symbol given");
4362 }
4363
4364 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4365
4366 if (ret == Qundef) {
4367 GC_ASSERT(SYMBOL_P(arg));
4368
4369 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4370 }
4371
4372 return ret;
4373}
4374
4375size_t
4376rb_gc_stat(VALUE arg)
4377{
4378 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4379 rb_raise(rb_eTypeError, "non-hash or symbol given");
4380 }
4381
4382 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4383
4384 if (ret == Qundef) {
4385 GC_ASSERT(SYMBOL_P(arg));
4386
4387 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4388 }
4389
4390 if (SYMBOL_P(arg)) {
4391 return NUM2SIZET(ret);
4392 }
4393 else {
4394 return 0;
4395 }
4396}
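/*
 * Example (hypothetical helper): querying a single statistic from C with a
 * Symbol key. :count is one of the keys the default GC exposes via GC.stat.
 */
static size_t
example_gc_run_count(void)
{
    return rb_gc_stat(ID2SYM(rb_intern("count")));
}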
4397
4398static VALUE
4399gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4400{
4401 if (NIL_P(arg)) {
4402 arg = rb_hash_new();
4403 }
4404
4405 if (NIL_P(heap_name)) {
4406 if (!RB_TYPE_P(arg, T_HASH)) {
4407 rb_raise(rb_eTypeError, "non-hash given");
4408 }
4409 }
4410 else if (FIXNUM_P(heap_name)) {
4411 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4412 rb_raise(rb_eTypeError, "non-hash or symbol given");
4413 }
4414 }
4415 else {
4416 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4417 }
4418
4419 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4420
4421 if (ret == Qundef) {
4422 GC_ASSERT(SYMBOL_P(arg));
4423
4424 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4425 }
4426
4427 return ret;
4428}
4429
4430static VALUE
4431gc_config_get(rb_execution_context_t *ec, VALUE self)
4432{
4433 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4434 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4435
4436 return cfg_hash;
4437}
4438
4439static VALUE
4440gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4441{
4442 void *objspace = rb_gc_get_objspace();
4443
4444 rb_gc_impl_config_set(objspace, hash);
4445
4446 return rb_gc_impl_config_get(objspace);
4447}
4448
4449static VALUE
4450gc_stress_get(rb_execution_context_t *ec, VALUE self)
4451{
4452 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4453}
4454
4455static VALUE
4456gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4457{
4458 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4459
4460 return flag;
4461}
4462
4463void
4464rb_gc_initial_stress_set(VALUE flag)
4465{
4466 initial_stress = flag;
4467}
4468
4469size_t *
4470rb_gc_heap_sizes(void)
4471{
4472 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4473}
4474
4475VALUE
4476rb_gc_enable(void)
4477{
4478 return rb_objspace_gc_enable(rb_gc_get_objspace());
4479}
4480
4481VALUE
4482rb_objspace_gc_enable(void *objspace)
4483{
4484 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4485 rb_gc_impl_gc_enable(objspace);
4486 return RBOOL(disabled);
4487}
4488
4489static VALUE
4490gc_enable(rb_execution_context_t *ec, VALUE _)
4491{
4492 return rb_gc_enable();
4493}
4494
4495static VALUE
4496gc_disable_no_rest(void *objspace)
4497{
4498 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4499 rb_gc_impl_gc_disable(objspace, false);
4500 return RBOOL(disabled);
4501}
4502
4503VALUE
4504rb_gc_disable_no_rest(void)
4505{
4506 return gc_disable_no_rest(rb_gc_get_objspace());
4507}
4508
4509VALUE
4510rb_gc_disable(void)
4511{
4512 return rb_objspace_gc_disable(rb_gc_get_objspace());
4513}
4514
4515VALUE
4516rb_objspace_gc_disable(void *objspace)
4517{
4518 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4519 rb_gc_impl_gc_disable(objspace, true);
4520 return RBOOL(disabled);
4521}
4522
4523static VALUE
4524gc_disable(rb_execution_context_t *ec, VALUE _)
4525{
4526 return rb_gc_disable();
4527}
4528
4529// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4530void
4531ruby_gc_set_params(void)
4532{
4533 rb_gc_impl_set_params(rb_gc_get_objspace());
4534}
4535
4536void
4537rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4538{
4539 RB_VM_LOCKING() {
4540        if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4541
4542 if (!RB_SPECIAL_CONST_P(obj)) {
4543 rb_vm_t *vm = GET_VM();
4544 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4545 struct gc_mark_func_data_struct mfd = {
4546 .mark_func = func,
4547 .data = data,
4548 };
4549
4550 vm->gc.mark_func_data = &mfd;
4551 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4552 vm->gc.mark_func_data = prev_mfd;
4553 }
4554 }
4555}
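/*
 * A minimal usage sketch (hypothetical callback): inspect everything
 * directly reachable from an object. Per the rb_bug() guard above, this
 * must not be used while the GC is running.
 */
static void
example_dump_reachable_i(VALUE child, void *data)
{
    rb_p(child); /* inspect-and-print; fine for ad-hoc debugging */
}
/* usage: rb_objspace_reachable_objects_from(obj, example_dump_reachable_i, NULL); */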
4556
4557struct root_objects_data {
4558    const char *category;
4559 void (*func)(const char *category, VALUE, void *);
4560 void *data;
4561};
4562
4563static void
4564root_objects_from(VALUE obj, void *ptr)
4565{
4566 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4567 (*data->func)(data->category, obj, data->data);
4568}
4569
4570void
4571rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4572{
4573    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4574
4575 rb_vm_t *vm = GET_VM();
4576
4577 struct root_objects_data data = {
4578 .func = func,
4579 .data = passing_data,
4580 };
4581
4582 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4583 struct gc_mark_func_data_struct mfd = {
4584 .mark_func = root_objects_from,
4585 .data = &data,
4586 };
4587
4588 vm->gc.mark_func_data = &mfd;
4589 rb_gc_save_machine_context();
4590 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4591 vm->gc.mark_func_data = prev_mfd;
4592}
4593
4594/*
4595 ------------------------------ DEBUG ------------------------------
4596*/
4597
4598static const char *
4599type_name(int type, VALUE obj)
4600{
4601 switch (type) {
4602#define TYPE_NAME(t) case (t): return #t;
4603 TYPE_NAME(T_NONE);
4604 TYPE_NAME(T_OBJECT);
4605 TYPE_NAME(T_CLASS);
4606 TYPE_NAME(T_MODULE);
4607 TYPE_NAME(T_FLOAT);
4608 TYPE_NAME(T_STRING);
4609 TYPE_NAME(T_REGEXP);
4610 TYPE_NAME(T_ARRAY);
4611 TYPE_NAME(T_HASH);
4612 TYPE_NAME(T_STRUCT);
4613 TYPE_NAME(T_BIGNUM);
4614 TYPE_NAME(T_FILE);
4615 TYPE_NAME(T_MATCH);
4616 TYPE_NAME(T_COMPLEX);
4617 TYPE_NAME(T_RATIONAL);
4618 TYPE_NAME(T_NIL);
4619 TYPE_NAME(T_TRUE);
4620 TYPE_NAME(T_FALSE);
4621 TYPE_NAME(T_SYMBOL);
4622 TYPE_NAME(T_FIXNUM);
4623 TYPE_NAME(T_UNDEF);
4624 TYPE_NAME(T_IMEMO);
4625 TYPE_NAME(T_ICLASS);
4626 TYPE_NAME(T_MOVED);
4627 TYPE_NAME(T_ZOMBIE);
4628 case T_DATA:
4629 if (obj && rb_objspace_data_type_name(obj)) {
4630 return rb_objspace_data_type_name(obj);
4631 }
4632 return "T_DATA";
4633#undef TYPE_NAME
4634 }
4635 return "unknown";
4636}
4637
4638static const char *
4639obj_type_name(VALUE obj)
4640{
4641 return type_name(TYPE(obj), obj);
4642}
4643
4644const char *
4645rb_method_type_name(rb_method_type_t type)
4646{
4647 switch (type) {
4648 case VM_METHOD_TYPE_ISEQ: return "iseq";
4649    case VM_METHOD_TYPE_ATTRSET: return "attrset";
4650 case VM_METHOD_TYPE_IVAR: return "ivar";
4651 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4652 case VM_METHOD_TYPE_ALIAS: return "alias";
4653 case VM_METHOD_TYPE_REFINED: return "refined";
4654 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4655 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4656 case VM_METHOD_TYPE_MISSING: return "missing";
4657 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4658 case VM_METHOD_TYPE_UNDEF: return "undef";
4659 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4660 }
4661 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4662}
4663
4664static void
4665rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4666{
4667 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4668 VALUE path = rb_iseq_path(iseq);
4669 int n = ISEQ_BODY(iseq)->location.first_lineno;
4670 snprintf(buff, buff_size, " %s@%s:%d",
4671 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4672 RSTRING_PTR(path), n);
4673 }
4674}
4675
4676static int
4677str_len_no_raise(VALUE str)
4678{
4679 long len = RSTRING_LEN(str);
4680 if (len < 0) return 0;
4681 if (len > INT_MAX) return INT_MAX;
4682 return (int)len;
4683}
4684
4685#define BUFF_ARGS buff + pos, buff_size - pos
4686#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4687#define APPEND_S(s) do { \
4688 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4689 goto end; \
4690 } \
4691 else { \
4692 memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4693 } \
4694 } while (0)
4695#define C(c, s) ((c) != 0 ? (s) : " ")
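/*
 * Worked example of the truncation protocol: with buff_size == 8 and
 * pos == 0, APPEND_F("%s", "abcdef") leaves pos == 6; a subsequent
 * APPEND_S("ghi") sees pos + 3 >= 8 and jumps to `end`, so callers always
 * get a NUL-terminated, possibly truncated buffer.
 */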
4696
4697static size_t
4698rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4699{
4700 size_t pos = 0;
4701
4702 if (SPECIAL_CONST_P(obj)) {
4703 APPEND_F("%s", obj_type_name(obj));
4704
4705 if (FIXNUM_P(obj)) {
4706 APPEND_F(" %ld", FIX2LONG(obj));
4707 }
4708 else if (SYMBOL_P(obj)) {
4709 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4710 }
4711 }
4712 else {
4713 // const int age = RVALUE_AGE_GET(obj);
4714
4715 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4716 APPEND_F("%p %s/", (void *)obj, obj_type_name(obj));
4717 // TODO: fixme
4718 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4719 // (void *)obj, age,
4720 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4721 // C(RVALUE_MARK_BITMAP(obj), "M"),
4722 // C(RVALUE_PIN_BITMAP(obj), "P"),
4723 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4724 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4725 // C(rb_objspace_garbage_object_p(obj), "G"),
4726 // obj_type_name(obj));
4727 }
4728 else {
4729 /* fake */
4730 // APPEND_F("%p [%dXXXX] %s",
4731 // (void *)obj, age,
4732 // obj_type_name(obj));
4733 }
4734
4735 if (internal_object_p(obj)) {
4736 /* ignore */
4737 }
4738 else if (RBASIC(obj)->klass == 0) {
4739 APPEND_S("(temporary internal)");
4740 }
4741 else if (RTEST(RBASIC(obj)->klass)) {
4742 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4743 if (!NIL_P(class_path)) {
4744 APPEND_F("%s ", RSTRING_PTR(class_path));
4745 }
4746 }
4747 }
4748 end:
4749
4750 return pos;
4751}
4752
4753const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4754
4755static size_t
4756rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4757{
4758 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4759 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4760
4761 switch (type) {
4762 case T_NODE:
4763 UNEXPECTED_NODE(rb_raw_obj_info);
4764 break;
4765 case T_ARRAY:
4766 if (ARY_SHARED_P(obj)) {
4767 APPEND_S("shared -> ");
4768 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4769 }
4770 else if (ARY_EMBED_P(obj)) {
4771 APPEND_F("[%s%s] len: %ld (embed)",
4772 C(ARY_EMBED_P(obj), "E"),
4773 C(ARY_SHARED_P(obj), "S"),
4774 RARRAY_LEN(obj));
4775 }
4776 else {
4777 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
4778 C(ARY_EMBED_P(obj), "E"),
4779 C(ARY_SHARED_P(obj), "S"),
4780 RARRAY_LEN(obj),
4781 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
4782 (void *)RARRAY_CONST_PTR(obj));
4783 }
4784 break;
4785 case T_STRING: {
4786 if (STR_SHARED_P(obj)) {
4787 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4788 }
4789 else {
4790 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4791
4792 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4793 }
4794 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4795 break;
4796 }
4797 case T_SYMBOL: {
4798 VALUE fstr = RSYMBOL(obj)->fstr;
4799 ID id = RSYMBOL(obj)->id;
4800 if (RB_TYPE_P(fstr, T_STRING)) {
4801 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4802 }
4803 else {
4804 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4805 }
4806 break;
4807 }
4808 case T_MOVED: {
4809 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4810 break;
4811 }
4812 case T_HASH: {
4813 APPEND_F("[%c] %"PRIdSIZE,
4814 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4815 RHASH_SIZE(obj));
4816 break;
4817 }
4818 case T_CLASS:
4819 case T_MODULE:
4820 {
4821 VALUE class_path = rb_class_path_cached(obj);
4822 if (!NIL_P(class_path)) {
4823 APPEND_F("%s", RSTRING_PTR(class_path));
4824 }
4825 else {
4826 APPEND_S("(anon)");
4827 }
4828 break;
4829 }
4830 case T_ICLASS:
4831 {
4832 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4833 if (!NIL_P(class_path)) {
4834 APPEND_F("src:%s", RSTRING_PTR(class_path));
4835 }
4836 break;
4837 }
4838 case T_OBJECT:
4839 {
4840 if (rb_shape_obj_too_complex_p(obj)) {
4841 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
4842 APPEND_F("(too_complex) len:%zu", hash_len);
4843 }
4844 else {
4845 uint32_t len = ROBJECT_FIELDS_CAPACITY(obj);
4846
4847 if (RBASIC(obj)->flags & ROBJECT_EMBED) {
4848 APPEND_F("(embed) len:%d", len);
4849 }
4850 else {
4851 VALUE *ptr = ROBJECT_FIELDS(obj);
4852 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
4853 }
4854 }
4855 }
4856 break;
4857 case T_DATA: {
4858 const struct rb_block *block;
4859 const rb_iseq_t *iseq;
4860 if (rb_obj_is_proc(obj) &&
4861 (block = vm_proc_block(obj)) != NULL &&
4862 (vm_block_type(block) == block_type_iseq) &&
4863 (iseq = vm_block_iseq(block)) != NULL) {
4864 rb_raw_iseq_info(BUFF_ARGS, iseq);
4865 }
4866 else if (rb_ractor_p(obj)) {
4867 rb_ractor_t *r = (void *)DATA_PTR(obj);
4868 if (r) {
4869 APPEND_F("r:%d", r->pub.id);
4870 }
4871 }
4872 else {
4873 const char * const type_name = rb_objspace_data_type_name(obj);
4874 if (type_name) {
4875 APPEND_F("%s", type_name);
4876 }
4877 }
4878 break;
4879 }
4880 case T_IMEMO: {
4881 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4882
4883 switch (imemo_type(obj)) {
4884 case imemo_ment:
4885 {
4886 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4887
4888 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4889 rb_id2name(me->called_id),
4890 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4891 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4892 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4893 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4894 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4895 me->def ? rb_method_type_name(me->def->type) : "NULL",
4896 me->def ? me->def->aliased : -1,
4897 (void *)me->owner, // obj_info(me->owner),
4898 (void *)me->defined_class); //obj_info(me->defined_class)));
4899
4900 if (me->def) {
4901 switch (me->def->type) {
4902 case VM_METHOD_TYPE_ISEQ:
4903 APPEND_S(" (iseq:");
4904 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4905 APPEND_S(")");
4906 break;
4907 default:
4908 break;
4909 }
4910 }
4911
4912 break;
4913 }
4914 case imemo_iseq: {
4915 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4916 rb_raw_iseq_info(BUFF_ARGS, iseq);
4917 break;
4918 }
4919 case imemo_callinfo:
4920 {
4921 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4922 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4923 rb_id2name(vm_ci_mid(ci)),
4924 vm_ci_flag(ci),
4925 vm_ci_argc(ci),
4926 vm_ci_kwarg(ci) ? "available" : "NULL");
4927 break;
4928 }
4929 case imemo_callcache:
4930 {
4931 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4932 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
4933 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4934
4935 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4936 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4937 cme ? rb_id2name(cme->called_id) : "<NULL>",
4938 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4939 (void *)cme,
4940 (void *)(uintptr_t)vm_cc_call(cc));
4941 break;
4942 }
4943 default:
4944 break;
4945 }
4946 }
4947 default:
4948 break;
4949 }
4950 }
4951 end:
4952
4953 return pos;
4954}
4955
4956#undef C
4957
4958#ifdef RUBY_ASAN_ENABLED
4959void
4960rb_asan_poison_object(VALUE obj)
4961{
4962 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4963 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4964}
4965
4966void
4967rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4968{
4969 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4970 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4971}
4972
4973void *
4974rb_asan_poisoned_object_p(VALUE obj)
4975{
4976 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4977 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4978}
4979#endif
4980
4981static void
4982raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4983{
4984 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4985    pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4986 if (pos >= buff_size) {} // truncated
4987}
4988
4989const char *
4990rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4991{
4992 void *objspace = rb_gc_get_objspace();
4993
4994 if (SPECIAL_CONST_P(obj)) {
4995 raw_obj_info(buff, buff_size, obj);
4996 }
4997 else if (!rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj)) {
4998 snprintf(buff, buff_size, "out-of-heap:%p", (void *)obj);
4999 }
5000#if 0 // maybe no need to check it?
5001 else if (0 && rb_gc_impl_garbage_object_p(objspace, obj)) {
5002 snprintf(buff, buff_size, "garbage:%p", (void *)obj);
5003 }
5004#endif
5005 else {
5006 asan_unpoisoning_object(obj) {
5007 raw_obj_info(buff, buff_size, obj);
5008 }
5009 }
5010 return buff;
5011}
5012
5013#undef APPEND_S
5014#undef APPEND_F
5015#undef BUFF_ARGS
5016
5017#if RGENGC_OBJ_INFO
5018#define OBJ_INFO_BUFFERS_NUM 10
5019#define OBJ_INFO_BUFFERS_SIZE 0x100
5020static rb_atomic_t obj_info_buffers_index = 0;
5021static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
5022
5023/* Atomically increments *var, wrapping it back to 0 once maxval is
5024 * reached. Returns the old *var value, wrapped into (0...maxval). */
5025static rb_atomic_t
5026atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5027{
5028 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5029 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5030 const rb_atomic_t newval = oldval + 1;
5031 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5032 oldval %= maxval;
5033 }
5034 return oldval;
5035}
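/*
 * Worked example with maxval == 10: if *var is 9, FETCH_ADD returns
 * oldval == 9 and leaves *var == 10; the CAS then swings *var from 10 back
 * to 0 (10 % 10), and 9 % 10 == 9 is returned. A racing thread that bumped
 * *var past 10 first simply makes the CAS fail, which is harmless because
 * that thread performs the same wraparound itself.
 */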
5036
5037static const char *
5038obj_info(VALUE obj)
5039{
5040 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
5041 char *const buff = obj_info_buffers[index];
5042 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
5043}
5044#else
5045static const char *
5046obj_info(VALUE obj)
5047{
5048 return obj_type_name(obj);
5049}
5050#endif
5051
5052/*
5053 ------------------------ Extended allocator ------------------------
5054*/
5055
5056struct gc_raise_tag {
5057    VALUE exc;
5058 const char *fmt;
5059 va_list *ap;
5060};
5061
5062static void *
5063gc_vraise(void *ptr)
5064{
5065 struct gc_raise_tag *argv = ptr;
5066 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5067 UNREACHABLE_RETURN(NULL);
5068}
5069
5070static void
5071gc_raise(VALUE exc, const char *fmt, ...)
5072{
5073 va_list ap;
5074 va_start(ap, fmt);
5075 struct gc_raise_tag argv = {
5076 exc, fmt, &ap,
5077 };
5078
5079 if (ruby_thread_has_gvl_p()) {
5080        gc_vraise(&argv);
5081        UNREACHABLE;
5082 }
5083 else if (ruby_native_thread_p()) {
5084        rb_thread_call_with_gvl(gc_vraise, &argv);
5085        UNREACHABLE;
5086 }
5087 else {
5088 /* Not in a ruby thread */
5089 fprintf(stderr, "%s", "[FATAL] ");
5090 vfprintf(stderr, fmt, ap);
5091 }
5092
5093 va_end(ap);
5094 abort();
5095}
5096
5097NORETURN(static void negative_size_allocation_error(const char *));
5098static void
5099negative_size_allocation_error(const char *msg)
5100{
5101 gc_raise(rb_eNoMemError, "%s", msg);
5102}
5103
5104static void *
5105ruby_memerror_body(void *dummy)
5106{
5107 rb_memerror();
5108 return 0;
5109}
5110
5111NORETURN(static void ruby_memerror(void));
5113static void
5114ruby_memerror(void)
5115{
5116 if (ruby_thread_has_gvl_p()) {
5117 rb_memerror();
5118 }
5119 else {
5120 if (ruby_native_thread_p()) {
5121 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5122 }
5123 else {
5124 /* no ruby thread */
5125 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5126 }
5127 }
5128
5129    /* There have been discussions about whether we should die here; */
5130    /* we might rethink it later. */
5131 exit(EXIT_FAILURE);
5132}
5133
5134void
5135rb_memerror(void)
5136{
5137 /* the `GET_VM()->special_exceptions` below assumes that
5138 * the VM is reachable from the current thread. We should
5139 * definitely make sure of that. */
5140 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5141
5142 rb_execution_context_t *ec = GET_EC();
5143 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5144
5145 if (!exc ||
5146 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5147 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5148 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5149 exit(EXIT_FAILURE);
5150 }
5151 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5152 rb_ec_raised_clear(ec);
5153 }
5154 else {
5155 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5156 exc = ruby_vm_special_exception_copy(exc);
5157 }
5158 ec->errinfo = exc;
5159 EC_JUMP_TAG(ec, TAG_RAISE);
5160}
5161
5162bool
5163rb_memerror_reentered(void)
5164{
5165 rb_execution_context_t *ec = GET_EC();
5166 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5167}
5168
5169static void *
5170handle_malloc_failure(void *ptr)
5171{
5172 if (LIKELY(ptr)) {
5173 return ptr;
5174 }
5175 else {
5176 ruby_memerror();
5177 UNREACHABLE_RETURN(ptr);
5178 }
5179}
5180
5181static void *ruby_xmalloc_body(size_t size);
5182
5183void *
5184ruby_xmalloc(size_t size)
5185{
5186 return handle_malloc_failure(ruby_xmalloc_body(size));
5187}
5188
5189static void *
5190ruby_xmalloc_body(size_t size)
5191{
5192 if ((ssize_t)size < 0) {
5193 negative_size_allocation_error("too large allocation size");
5194 }
5195
5196 return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
5197}
5198
5199void
5200ruby_malloc_size_overflow(size_t count, size_t elsize)
5201{
5202 rb_raise(rb_eArgError,
5203 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5204 count, elsize);
5205}
5206
5207void
5208ruby_malloc_add_size_overflow(size_t x, size_t y)
5209{
5210 rb_raise(rb_eArgError,
5211 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5212 x, y);
5213}
5214
5215static void *ruby_xmalloc2_body(size_t n, size_t size);
5216
5217void *
5218ruby_xmalloc2(size_t n, size_t size)
5219{
5220 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5221}
5222
5223static void *
5224ruby_xmalloc2_body(size_t n, size_t size)
5225{
5226 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
5227}
5228
5229static void *ruby_xcalloc_body(size_t n, size_t size);
5230
5231void *
5232ruby_xcalloc(size_t n, size_t size)
5233{
5234 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5235}
5236
5237static void *
5238ruby_xcalloc_body(size_t n, size_t size)
5239{
5240 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
5241}
5242
5243static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
5244
5245#ifdef ruby_sized_xrealloc
5246#undef ruby_sized_xrealloc
5247#endif
5248void *
5249ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
5250{
5251 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
5252}
5253
5254static void *
5255ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
5256{
5257 if ((ssize_t)new_size < 0) {
5258 negative_size_allocation_error("too large allocation size");
5259 }
5260
5261 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
5262}
5263
5264void *
5265ruby_xrealloc(void *ptr, size_t new_size)
5266{
5267 return ruby_sized_xrealloc(ptr, new_size, 0);
5268}
5269
5270static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
5271
5272#ifdef ruby_sized_xrealloc2
5273#undef ruby_sized_xrealloc2
5274#endif
5275void *
5276ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
5277{
5278 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
5279}
5280
5281static void *
5282ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
5283{
5284 size_t len = xmalloc2_size(n, size);
5285 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
5286}
5287
5288void *
5289ruby_xrealloc2(void *ptr, size_t n, size_t size)
5290{
5291 return ruby_sized_xrealloc2(ptr, n, size, 0);
5292}
5293
5294#ifdef ruby_sized_xfree
5295#undef ruby_sized_xfree
5296#endif
5297void
5298ruby_sized_xfree(void *x, size_t size)
5299{
5300 if (LIKELY(x)) {
5301        /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5302         * to be called after ruby_vm_destruct and attempt to free memory. Fall back to ruby_mimfree
5303         * in that case. */
5304 if (LIKELY(GET_VM())) {
5305 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5306 }
5307 else {
5308 ruby_mimfree(x);
5309 }
5310 }
5311}
5312
5313void
5314ruby_xfree(void *x)
5315{
5316 ruby_sized_xfree(x, 0);
5317}
5318
5319void *
5320rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5321{
5322 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5323 return ruby_xmalloc(w);
5324}
5325
5326void *
5327rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5328{
5329 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5330 return ruby_xcalloc(w, 1);
5331}
5332
5333void *
5334rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5335{
5336 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5337 return ruby_xrealloc((void *)p, w);
5338}
5339
5340void *
5341rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5342{
5343 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5344 return ruby_xmalloc(u);
5345}
5346
5347void *
5348rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5349{
5350 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5351 return ruby_xcalloc(u, 1);
5352}
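/*
 * Example (hypothetical layout): a header followed by n fixed-size records,
 * sized as n * recsize + sizeof(header), with the overflow check done for
 * us by rb_xmalloc_mul_add.
 */
struct example_rec_buf {
    size_t len;
    /* records follow */
};

static struct example_rec_buf *
example_rec_buf_new(size_t n, size_t recsize)
{
    struct example_rec_buf *buf = rb_xmalloc_mul_add(n, recsize, sizeof(struct example_rec_buf));
    buf->len = n;
    return buf;
}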
5353
5354/* Mimics ruby_xmalloc, but does not require rb_objspace.
5355 * Must return a pointer suitable for ruby_xfree.
5356 */
5357void *
5358ruby_mimmalloc(size_t size)
5359{
5360 void *mem;
5361#if CALC_EXACT_MALLOC_SIZE
5362 size += sizeof(struct malloc_obj_info);
5363#endif
5364 mem = malloc(size);
5365#if CALC_EXACT_MALLOC_SIZE
5366 if (!mem) {
5367 return NULL;
5368 }
5369 else
5370 /* set 0 for consistency of allocated_size/allocations */
5371 {
5372 struct malloc_obj_info *info = mem;
5373 info->size = 0;
5374 mem = info + 1;
5375 }
5376#endif
5377 return mem;
5378}
5379
5380void *
5381ruby_mimcalloc(size_t num, size_t size)
5382{
5383 void *mem;
5384#if CALC_EXACT_MALLOC_SIZE
5385 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5386 if (UNLIKELY(t.left)) {
5387 return NULL;
5388 }
5389 size = t.right + sizeof(struct malloc_obj_info);
5390 mem = calloc1(size);
5391 if (!mem) {
5392 return NULL;
5393 }
5394 else
5395 /* set 0 for consistency of allocated_size/allocations */
5396 {
5397 struct malloc_obj_info *info = mem;
5398 info->size = 0;
5399 mem = info + 1;
5400 }
5401#else
5402 mem = calloc(num, size);
5403#endif
5404 return mem;
5405}
5406
5407void
5408ruby_mimfree(void *ptr)
5409{
5410#if CALC_EXACT_MALLOC_SIZE
5411 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5412 ptr = info;
5413#endif
5414 free(ptr);
5415}
5416
5417void
5418rb_gc_adjust_memory_usage(ssize_t diff)
5419{
5420 unless_objspace(objspace) { return; }
5421
5422 rb_gc_impl_adjust_memory_usage(objspace, diff);
5423}
5424
5425const char *
5426rb_obj_info(VALUE obj)
5427{
5428 return obj_info(obj);
5429}
5430
5431void
5432rb_obj_info_dump(VALUE obj)
5433{
5434 char buff[0x100];
5435 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5436}
5437
5438void
5439rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5440{
5441 char buff[0x100];
5442 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5443}
5444
5445void
5446rb_gc_before_fork(void)
5447{
5448 rb_gc_impl_before_fork(rb_gc_get_objspace());
5449}
5450
5451void
5452rb_gc_after_fork(rb_pid_t pid)
5453{
5454 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5455}
5456
5457/*
5458 * Document-module: ObjectSpace
5459 *
5460 * The ObjectSpace module contains a number of routines
5461 * that interact with the garbage collection facility and allow you to
5462 * traverse all living objects with an iterator.
5463 *
5464 * ObjectSpace also provides support for object finalizers, procs that will be
5465 * called after a specific object has been destroyed by garbage collection. See
5466 * the documentation for +ObjectSpace.define_finalizer+ for important
5467 * information on how to use this method correctly.
5468 *
5469 * a = "A"
5470 * b = "B"
5471 *
5472 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5473 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5474 *
5475 * a = nil
5476 * b = nil
5477 *
5478 * _produces:_
5479 *
5480 * Finalizer two on 537763470
5481 * Finalizer one on 537763480
5482 */
5483
5484/* Document-class: GC::Profiler
5485 *
5486 * The GC profiler provides access to information on GC runs including time,
5487 * length and object space size.
5488 *
5489 * Example:
5490 *
5491 * GC::Profiler.enable
5492 *
5493 * require 'rdoc/rdoc'
5494 *
5495 * GC::Profiler.report
5496 *
5497 * GC::Profiler.disable
5498 *
5499 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5500 */
5501
5502#include "gc.rbinc"
5503
5504void
5505Init_GC(void)
5506{
5507#undef rb_intern
5508 rb_gc_register_address(&id2ref_value);
5509
5510 malloc_offset = gc_compute_malloc_offset();
5511
5512 rb_mGC = rb_define_module("GC");
5513
5514 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5515
5516 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5517
5518 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5519 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5520
5521 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5522
5523 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5524
5525 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5526 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5527
5528 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5529
5530 rb_gc_impl_init();
5531}
5532
5533// Set a name for the anonymous virtual memory area. `addr` is the starting
5534// address of the area and `size` is its length in bytes. `name` is a
5535// NUL-terminated human-readable string.
5536//
5537// This function is usually called after calling `mmap()`. The human-readable
5538// annotation helps developers identify the call site of `mmap()` that created
5539// the memory mapping.
5540//
5541// This function currently only works on Linux 5.17 or higher. After calling
5542// this function, we can see annotations in the form of "[anon:...]" in
5543// `/proc/self/maps`, where `...` is the content of `name`. This function has
5544// no effect when called on other platforms.
5545void
5546ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5547{
5548#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5549 // The name length cannot exceed 80 (including the '\0').
5550 RUBY_ASSERT(strlen(name) < 80);
5551 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5552 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5553 // reasons.
5554 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5555 // 2. addr is an invalid address.
5556 // 3. The string pointed by name is too long.
5557 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5558 // happen if we run the compiled binary on an old kernel. In theory, all
5559 // other errors should result in a failure. But since EINVAL cannot tell
5560 // the first error from others, and this function is mainly used for
5561 // debugging, we silently ignore the error.
5562 errno = 0;
5563#endif
5564}
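/*
 * A minimal sketch of the intended call pattern, assuming <sys/mman.h> and
 * a platform with MAP_ANONYMOUS. The arena name here is hypothetical; with
 * it, the mapping shows up as "[anon:Ruby:example_arena]" in
 * /proc/self/maps on Linux 5.17+.
 */
#ifdef __linux__
#include <sys/mman.h>

static void *
example_alloc_annotated_arena(size_t size)
{
    void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (mem != MAP_FAILED) {
        ruby_annotate_mmap(mem, size, "Ruby:example_arena");
    }
    return mem;
}
#endif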