Ruby 3.5.0dev (2025-04-04 revision 6b5e187d0eb07994fee7b5f0336da388a793dcbb)
gc.c (6b5e187d0eb07994fee7b5f0336da388a793dcbb)
/**********************************************************************

  gc.c -

  $Author$
  created at: Tue Oct 5 09:44:46 JST 1993

  Copyright (C) 1993-2007 Yukihiro Matsumoto
  Copyright (C) 2000  Network Applied Communication Laboratory, Inc.
  Copyright (C) 2000  Information-technology Promotion Agency, Japan

**********************************************************************/

#define rb_data_object_alloc rb_data_object_alloc
#define rb_data_typed_object_alloc rb_data_typed_object_alloc

#include "ruby/internal/config.h"
#ifdef _WIN32
# include "ruby/ruby.h"
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
# include "wasm/machine.h"
#else
# include <setjmp.h>
#endif
#include <stdarg.h>
#include <stdio.h>

/* MALLOC_HEADERS_BEGIN */
#ifndef HAVE_MALLOC_USABLE_SIZE
# ifdef _WIN32
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
#  define HAVE_MALLOC_USABLE_SIZE
#  define malloc_usable_size(a) malloc_size(a)
# endif
#endif

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
/* Alternative malloc header is included in ruby/missing.h */
# elif defined(HAVE_MALLOC_H)
#  include <malloc.h>
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>
# endif
#endif

/* MALLOC_HEADERS_END */

#ifdef HAVE_SYS_TIME_H
# include <sys/time.h>
#endif

#ifdef HAVE_SYS_RESOURCE_H
# include <sys/resource.h>
#endif

#if defined _WIN32 || defined __CYGWIN__
# include <windows.h>
#elif defined(HAVE_POSIX_MEMALIGN)
#elif defined(HAVE_MEMALIGN)
# include <malloc.h>
#endif

#include <sys/types.h>

#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif

/* For ruby_annotate_mmap */
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif

#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */

#include "constant.h"
#include "darray.h"
#include "debug_counter.h"
#include "eval_intern.h"
#include "gc/gc.h"
#include "id_table.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/compile.h"
#include "internal/complex.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/imemo.h"
#include "internal/io.h"
#include "internal/numeric.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/rational.h"
#include "internal/sanitizers.h"
#include "internal/struct.h"
#include "internal/symbol.h"
#include "internal/thread.h"
#include "internal/variable.h"
#include "internal/warnings.h"
#include "probes.h"
#include "regint.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/re.h"
#include "ruby/st.h"
#include "ruby/thread.h"
#include "ruby/util.h"
#include "ruby/vm.h"
#include "ruby_assert.h"
#include "ruby_atomic.h"
#include "symbol.h"
#include "variable.h"
#include "vm_core.h"
#include "vm_sync.h"
#include "vm_callinfo.h"
#include "ractor_core.h"
#include "yjit.h"

#include "builtin.h"
#include "shape.h"

unsigned int
rb_gc_vm_lock(void)
{
    unsigned int lev;
    RB_VM_LOCK_ENTER_LEV(&lev);
    return lev;
}

void
rb_gc_vm_unlock(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_LEV(&lev);
}

unsigned int
rb_gc_cr_lock(void)
{
    unsigned int lev;
    RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
    return lev;
}

void
rb_gc_cr_unlock(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
}

unsigned int
rb_gc_vm_lock_no_barrier(void)
{
    unsigned int lev = 0;
    RB_VM_LOCK_ENTER_LEV_NB(&lev);
    return lev;
}

void
rb_gc_vm_unlock_no_barrier(unsigned int lev)
{
    RB_VM_LOCK_LEAVE_LEV(&lev);
}

void
rb_gc_vm_barrier(void)
{
    rb_vm_barrier();
}

#if USE_MODULAR_GC
void *
rb_gc_get_ractor_newobj_cache(void)
{
    return GET_RACTOR()->newobj_cache;
}

void
rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_initialize(&context->lock);
    context->ec = GET_EC();
}

void
rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_lock(&context->lock);

    GC_ASSERT(rb_current_execution_context(false) == NULL);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(context->ec);
#else
    native_tls_set(ruby_current_ec_key, context->ec);
#endif
}

void
rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
{
    rb_native_mutex_unlock(&context->lock);

    GC_ASSERT(rb_current_execution_context(true) == context->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    rb_current_ec_set(NULL);
#else
    native_tls_set(ruby_current_ec_key, NULL);
#endif
}
#endif

bool
rb_gc_event_hook_required_p(rb_event_flag_t event)
{
    return ruby_vm_event_flags & event;
}

void
rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
{
    if (LIKELY(!rb_gc_event_hook_required_p(event))) return;

    rb_execution_context_t *ec = GET_EC();
    if (!ec->cfp) return;

    EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
}

void *
rb_gc_get_objspace(void)
{
    return GET_VM()->gc.objspace;
}


void
rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
{
    rb_ractor_t *r = NULL;
    if (RB_LIKELY(ruby_single_main_ractor)) {
        GC_ASSERT(
            ccan_list_empty(&GET_VM()->ractor.set) ||
            (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
             ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
        );

        func(ruby_single_main_ractor->newobj_cache, data);
    }
    else {
        ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
            func(r->newobj_cache, data);
        }
    }
}

void
rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
{
    volatile struct {
        VALUE errinfo;
        VALUE final;
        rb_control_frame_t *cfp;
        VALUE *sp;
        long finished;
    } saved;

    rb_execution_context_t * volatile ec = GET_EC();
#define RESTORE_FINALIZER() (\
        ec->cfp = saved.cfp, \
        ec->cfp->sp = saved.sp, \
        ec->errinfo = saved.errinfo)

    saved.errinfo = ec->errinfo;
    saved.cfp = ec->cfp;
    saved.sp = ec->cfp->sp;
    saved.finished = 0;
    saved.final = Qundef;

    EC_PUSH_TAG(ec);
    enum ruby_tag_type state = EC_EXEC_TAG();
    if (state != TAG_NONE) {
        ++saved.finished; /* skip failed finalizer */

        VALUE failed_final = saved.final;
        saved.final = Qundef;
        if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
            rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
            rb_ec_error_print(ec, ec->errinfo);
        }
    }

    for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
        saved.final = callback(i, data);
        rb_check_funcall(saved.final, idCall, 1, &objid);
    }
    EC_POP_TAG();
#undef RESTORE_FINALIZER
}

void
rb_gc_set_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
}

void
rb_gc_unset_pending_interrupt(void)
{
    rb_execution_context_t *ec = GET_EC();
    ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
}

bool
rb_gc_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

bool rb_obj_is_main_ractor(VALUE gv);

bool
rb_gc_shutdown_call_finalizer_p(VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_DATA:
        if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
        if (rb_obj_is_thread(obj)) return false;
        if (rb_obj_is_mutex(obj)) return false;
        if (rb_obj_is_fiber(obj)) return false;
        if (rb_obj_is_main_ractor(obj)) return false;

        return true;

      case T_FILE:
        return true;

      case T_SYMBOL:
        if (RSYMBOL(obj)->fstr &&
                (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
                 BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
            RSYMBOL(obj)->fstr = 0;
        }
        return true;

      case T_NONE:
        return false;

      default:
        return ruby_free_at_exit_p();
    }
}

uint32_t
rb_gc_get_shape(VALUE obj)
{
    return (uint32_t)rb_shape_get_shape_id(obj);
}

void
rb_gc_set_shape(VALUE obj, uint32_t shape_id)
{
    rb_shape_set_shape_id(obj, (uint32_t)shape_id);
}

uint32_t
rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
{
    rb_shape_t *orig_shape = rb_shape_get_shape(obj);

    if (rb_shape_obj_too_complex(obj)) return (uint32_t)OBJ_TOO_COMPLEX_SHAPE_ID;

    rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(heap_id + FIRST_T_OBJECT_SHAPE_ID));
    rb_shape_t *new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);

    if (!new_shape) return 0;

    return (uint32_t)rb_shape_id(new_shape);
}

void rb_vm_update_references(void *ptr);

#define rb_setjmp(env) RUBY_SETJMP(env)
#define rb_jmp_buf rb_jmpbuf_t
#undef rb_data_object_wrap

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

#define unless_objspace(objspace) \
    void *objspace; \
    rb_vm_t *unless_objspace_vm = GET_VM(); \
    if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
    else /* return; or objspace will be warned uninitialized */

#define RMOVED(obj) ((struct RMoved *)(obj))

#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
    if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
        *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
    } \
} while (0)

#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
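
/* Usage sketch (illustrative only; the struct and function names below are
 * hypothetical): during compaction, update-references code rewrites stored
 * slots in place when the referent has moved, e.g.:
 *
 *     static void
 *     some_update_references(void *objspace, struct some_struct *p)
 *     {
 *         UPDATE_IF_MOVED(objspace, p->value);                    // plain VALUE slot
 *         TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, p->cref);  // typed pointer slot
 *     }
 */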

#if RUBY_MARK_FREE_DEBUG
int ruby_gc_debug_indent = 0;
#endif

#ifndef RGENGC_OBJ_INFO
# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
#endif

#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0
#endif

static size_t malloc_offset = 0;
#if defined(HAVE_MALLOC_USABLE_SIZE)
static size_t
gc_compute_malloc_offset(void)
{
    // Different allocators use different metadata storage strategies which result in different
    // ideal sizes.
    // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
    // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
    // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
    // waste memory.
    // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it resulted
    // in no wasted memory.
    size_t offset = 0;
    for (offset = 0; offset <= 16; offset += 8) {
        size_t allocated = (64 - offset);
        void *test_ptr = malloc(allocated);
        size_t wasted = malloc_usable_size(test_ptr) - allocated;
        free(test_ptr);

        if (wasted == 0) {
            return offset;
        }
    }
    return 0;
}
#else
static size_t
gc_compute_malloc_offset(void)
{
    // If we don't have malloc_usable_size, we use powers of 2.
    return 0;
}
#endif

size_t
rb_malloc_grow_capa(size_t current, size_t type_size)
{
    size_t current_capacity = current;
    if (current_capacity < 4) {
        current_capacity = 4;
    }
    current_capacity *= type_size;

    // We double the current capacity.
    size_t new_capacity = (current_capacity * 2);

    // And round up to the next power of 2 if it's not already one.
    // (Shift a size_t, not an int, so capacities >= 2^31 don't overflow.)
    if (rb_popcount64(new_capacity) != 1) {
        new_capacity = (size_t)1 << (64 - nlz_int64(new_capacity));
    }

    new_capacity -= malloc_offset;
    new_capacity /= type_size;
    if (current > new_capacity) {
        rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
    }
    RUBY_ASSERT(new_capacity > current);
    return new_capacity;
}
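
/* Worked example (assuming a glibc-like malloc_offset of 8, per the comment
 * in gc_compute_malloc_offset() above): growing a buffer of 4 elements with
 * type_size == 8 gives current_capacity = 32 bytes, doubled to 64, which is
 * already a power of 2; subtracting the offset yields 56 bytes, i.e. a new
 * capacity of 7 elements, and malloc(56) then wastes no usable-size slack. */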

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
}

static inline struct rbimpl_size_mul_overflow_tag
size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
    struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
    return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
}

PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);

static inline size_t
size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
{
    return size_mul_or_raise(x, y, exc);
}

static inline size_t
size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIuSIZE
            " * %"PRIuSIZE
            " + %"PRIuSIZE
            " > %"PRIuSIZE,
            x, y, z, (size_t)SIZE_MAX);
    }
}

size_t
rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
{
    return size_mul_add_or_raise(x, y, z, exc);
}
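
/* Usage sketch (hypothetical caller; `struct my_header` is a placeholder):
 * sizing an allocation of n elements plus a header without risking silent
 * wraparound. On overflow this raises the given exception class, or calls
 * rb_memerror() when invoked during GC, instead of returning a truncated
 * size:
 *
 *     size_t bytes = rb_size_mul_add_or_raise(n, sizeof(VALUE),
 *                                             sizeof(struct my_header),
 *                                             rb_eArgError);
 *     void *buf = ruby_xmalloc(bytes);
 */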

static inline size_t
size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
{
    struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
    if (LIKELY(!t.left)) {
        return t.right;
    }
    else if (rb_during_gc()) {
        rb_memerror(); /* or...? */
    }
    else {
        gc_raise(
            exc,
            "integer overflow: %"PRIdSIZE
            " * %"PRIdSIZE
            " + %"PRIdSIZE
            " * %"PRIdSIZE
            " > %"PRIdSIZE,
            x, y, z, w, (size_t)SIZE_MAX);
    }
}

#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
/* trick the compiler into thinking an external signal handler uses this */
volatile VALUE rb_gc_guarded_val;
volatile VALUE *
rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
{
    rb_gc_guarded_val = val;

    return ptr;
}
#endif

static const char *obj_type_name(VALUE obj);
#include "gc/default/default.c"

#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
# error "Modular GC requires dlopen"
#elif USE_MODULAR_GC
#include <dlfcn.h>

typedef struct gc_function_map {
    // Bootup
    void *(*objspace_alloc)(void);
    void (*objspace_init)(void *objspace_ptr);
    void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
    void (*set_params)(void *objspace_ptr);
    void (*init)(void);
    size_t *(*heap_sizes)(void *objspace_ptr);
    // Shutdown
    void (*shutdown_free_objects)(void *objspace_ptr);
    void (*objspace_free)(void *objspace_ptr);
    void (*ractor_cache_free)(void *objspace_ptr, void *cache);
    // GC
    void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
    bool (*during_gc_p)(void *objspace_ptr);
    void (*prepare_heap)(void *objspace_ptr);
    void (*gc_enable)(void *objspace_ptr);
    void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
    bool (*gc_enabled_p)(void *objspace_ptr);
    VALUE (*config_get)(void *objspace_ptr);
    void (*config_set)(void *objspace_ptr, VALUE hash);
    void (*stress_set)(void *objspace_ptr, VALUE flag);
    VALUE (*stress_get)(void *objspace_ptr);
    // Object allocation
    VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
    size_t (*obj_slot_size)(VALUE obj);
    size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
    bool (*size_allocatable_p)(size_t size);
    // Malloc
    void *(*malloc)(void *objspace_ptr, size_t size);
    void *(*calloc)(void *objspace_ptr, size_t size);
    void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
    void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
    void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
    // Marking
    void (*mark)(void *objspace_ptr, VALUE obj);
    void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
    void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
    void (*mark_maybe)(void *objspace_ptr, VALUE obj);
    void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
    void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
    // Compaction
    bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
    VALUE (*location)(void *objspace_ptr, VALUE value);
    // Write barriers
    void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
    void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
    void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
    // Heap walking
    void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
    void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
    // Finalizers
    void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
    VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
    void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
    void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
    void (*shutdown_call_finalizer)(void *objspace_ptr);
    // Object ID
    VALUE (*object_id)(void *objspace_ptr, VALUE obj);
    VALUE (*object_id_to_ref)(void *objspace_ptr, VALUE object_id);
    // Forking
    void (*before_fork)(void *objspace_ptr);
    void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
    // Statistics
    void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
    bool (*get_measure_total_time)(void *objspace_ptr);
    unsigned long long (*get_total_time)(void *objspace_ptr);
    size_t (*gc_count)(void *objspace_ptr);
    VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
    VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
    VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
    const char *(*active_gc_name)(void);
    // Miscellaneous
    struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
    bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
    bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
    void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
    void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);

    bool modular_gc_loaded_p;
} rb_gc_function_map_t;

static rb_gc_function_map_t rb_gc_functions;

# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)

static void
ruby_modular_gc_init(void)
{
    // Assert that the directory path ends with a /
    RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');

    const char *gc_so_file = getenv(RUBY_GC_LIBRARY);

    rb_gc_function_map_t gc_functions = { 0 };

    char *gc_so_path = NULL;
    void *handle = NULL;
    if (gc_so_file) {
        /* Check to make sure that gc_so_file matches /[A-Za-z0-9_-]+/ so that
         * it does not load a shared object outside of the directory. */
        for (size_t i = 0; i < strlen(gc_so_file); i++) {
            char c = gc_so_file[i];
            if (isalnum(c)) continue;
            switch (c) {
              case '-':
              case '_':
                break;
              default:
                fprintf(stderr, "Only alphanumeric characters, dashes, and underscores are allowed in "RUBY_GC_LIBRARY"\n");
                exit(1);
            }
        }

        size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
#ifdef LOAD_RELATIVE
        Dl_info dli;
        size_t prefix_len = 0;
        if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
            const char *base = strrchr(dli.dli_fname, '/');
            if (base) {
                size_t tail = 0;
# define end_with_p(lit) \
                (prefix_len >= (tail = rb_strlen_lit(lit)) && \
                 memcmp(base - tail, lit, tail) == 0)

                prefix_len = base - dli.dli_fname;
                if (end_with_p("/bin") || end_with_p("/lib")) {
                    prefix_len -= tail;
                }
                prefix_len += MODULAR_GC_DIR[0] != '/';
                gc_so_path_size += prefix_len;
            }
        }
#endif
        gc_so_path = alloca(gc_so_path_size);
        {
            size_t gc_so_path_idx = 0;
#define GC_SO_PATH_APPEND(str) do { \
    gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
} while (0)
#ifdef LOAD_RELATIVE
            if (prefix_len > 0) {
                memcpy(gc_so_path, dli.dli_fname, prefix_len);
                gc_so_path_idx = prefix_len;
            }
#endif
            GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
            GC_SO_PATH_APPEND(gc_so_file);
            GC_SO_PATH_APPEND(DLEXT);
            GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
#undef GC_SO_PATH_APPEND
        }

        handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
        if (!handle) {
            fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
            exit(1);
        }

        gc_functions.modular_gc_loaded_p = true;
    }

# define load_modular_gc_func(name) do { \
    if (handle) { \
        const char *func_name = "rb_gc_impl_" #name; \
        gc_functions.name = dlsym(handle, func_name); \
        if (!gc_functions.name) { \
            fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
            exit(1); \
        } \
    } \
    else { \
        gc_functions.name = rb_gc_impl_##name; \
    } \
} while (0)

    // Bootup
    load_modular_gc_func(objspace_alloc);
    load_modular_gc_func(objspace_init);
    load_modular_gc_func(ractor_cache_alloc);
    load_modular_gc_func(set_params);
    load_modular_gc_func(init);
    load_modular_gc_func(heap_sizes);
    // Shutdown
    load_modular_gc_func(shutdown_free_objects);
    load_modular_gc_func(objspace_free);
    load_modular_gc_func(ractor_cache_free);
    // GC
    load_modular_gc_func(start);
    load_modular_gc_func(during_gc_p);
    load_modular_gc_func(prepare_heap);
    load_modular_gc_func(gc_enable);
    load_modular_gc_func(gc_disable);
    load_modular_gc_func(gc_enabled_p);
    load_modular_gc_func(config_set);
    load_modular_gc_func(config_get);
    load_modular_gc_func(stress_set);
    load_modular_gc_func(stress_get);
    // Object allocation
    load_modular_gc_func(new_obj);
    load_modular_gc_func(obj_slot_size);
    load_modular_gc_func(heap_id_for_size);
    load_modular_gc_func(size_allocatable_p);
    // Malloc
    load_modular_gc_func(malloc);
    load_modular_gc_func(calloc);
    load_modular_gc_func(realloc);
    load_modular_gc_func(free);
    load_modular_gc_func(adjust_memory_usage);
    // Marking
    load_modular_gc_func(mark);
    load_modular_gc_func(mark_and_move);
    load_modular_gc_func(mark_and_pin);
    load_modular_gc_func(mark_maybe);
    load_modular_gc_func(mark_weak);
    load_modular_gc_func(remove_weak);
    // Compaction
    load_modular_gc_func(object_moved_p);
    load_modular_gc_func(location);
    // Write barriers
    load_modular_gc_func(writebarrier);
    load_modular_gc_func(writebarrier_unprotect);
    load_modular_gc_func(writebarrier_remember);
    // Heap walking
    load_modular_gc_func(each_objects);
    load_modular_gc_func(each_object);
    // Finalizers
    load_modular_gc_func(make_zombie);
    load_modular_gc_func(define_finalizer);
    load_modular_gc_func(undefine_finalizer);
    load_modular_gc_func(copy_finalizer);
    load_modular_gc_func(shutdown_call_finalizer);
    // Object ID
    load_modular_gc_func(object_id);
    load_modular_gc_func(object_id_to_ref);
    // Forking
    load_modular_gc_func(before_fork);
    load_modular_gc_func(after_fork);
    // Statistics
    load_modular_gc_func(set_measure_total_time);
    load_modular_gc_func(get_measure_total_time);
    load_modular_gc_func(get_total_time);
    load_modular_gc_func(gc_count);
    load_modular_gc_func(latest_gc_info);
    load_modular_gc_func(stat);
    load_modular_gc_func(stat_heap);
    load_modular_gc_func(active_gc_name);
    // Miscellaneous
    load_modular_gc_func(object_metadata);
    load_modular_gc_func(pointer_to_heap_p);
    load_modular_gc_func(garbage_object_p);
    load_modular_gc_func(set_event_hook);
    load_modular_gc_func(copy_attributes);

# undef load_modular_gc_func

    rb_gc_functions = gc_functions;
}
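
/* Usage sketch: in a build configured with modular GC support, setting the
 * RUBY_GC_LIBRARY environment variable makes boot load
 * MODULAR_GC_DIR/librubygc.<name>.DLEXT instead of the statically linked
 * default GC, e.g. (library name illustrative):
 *
 *     $ RUBY_GC_LIBRARY=mmtk ruby script.rb
 *
 * When the variable is unset, the built-in rb_gc_impl_* functions are used
 * directly. */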

// Bootup
# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
# define rb_gc_impl_set_params rb_gc_functions.set_params
# define rb_gc_impl_init rb_gc_functions.init
# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
// Shutdown
# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
// GC
# define rb_gc_impl_start rb_gc_functions.start
# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
# define rb_gc_impl_config_get rb_gc_functions.config_get
# define rb_gc_impl_config_set rb_gc_functions.config_set
# define rb_gc_impl_stress_set rb_gc_functions.stress_set
# define rb_gc_impl_stress_get rb_gc_functions.stress_get
// Object allocation
# define rb_gc_impl_new_obj rb_gc_functions.new_obj
# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
// Malloc
# define rb_gc_impl_malloc rb_gc_functions.malloc
# define rb_gc_impl_calloc rb_gc_functions.calloc
# define rb_gc_impl_realloc rb_gc_functions.realloc
# define rb_gc_impl_free rb_gc_functions.free
# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
// Marking
# define rb_gc_impl_mark rb_gc_functions.mark
# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
// Compaction
# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
# define rb_gc_impl_location rb_gc_functions.location
// Write barriers
# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
// Heap walking
# define rb_gc_impl_each_objects rb_gc_functions.each_objects
# define rb_gc_impl_each_object rb_gc_functions.each_object
// Finalizers
# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
// Object ID
# define rb_gc_impl_object_id rb_gc_functions.object_id
# define rb_gc_impl_object_id_to_ref rb_gc_functions.object_id_to_ref
// Forking
# define rb_gc_impl_before_fork rb_gc_functions.before_fork
# define rb_gc_impl_after_fork rb_gc_functions.after_fork
// Statistics
# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
# define rb_gc_impl_gc_count rb_gc_functions.gc_count
# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
# define rb_gc_impl_stat rb_gc_functions.stat
# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
// Miscellaneous
# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
#endif

#ifdef RUBY_ASAN_ENABLED
static void
asan_death_callback(void)
{
    if (GET_VM()) {
        rb_bug_without_die("ASAN error");
    }
}
#endif

static VALUE initial_stress = Qfalse;

void *
rb_objspace_alloc(void)
{
#if USE_MODULAR_GC
    ruby_modular_gc_init();
#endif

    void *objspace = rb_gc_impl_objspace_alloc();
    ruby_current_vm_ptr->gc.objspace = objspace;
    rb_gc_impl_objspace_init(objspace);
    rb_gc_impl_stress_set(objspace, initial_stress);

#ifdef RUBY_ASAN_ENABLED
    __sanitizer_set_death_callback(asan_death_callback);
#endif

    return objspace;
}

void
rb_objspace_free(void *objspace)
{
    rb_gc_impl_objspace_free(objspace);
}

size_t
rb_gc_obj_slot_size(VALUE obj)
{
    return rb_gc_impl_obj_slot_size(obj);
}

static inline void
gc_validate_pc(void)
{
#if RUBY_DEBUG
    rb_execution_context_t *ec = GET_EC();
    const rb_control_frame_t *cfp = ec->cfp;
    if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
        RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
        RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
    }
#endif
}

static inline VALUE
newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
{
    VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);

    gc_validate_pc();

    if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
        unsigned int lev;
        RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
        {
            memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);

            /* We must disable GC here because the callback could call xmalloc,
             * which could potentially trigger a GC. A lot of code is not safe
             * against a GC triggering right after an object has been allocated,
             * because callers still perform initialization for the object and
             * assume that the GC does not run before then. */
            bool gc_disabled = RTEST(rb_gc_disable_no_rest());
            {
                rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
            }
            if (!gc_disabled) rb_gc_enable();
        }
        RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
    }

    return obj;
}
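
/* Consumer sketch (C extension side; `newobj_cb` is a hypothetical name):
 * the NEWOBJ event fired above is observable through the debug tracepoint
 * API, e.g.:
 *
 *     static void
 *     newobj_cb(VALUE tpval, void *data)
 *     {
 *         rb_trace_arg_t *targ = rb_tracearg_from_tracepoint(tpval);
 *         VALUE obj = rb_tracearg_object(targ);
 *         // record obj somewhere; do not allocate or trigger GC in here
 *     }
 *
 *     VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, newobj_cb, NULL);
 *     rb_tracepoint_enable(tp);
 */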

VALUE
rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
}

VALUE
rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
{
    GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
    return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
}

#define UNEXPECTED_NODE(func) \
    rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
           BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)

static inline void
rb_data_object_check(VALUE klass)
{
    if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
        rb_undef_alloc_func(klass);
        rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
    }
}

VALUE
rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
    if (klass) rb_data_object_check(klass);
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
}

VALUE
rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
{
    VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}

static VALUE
typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
{
    RBIMPL_NONNULL_ARG(type);
    if (klass) rb_data_object_check(klass);
    bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
    return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
}

VALUE
rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
{
    if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
        rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
    }

    return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
}

VALUE
rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
{
    if (type->flags & RUBY_TYPED_EMBEDDABLE) {
        if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
            rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
        }

        size_t embed_size = offsetof(struct RTypedData, data) + size;
        if (rb_gc_size_allocatable_p(embed_size)) {
            VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
            memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
            return obj;
        }
    }

    VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
    DATA_PTR(obj) = xcalloc(1, size);
    return obj;
}
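
/* Definition sketch (extension side; `my_thing` names are hypothetical):
 * an embeddable TypedData opts in via its flags, letting small payloads
 * live directly in the GC slot instead of a separate malloc region:
 *
 *     struct my_thing { long a, b; };
 *
 *     static const rb_data_type_t my_thing_type = {
 *         .wrap_struct_name = "my_thing",
 *         .function = { .dfree = RUBY_TYPED_DEFAULT_FREE },
 *         .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE |
 *                  RUBY_TYPED_WB_PROTECTED,
 *     };
 *
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct my_thing),
 *                                             &my_thing_type);
 */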

static size_t
rb_objspace_data_type_memsize(VALUE obj)
{
    size_t size = 0;
    if (RTYPEDDATA_P(obj)) {
        const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
        const void *ptr = RTYPEDDATA_GET_DATA(obj);

        if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
#ifdef HAVE_MALLOC_USABLE_SIZE
            size += malloc_usable_size((void *)ptr);
#endif
        }

        if (ptr && type->function.dsize) {
            size += type->function.dsize(ptr);
        }
    }

    return size;
}

const char *
rb_objspace_data_type_name(VALUE obj)
{
    if (RTYPEDDATA_P(obj)) {
        return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
    }
    else {
        return 0;
    }
}

static enum rb_id_table_iterator_result
cvar_table_free_i(VALUE value, void *ctx)
{
    xfree((void *)value);
    return ID_TABLE_CONTINUE;
}

static inline void
make_io_zombie(void *objspace, VALUE obj)
{
    rb_io_t *fptr = RFILE(obj)->fptr;
    rb_gc_impl_make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
}

static bool
rb_data_free(void *objspace, VALUE obj)
{
    void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
    if (data) {
        int free_immediately = false;
        void (*dfree)(void *);

        if (RTYPEDDATA_P(obj)) {
            free_immediately = (RTYPEDDATA(obj)->type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
            dfree = RTYPEDDATA(obj)->type->function.dfree;
        }
        else {
            dfree = RDATA(obj)->dfree;
        }

        if (dfree) {
            if (dfree == RUBY_DEFAULT_FREE) {
                if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                    RB_DEBUG_COUNTER_INC(obj_data_xfree);
                }
            }
            else if (free_immediately) {
                (*dfree)(data);
                if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
                    xfree(data);
                }

                RB_DEBUG_COUNTER_INC(obj_data_imm_free);
            }
            else {
                rb_gc_impl_make_zombie(objspace, obj, dfree, data);
                RB_DEBUG_COUNTER_INC(obj_data_zombie);
                return FALSE;
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_data_empty);
        }
    }

    return true;
}

void
rb_gc_obj_free_vm_weak_references(VALUE obj)
{
    if (FL_TEST(obj, FL_EXIVAR)) {
        rb_free_generic_ivar(obj);
        FL_UNSET(obj, FL_EXIVAR);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_STRING:
        if (FL_TEST(obj, RSTRING_FSTR)) {
            st_data_t fstr = (st_data_t)obj;
            st_delete(rb_vm_fstring_table(), &fstr, NULL);
            RB_DEBUG_COUNTER_INC(obj_str_fstr);

            FL_UNSET(obj, RSTRING_FSTR);
        }
        break;
      case T_SYMBOL:
        rb_gc_free_dsymbol(obj);
        break;
      case T_IMEMO:
        switch (imemo_type(obj)) {
          case imemo_callinfo:
            rb_vm_ci_free((const struct rb_callinfo *)obj);
            break;
          case imemo_ment:
            rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
            break;
          default:
            break;
        }
        break;
      default:
        break;
    }
}

bool
rb_gc_obj_free(void *objspace, VALUE obj)
{
    RB_DEBUG_COUNTER_INC(obj_free);

    switch (BUILTIN_TYPE(obj)) {
      case T_NIL:
      case T_FIXNUM:
      case T_TRUE:
      case T_FALSE:
        rb_bug("obj_free() called for broken object");
        break;
      default:
        break;
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (rb_shape_obj_too_complex(obj)) {
            RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
            st_free_table(ROBJECT_IV_HASH(obj));
        }
        else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
            RB_DEBUG_COUNTER_INC(obj_obj_embed);
        }
        else {
            xfree(ROBJECT(obj)->as.heap.ivptr);
            RB_DEBUG_COUNTER_INC(obj_obj_ptr);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        rb_id_table_free(RCLASS_M_TBL(obj));
        rb_cc_table_free(obj);
        if (rb_shape_obj_too_complex(obj)) {
            st_free_table((st_table *)RCLASS_IVPTR(obj));
        }
        else {
            xfree(RCLASS_IVPTR(obj));
        }

        if (RCLASS_CONST_TBL(obj)) {
            rb_free_const_table(RCLASS_CONST_TBL(obj));
        }
        if (RCLASS_CVC_TBL(obj)) {
            rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
            rb_id_table_free(RCLASS_CVC_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);
        if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
            xfree(RCLASS_SUPERCLASSES(obj));
        }

        (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
        (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
        break;
      case T_STRING:
        rb_str_free(obj);
        break;
      case T_ARRAY:
        rb_ary_free(obj);
        break;
      case T_HASH:
#if USE_DEBUG_COUNTER
        switch (RHASH_SIZE(obj)) {
          case 0:
            RB_DEBUG_COUNTER_INC(obj_hash_empty);
            break;
          case 1:
            RB_DEBUG_COUNTER_INC(obj_hash_1);
            break;
          case 2:
            RB_DEBUG_COUNTER_INC(obj_hash_2);
            break;
          case 3:
            RB_DEBUG_COUNTER_INC(obj_hash_3);
            break;
          case 4:
            RB_DEBUG_COUNTER_INC(obj_hash_4);
            break;
          case 5:
          case 6:
          case 7:
          case 8:
            RB_DEBUG_COUNTER_INC(obj_hash_5_8);
            break;
          default:
            GC_ASSERT(RHASH_SIZE(obj) > 8);
            RB_DEBUG_COUNTER_INC(obj_hash_g8);
        }

        if (RHASH_AR_TABLE_P(obj)) {
            if (RHASH_AR_TABLE(obj) == NULL) {
                RB_DEBUG_COUNTER_INC(obj_hash_null);
            }
            else {
                RB_DEBUG_COUNTER_INC(obj_hash_ar);
            }
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_hash_st);
        }
#endif

        rb_hash_free(obj);
        break;
      case T_REGEXP:
        if (RREGEXP(obj)->ptr) {
            onig_free(RREGEXP(obj)->ptr);
            RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
        }
        break;
      case T_DATA:
        if (!rb_data_free(objspace, obj)) return false;
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
#if USE_DEBUG_COUNTER
            if (rm->regs.num_regs >= 8) {
                RB_DEBUG_COUNTER_INC(obj_match_ge8);
            }
            else if (rm->regs.num_regs >= 4) {
                RB_DEBUG_COUNTER_INC(obj_match_ge4);
            }
            else if (rm->regs.num_regs >= 1) {
                RB_DEBUG_COUNTER_INC(obj_match_under4);
            }
#endif
            onig_region_free(&rm->regs, 0);
            xfree(rm->char_offset);

            RB_DEBUG_COUNTER_INC(obj_match_ptr);
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            make_io_zombie(objspace, obj);
            RB_DEBUG_COUNTER_INC(obj_file_ptr);
            return FALSE;
        }
        break;
      case T_RATIONAL:
        RB_DEBUG_COUNTER_INC(obj_rational);
        break;
      case T_COMPLEX:
        RB_DEBUG_COUNTER_INC(obj_complex);
        break;
      case T_MOVED:
        break;
      case T_ICLASS:
        /* Basically, T_ICLASS shares its method table with the module */
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            /* Method table is not shared for origin iclasses of classes */
            rb_id_table_free(RCLASS_M_TBL(obj));
        }
        if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
            rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
        }
        rb_class_remove_subclass_head(obj);
        rb_cc_table_free(obj);
        rb_class_remove_from_module_subclasses(obj);
        rb_class_remove_from_super_subclasses(obj);

        RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
        break;

      case T_FLOAT:
        RB_DEBUG_COUNTER_INC(obj_float);
        break;

      case T_BIGNUM:
        if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
            xfree(BIGNUM_DIGITS(obj));
            RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
        }
        else {
            RB_DEBUG_COUNTER_INC(obj_bignum_embed);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_free);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
            RSTRUCT(obj)->as.heap.ptr == NULL) {
            RB_DEBUG_COUNTER_INC(obj_struct_embed);
        }
        else {
            xfree((void *)RSTRUCT(obj)->as.heap.ptr);
            RB_DEBUG_COUNTER_INC(obj_struct_ptr);
        }
        break;

      case T_SYMBOL:
        RB_DEBUG_COUNTER_INC(obj_symbol);
        break;

      case T_IMEMO:
        rb_imemo_free((VALUE)obj);
        break;

      default:
        rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
               BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
    }

    if (FL_TEST(obj, FL_FINALIZE)) {
        rb_gc_impl_make_zombie(objspace, obj, 0, 0);
        return FALSE;
    }
    else {
        return TRUE;
    }
}

void
rb_objspace_set_event_hook(const rb_event_flag_t event)
{
    rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
}

static int
internal_object_p(VALUE obj)
{
    void *ptr = asan_unpoison_object_temporary(obj);

    if (RBASIC(obj)->flags) {
        switch (BUILTIN_TYPE(obj)) {
          case T_NODE:
            UNEXPECTED_NODE(internal_object_p);
            break;
          case T_NONE:
          case T_MOVED:
          case T_IMEMO:
          case T_ICLASS:
          case T_ZOMBIE:
            break;
          case T_CLASS:
            if (!RBASIC(obj)->klass) break;
            if (RCLASS_SINGLETON_P(obj)) {
                return rb_singleton_class_internal_p(obj);
            }
            return 0;
          default:
            if (!RBASIC(obj)->klass) break;
            return 0;
        }
    }
    if (ptr || !RBASIC(obj)->flags) {
        rb_asan_poison_object(obj);
    }
    return 1;
}

int
rb_objspace_internal_object_p(VALUE obj)
{
    return internal_object_p(obj);
}

struct os_each_struct {
    size_t num;
    VALUE of;
};

static int
os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
{
    struct os_each_struct *oes = (struct os_each_struct *)data;

    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        if (!internal_object_p(v)) {
            if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
                if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
                    rb_yield(v);
                    oes->num++;
                }
            }
        }
    }

    return 0;
}

static VALUE
os_obj_of(VALUE of)
{
    struct os_each_struct oes;

    oes.num = 0;
    oes.of = of;
    rb_objspace_each_objects(os_obj_of_i, &oes);
    return SIZET2NUM(oes.num);
}

/*
 *  call-seq:
 *     ObjectSpace.each_object([module]) {|obj| ... } -> integer
 *     ObjectSpace.each_object([module])              -> an_enumerator
 *
 *  Calls the block once for each living, nonimmediate object in this
 *  Ruby process. If <i>module</i> is specified, calls the block
 *  for only those classes or modules that match (or are a subclass of)
 *  <i>module</i>. Returns the number of objects found. Immediate
 *  objects (<code>Fixnum</code>s, <code>Symbol</code>s,
 *  <code>true</code>, <code>false</code>, and <code>nil</code>) are
 *  never returned. In the example below, #each_object returns both
 *  the numbers we defined and several constants defined in the Math
 *  module.
 *
 *  If no block is given, an enumerator is returned instead.
 *
 *     a = 102.7
 *     b = 95       # Won't be returned
 *     c = 12345678987654321
 *     count = ObjectSpace.each_object(Numeric) {|x| p x }
 *     puts "Total count: #{count}"
 *
 *  <em>produces:</em>
 *
 *     12345678987654321
 *     102.7
 *     2.71828182845905
 *     3.14159265358979
 *     2.22044604925031e-16
 *     1.7976931348623157e+308
 *     2.2250738585072e-308
 *     Total count: 7
 *
 */

static VALUE
os_each_obj(int argc, VALUE *argv, VALUE os)
{
    VALUE of;

    of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
    RETURN_ENUMERATOR(os, 1, &of);
    return os_obj_of(of);
}

/*
 *  call-seq:
 *     ObjectSpace.undefine_finalizer(obj)
 *
 *  Removes all finalizers for <i>obj</i>.
 *
 */

static VALUE
undefine_final(VALUE os, VALUE obj)
{
    return rb_undefine_finalizer(obj);
}

VALUE
rb_undefine_finalizer(VALUE obj)
{
    rb_check_frozen(obj);

    rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);

    return obj;
}

static void
should_be_callable(VALUE block)
{
    if (!rb_obj_respond_to(block, idCall, TRUE)) {
        rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
                 rb_obj_class(block));
    }
}

static void
should_be_finalizable(VALUE obj)
{
    if (!FL_ABLE(obj)) {
        rb_raise(rb_eArgError, "cannot define finalizer for %s",
                 rb_obj_classname(obj));
    }
    rb_check_frozen(obj);
}

void
rb_gc_copy_finalizer(VALUE dest, VALUE obj)
{
    rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
}

/*
 *  call-seq:
 *     ObjectSpace.define_finalizer(obj, aProc=proc())
 *
 *  Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
 *  was destroyed. The object ID of the <i>obj</i> will be passed
 *  as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
 *  method, make sure it can be called with a single argument.
 *
 *  The return value is an array <code>[0, aProc]</code>.
 *
 *  The two recommended patterns are to either create the finalizer proc
 *  in a non-instance method where it can safely capture the needed state,
 *  or to use a custom callable object that stores the needed state
 *  explicitly as instance variables.
 *
 *     class Foo
 *       def initialize(data_needed_for_finalization)
 *         ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
 *       end
 *
 *       def self.create_finalizer(data_needed_for_finalization)
 *         proc {
 *           puts "finalizing #{data_needed_for_finalization}"
 *         }
 *       end
 *     end
 *
 *     class Bar
 *       class Remover
 *         def initialize(data_needed_for_finalization)
 *           @data_needed_for_finalization = data_needed_for_finalization
 *         end
 *
 *         def call(id)
 *           puts "finalizing #{@data_needed_for_finalization}"
 *         end
 *       end
 *
 *       def initialize(data_needed_for_finalization)
 *         ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
 *       end
 *     end
 *
 *  Note that if your finalizer references the object to be
 *  finalized it will never be run on GC, although it will still be
 *  run at exit. You will get a warning if you capture the object
 *  to be finalized as the receiver of the finalizer.
 *
 *     class CapturesSelf
 *       def initialize(name)
 *         ObjectSpace.define_finalizer(self, proc {
 *           # this finalizer will only be run on exit
 *           puts "finalizing #{name}"
 *         })
 *       end
 *     end
 *
 *  Also note that finalization can be unpredictable and is never guaranteed
 *  to be run except on exit.
 */

static VALUE
define_final(int argc, VALUE *argv, VALUE os)
{
    VALUE obj, block;

    rb_scan_args(argc, argv, "11", &obj, &block);
    if (argc == 1) {
        block = rb_block_proc();
    }

    if (rb_callable_receiver(block) == obj) {
        rb_warn("finalizer references object to be finalized");
    }

    return rb_define_finalizer(obj, block);
}

VALUE
rb_define_finalizer(VALUE obj, VALUE block)
{
    should_be_finalizable(obj);
    should_be_callable(block);

    block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);

    block = rb_ary_new3(2, INT2FIX(0), block);
    OBJ_FREEZE(block);
    return block;
}

void
rb_objspace_call_finalizer(void)
{
    rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
}

void
rb_objspace_free_objects(void *objspace)
{
    rb_gc_impl_shutdown_free_objects(objspace);
}

int
rb_objspace_garbage_object_p(VALUE obj)
{
    return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
}

bool
rb_gc_pointer_to_heap_p(VALUE obj)
{
    return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
}

/*
 *  call-seq:
 *     ObjectSpace._id2ref(object_id) -> an_object
 *
 *  Converts an object id to a reference to the object. May not be
 *  called on an object id passed as a parameter to a finalizer.
 *
 *     s = "I am a string"                    #=> "I am a string"
 *     r = ObjectSpace._id2ref(s.object_id)   #=> "I am a string"
 *     r == s                                 #=> true
 *
 *  In multi-ractor mode, if the object is not shareable, this raises
 *  RangeError.
 */

static VALUE
id2ref(VALUE objid)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULONG(x)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
#define NUM2PTR(x) NUM2ULL(x)
#endif
    objid = rb_to_int(objid);
    if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
        VALUE ptr = NUM2PTR(objid);
        if (SPECIAL_CONST_P(ptr)) {
            if (ptr == Qtrue) return Qtrue;
            if (ptr == Qfalse) return Qfalse;
            if (NIL_P(ptr)) return Qnil;
            if (FIXNUM_P(ptr)) return ptr;
            if (FLONUM_P(ptr)) return ptr;

            if (SYMBOL_P(ptr)) {
                // Check that the symbol is valid
                if (rb_static_id_valid_p(SYM2ID(ptr))) {
                    return ptr;
                }
                else {
                    rb_raise(rb_eRangeError, "%p is not symbol id value", (void *)ptr);
                }
            }

            rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_int2str(objid, 10));
        }
    }

    VALUE obj = rb_gc_impl_object_id_to_ref(rb_gc_get_objspace(), objid);
    if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
        return obj;
    }
    else {
        rb_raise(rb_eRangeError, "%+"PRIsVALUE" is id of the unshareable object on multi-ractor", rb_int2str(objid, 10));
    }
}

/* :nodoc: */
static VALUE
os_id2ref(VALUE os, VALUE objid)
{
    return id2ref(objid);
}

static VALUE
rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(void *, VALUE))
{
    if (SPECIAL_CONST_P(obj)) {
#if SIZEOF_LONG == SIZEOF_VOIDP
        return LONG2NUM((SIGNED_VALUE)obj);
#else
        return LL2NUM((SIGNED_VALUE)obj);
#endif
    }

    return get_heap_object_id(objspace, obj);
}

static VALUE
nonspecial_obj_id(void *_objspace, VALUE obj)
{
#if SIZEOF_LONG == SIZEOF_VOIDP
    return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
    return LL2NUM((SIGNED_VALUE)(obj) / 2);
#else
# error not supported
#endif
}

VALUE
rb_memory_id(VALUE obj)
{
    return rb_find_object_id(NULL, obj, nonspecial_obj_id);
}

/*
 *  Document-method: __id__
 *  Document-method: object_id
 *
 *  call-seq:
 *     obj.__id__       -> integer
 *     obj.object_id    -> integer
 *
 *  Returns an integer identifier for +obj+.
 *
 *  The same number will be returned on all calls to +object_id+ for a given
 *  object, and no two active objects will share an id.
 *
 *  Note that some objects of built-in classes are reused for optimization.
 *  This is the case for immediate values and frozen string literals.
 *
 *  BasicObject implements +__id__+, Kernel implements +object_id+.
 *
 *  Immediate values are not passed by reference but are passed by value:
 *  +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
 *
 *      Object.new.object_id  == Object.new.object_id  # => false
 *      (21 * 2).object_id    == (21 * 2).object_id    # => true
 *      "hello".object_id     == "hello".object_id     # => false
 *      "hi".freeze.object_id == "hi".freeze.object_id # => true
 */

VALUE
rb_obj_id(VALUE obj)
{
    /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
     * Otherwise, the object ID is a Numeric that is a non-zero multiple of
     * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
     * any immediates. */
    return rb_find_object_id(rb_gc_get_objspace(), obj, rb_gc_impl_object_id);
}

static enum rb_id_table_iterator_result
cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
{
    size_t *total_size = data_ptr;
    struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
    *total_size += sizeof(*ccs);
    *total_size += sizeof(ccs->entries[0]) * ccs->capa;
    return ID_TABLE_CONTINUE;
}

static size_t
cc_table_memsize(struct rb_id_table *cc_table)
{
    size_t total = rb_id_table_memsize(cc_table);
    rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
    return total;
}

size_t
rb_obj_memsize_of(VALUE obj)
{
    size_t size = 0;

    if (SPECIAL_CONST_P(obj)) {
        return 0;
    }

    if (FL_TEST(obj, FL_EXIVAR)) {
        size += rb_generic_ivar_memsize(obj);
    }

    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
        if (rb_shape_obj_too_complex(obj)) {
            size += rb_st_memsize(ROBJECT_IV_HASH(obj));
        }
        else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
            size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
        }
        break;
      case T_MODULE:
      case T_CLASS:
        if (RCLASS_M_TBL(obj)) {
            size += rb_id_table_memsize(RCLASS_M_TBL(obj));
        }
        // class IV sizes are allocated as powers of two
        size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
        if (RCLASS_CVC_TBL(obj)) {
            size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
        }
        if (RCLASS_EXT(obj)->const_tbl) {
            size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
        }
        if (RCLASS_CC_TBL(obj)) {
            size += cc_table_memsize(RCLASS_CC_TBL(obj));
        }
        if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
            size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
        }
        break;
      case T_ICLASS:
        if (RICLASS_OWNS_M_TBL_P(obj)) {
            if (RCLASS_M_TBL(obj)) {
                size += rb_id_table_memsize(RCLASS_M_TBL(obj));
            }
        }
        if (RCLASS_CC_TBL(obj)) {
            size += cc_table_memsize(RCLASS_CC_TBL(obj));
        }
        break;
      case T_STRING:
        size += rb_str_memsize(obj);
        break;
      case T_ARRAY:
        size += rb_ary_memsize(obj);
        break;
      case T_HASH:
        if (RHASH_ST_TABLE_P(obj)) {
            VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
            /* st_table is in the slot */
            size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
        }
        break;
      case T_REGEXP:
        if (RREGEXP_PTR(obj)) {
            size += onig_memsize(RREGEXP_PTR(obj));
        }
        break;
      case T_DATA:
        size += rb_objspace_data_type_memsize(obj);
        break;
      case T_MATCH:
        {
            rb_matchext_t *rm = RMATCH_EXT(obj);
            size += onig_region_memsize(&rm->regs);
            size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
        }
        break;
      case T_FILE:
        if (RFILE(obj)->fptr) {
            size += rb_io_memsize(RFILE(obj)->fptr);
        }
        break;
      case T_RATIONAL:
      case T_COMPLEX:
        break;
      case T_IMEMO:
        size += rb_imemo_memsize(obj);
        break;

      case T_FLOAT:
      case T_SYMBOL:
        break;

      case T_BIGNUM:
        if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
            size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
        }
        break;

      case T_NODE:
        UNEXPECTED_NODE(obj_memsize_of);
        break;

      case T_STRUCT:
        if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
            RSTRUCT(obj)->as.heap.ptr) {
            size += sizeof(VALUE) * RSTRUCT_LEN(obj);
        }
        break;

      case T_ZOMBIE:
      case T_MOVED:
        break;

      default:
        rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
               BUILTIN_TYPE(obj), (void*)obj);
    }

    return size + rb_gc_obj_slot_size(obj);
}
2032
2033static int
2034set_zero(st_data_t key, st_data_t val, st_data_t arg)
2035{
2036 VALUE k = (VALUE)key;
2037 VALUE hash = (VALUE)arg;
2038 rb_hash_aset(hash, k, INT2FIX(0));
2039 return ST_CONTINUE;
2040}
2041
2042struct count_objects_data {
2043    size_t counts[T_MASK+1];
2044 size_t freed;
2045 size_t total;
2046};
2047
2048static void
2049count_objects_i(VALUE obj, void *d)
2050{
2051 struct count_objects_data *data = (struct count_objects_data *)d;
2052
2053 if (RBASIC(obj)->flags) {
2054 data->counts[BUILTIN_TYPE(obj)]++;
2055 }
2056 else {
2057 data->freed++;
2058 }
2059
2060 data->total++;
2061}
2062
2063/*
2064 * call-seq:
2065 * ObjectSpace.count_objects([result_hash]) -> hash
2066 *
2067 * Counts all objects grouped by type.
2068 *
2069 * It returns a hash, such as:
2070 * {
2071 * :TOTAL=>10000,
2072 * :FREE=>3011,
2073 * :T_OBJECT=>6,
2074 * :T_CLASS=>404,
2075 * # ...
2076 * }
2077 *
2078 * The contents of the returned hash are implementation specific.
2079 * They may change in the future.
2080 *
2081 * Keys starting with +:T_+ count live objects.
2082 * For example, +:T_ARRAY+ is the number of arrays.
2083 * +:FREE+ is the number of object slots that are not currently in use.
2084 * +:TOTAL+ is the sum of the above.
2085 *
2086 * If the optional argument +result_hash+ is given,
2087 * it is overwritten and returned. This is intended to avoid the probe effect.
2088 *
2089 * h = {}
2090 * ObjectSpace.count_objects(h)
2091 * puts h
2092 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2093 *
2094 * This method is only expected to work on C Ruby.
2095 *
2096 */
2097
2098static VALUE
2099count_objects(int argc, VALUE *argv, VALUE os)
2100{
2101 struct count_objects_data data = { 0 };
2102 VALUE hash = Qnil;
2103
2104 if (rb_check_arity(argc, 0, 1) == 1) {
2105 hash = argv[0];
2106 if (!RB_TYPE_P(hash, T_HASH))
2107 rb_raise(rb_eTypeError, "non-hash given");
2108 }
2109
2110 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2111
2112 if (NIL_P(hash)) {
2113 hash = rb_hash_new();
2114 }
2115 else if (!RHASH_EMPTY_P(hash)) {
2116 rb_hash_stlike_foreach(hash, set_zero, hash);
2117 }
2118 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2119 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2120
2121 for (size_t i = 0; i <= T_MASK; i++) {
2122 VALUE type = type_sym(i);
2123 if (data.counts[i])
2124 rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
2125 }
2126
2127 return hash;
2128}
2129
2130#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2131
2132#define STACK_START (ec->machine.stack_start)
2133#define STACK_END (ec->machine.stack_end)
2134#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2135
2136#if STACK_GROW_DIRECTION < 0
2137# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2138#elif STACK_GROW_DIRECTION > 0
2139# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2140#else
2141# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2142 : (size_t)(STACK_END - STACK_START + 1))
2143#endif
2144#if !STACK_GROW_DIRECTION
2145int ruby_stack_grow_direction;
2146int
2147ruby_get_stack_grow_direction(volatile VALUE *addr)
2148{
2149 VALUE *end;
2150 SET_MACHINE_STACK_END(&end);
2151
2152 if (end > addr) return ruby_stack_grow_direction = 1;
2153 return ruby_stack_grow_direction = -1;
2154}
2155#endif
2156
2157size_t
2158ruby_stack_length(VALUE **p)
2159{
2160 rb_execution_context_t *ec = GET_EC();
2161 SET_STACK_END;
2162 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2163 return STACK_LENGTH;
2164}
2165
2166#define PREVENT_STACK_OVERFLOW 1
2167#ifndef PREVENT_STACK_OVERFLOW
2168#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2169# define PREVENT_STACK_OVERFLOW 1
2170#else
2171# define PREVENT_STACK_OVERFLOW 0
2172#endif
2173#endif
2174#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2175static int
2176stack_check(rb_execution_context_t *ec, int water_mark)
2177{
2178 SET_STACK_END;
2179
2180 size_t length = STACK_LENGTH;
2181 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2182
2183 return length > maximum_length;
2184}
2185#else
2186#define stack_check(ec, water_mark) FALSE
2187#endif
2188
2189#define STACKFRAME_FOR_CALL_CFUNC 2048
2190
2191int
2192rb_ec_stack_check(rb_execution_context_t *ec)
2193{
2194 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2195}
2196
2197int
2198ruby_stack_check(void)
2199{
2200 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2201}
2202
2203/* ==================== Marking ==================== */
2204
2205#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2206 if (!RB_SPECIAL_CONST_P(obj)) { \
2207 rb_vm_t *vm = GET_VM(); \
2208 void *objspace = vm->gc.objspace; \
2209 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2210 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2211 (func)(objspace, (obj_or_ptr)); \
2212 } \
2213 else if (check_obj ? \
2214 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2215 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2216 true) { \
2217 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2218 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2219 vm->gc.mark_func_data = NULL; \
2220 mark_func_data->mark_func((obj), mark_func_data->data); \
2221 vm->gc.mark_func_data = mark_func_data; \
2222 } \
2223 } \
2224} while (0)
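/* A sketch of the traversal mode (the callback name is hypothetical): when
 * vm->gc.mark_func_data is installed, the mark functions below do not mark
 * anything; they hand each referenced object to the callback instead.
 * rb_objspace_reachable_objects_from() (later in this file) is built on
 * exactly this mechanism:
 *
 *     static void
 *     each_child(VALUE child, void *data)
 *     {
 *         // inspect `child`; no marking happens in this mode
 *     }
 *
 *     // rb_objspace_reachable_objects_from(obj, each_child, NULL);
 */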
2225
2226static inline void
2227gc_mark_internal(VALUE obj)
2228{
2229 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2230}
2231
2232void
2233rb_gc_mark_movable(VALUE obj)
2234{
2235 gc_mark_internal(obj);
2236}
2237
2238void
2239rb_gc_mark_and_move(VALUE *ptr)
2240{
2241 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2242}
2243
2244static inline void
2245gc_mark_and_pin_internal(VALUE obj)
2246{
2247 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2248}
2249
2250void
2251rb_gc_mark(VALUE obj)
2252{
2253 gc_mark_and_pin_internal(obj);
2254}
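/* A sketch for C extension authors (struct and function names are
 * hypothetical): rb_gc_mark() pins its argument, while rb_gc_mark_movable()
 * allows it to be compacted, provided a dcompact callback later fixes the
 * reference up with rb_gc_location():
 *
 *     struct wrapper { VALUE ref; };
 *
 *     static void
 *     wrapper_mark(void *p)
 *     {
 *         struct wrapper *w = p;
 *         rb_gc_mark_movable(w->ref);       // `ref` may move during compaction
 *     }
 *
 *     static void
 *     wrapper_compact(void *p)
 *     {
 *         struct wrapper *w = p;
 *         w->ref = rb_gc_location(w->ref);  // fetch the new address
 *     }
 */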
2255
2256static inline void
2257gc_mark_maybe_internal(VALUE obj)
2258{
2259 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2260}
2261
2262void
2263rb_gc_mark_maybe(VALUE obj)
2264{
2265 gc_mark_maybe_internal(obj);
2266}
2267
2268void
2269rb_gc_mark_weak(VALUE *ptr)
2270{
2271 if (RB_SPECIAL_CONST_P(*ptr)) return;
2272
2273 rb_vm_t *vm = GET_VM();
2274 void *objspace = vm->gc.objspace;
2275 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2276 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2277
2278 rb_gc_impl_mark_weak(objspace, ptr);
2279 }
2280 else {
2281 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2282 }
2283}
2284
2285void
2286rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2287{
2288 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2289}
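/* A sketch of weak marking (the holder struct is hypothetical): a slot
 * passed to rb_gc_mark_weak() does not keep its referent alive; if the
 * referent dies, the GC clears the slot (to Qundef in the default GC).
 * rb_gc_remove_weak() must be called if the holder itself is freed during
 * GC, so the GC does not later write through a dangling pointer:
 *
 *     static void
 *     holder_mark(void *p)
 *     {
 *         struct holder *h = p;
 *         rb_gc_mark_weak(&h->maybe_dead);
 *     }
 */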
2290
2291ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2292static void
2293each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2294{
2295 VALUE v;
2296 while (n--) {
2297 v = *x;
2298 cb(v, data);
2299 x++;
2300 }
2301}
2302
2303static void
2304each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2305{
2306 if (end <= start) return;
2307 each_location(start, end - start, cb, data);
2308}
2309
2310static void
2311gc_mark_maybe_each_location(VALUE obj, void *data)
2312{
2313 gc_mark_maybe_internal(obj);
2314}
2315
2316void
2317rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2318{
2319 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2320}
2321
2322void
2323rb_gc_mark_values(long n, const VALUE *values)
2324{
2325 for (long i = 0; i < n; i++) {
2326 gc_mark_internal(values[i]);
2327 }
2328}
2329
2330void
2331rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2332{
2333 for (long i = 0; i < n; i++) {
2334 gc_mark_and_pin_internal(values[i]);
2335 }
2336}
2337
2338static int
2339mark_key(st_data_t key, st_data_t value, st_data_t data)
2340{
2341 gc_mark_and_pin_internal((VALUE)key);
2342
2343 return ST_CONTINUE;
2344}
2345
2346void
2347rb_mark_set(st_table *tbl)
2348{
2349 if (!tbl) return;
2350
2351 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2352}
2353
2354static int
2355mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2356{
2357 gc_mark_internal((VALUE)key);
2358 gc_mark_internal((VALUE)value);
2359
2360 return ST_CONTINUE;
2361}
2362
2363static int
2364pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2365{
2366 gc_mark_and_pin_internal((VALUE)key);
2367 gc_mark_and_pin_internal((VALUE)value);
2368
2369 return ST_CONTINUE;
2370}
2371
2372static int
2373pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2374{
2375 gc_mark_and_pin_internal((VALUE)key);
2376 gc_mark_internal((VALUE)value);
2377
2378 return ST_CONTINUE;
2379}
2380
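/* Note for the helpers below: a compare_by_identity hash keys on object
 * addresses, so its keys must stay pinned; if a key moved during compaction,
 * its bucket would become stale. An ordinary hash keys on object contents,
 * which do not depend on the address, so both keys and values may move. */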
2381static void
2382mark_hash(VALUE hash)
2383{
2384 if (rb_hash_compare_by_id_p(hash)) {
2385 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2386 }
2387 else {
2388 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2389 }
2390
2391 gc_mark_internal(RHASH(hash)->ifnone);
2392}
2393
2394void
2395rb_mark_hash(st_table *tbl)
2396{
2397 if (!tbl) return;
2398
2399 st_foreach(tbl, pin_key_pin_value, 0);
2400}
2401
2402static enum rb_id_table_iterator_result
2403mark_method_entry_i(VALUE me, void *objspace)
2404{
2405 gc_mark_internal(me);
2406
2407 return ID_TABLE_CONTINUE;
2408}
2409
2410static void
2411mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2412{
2413 if (tbl) {
2414 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2415 }
2416}
2417
2418#if STACK_GROW_DIRECTION < 0
2419#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2420#elif STACK_GROW_DIRECTION > 0
2421#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2422#else
2423#define GET_STACK_BOUNDS(start, end, appendix) \
2424 ((STACK_END < STACK_START) ? \
2425 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2426#endif
2427
2428static void
2429gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2430{
2431 gc_mark_maybe_internal(obj);
2432
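    /* Under ASan, C locals may live in "fake frames" allocated outside the
     * real machine stack, so a word found on the stack can be a pointer into
     * a fake frame rather than a VALUE. When asan_get_fake_stack_extents()
     * detects this, the fake frame's contents are scanned conservatively as
     * well. */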
2433#ifdef RUBY_ASAN_ENABLED
2434 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2435 void *fake_frame_start;
2436 void *fake_frame_end;
2437 bool is_fake_frame = asan_get_fake_stack_extents(
2438 ec->machine.asan_fake_stack_handle, obj,
2439 ec->machine.stack_start, ec->machine.stack_end,
2440 &fake_frame_start, &fake_frame_end
2441 );
2442 if (is_fake_frame) {
2443 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2444 }
2445#endif
2446}
2447
2448static VALUE
2449gc_location_internal(void *objspace, VALUE value)
2450{
2451 if (SPECIAL_CONST_P(value)) {
2452 return value;
2453 }
2454
2455 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2456
2457 return rb_gc_impl_location(objspace, value);
2458}
2459
2460VALUE
2461rb_gc_location(VALUE value)
2462{
2463 return gc_location_internal(rb_gc_get_objspace(), value);
2464}
2465
2466#if defined(__wasm__)
2467
2468
2469static VALUE *rb_stack_range_tmp[2];
2470
2471static void
2472rb_mark_locations(void *begin, void *end)
2473{
2474 rb_stack_range_tmp[0] = begin;
2475 rb_stack_range_tmp[1] = end;
2476}
2477
2478void
2479rb_gc_save_machine_context(void)
2480{
2481 // no-op
2482}
2483
2484# if defined(__EMSCRIPTEN__)
2485
2486static void
2487mark_current_machine_context(const rb_execution_context_t *ec)
2488{
2489 emscripten_scan_stack(rb_mark_locations);
2490 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2491
2492 emscripten_scan_registers(rb_mark_locations);
2493 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2494}
2495# else // use Asyncify version
2496
2497static void
2498mark_current_machine_context(rb_execution_context_t *ec)
2499{
2500 VALUE *stack_start, *stack_end;
2501 SET_STACK_END;
2502 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2503 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2504
2505 rb_wasm_scan_locals(rb_mark_locations);
2506 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2507}
2508
2509# endif
2510
2511#else // !defined(__wasm__)
2512
2513void
2514rb_gc_save_machine_context(void)
2515{
2516 rb_thread_t *thread = GET_THREAD();
2517
2518 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2519}
2520
2521
2522static void
2523mark_current_machine_context(const rb_execution_context_t *ec)
2524{
2525 rb_gc_mark_machine_context(ec);
2526}
2527#endif
2528
2529void
2530rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2531{
2532 VALUE *stack_start, *stack_end;
2533
2534 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2535 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2536
2537 void *data =
2538#ifdef RUBY_ASAN_ENABLED
2539 /* gc_mark_machine_stack_location_maybe() uses data as const */
2540        (rb_execution_context_t *)ec;
2541#else
2542 NULL;
2543#endif
2544
2545 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2546 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2547 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2548}
2549
2550static int
2551rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2552{
2553 gc_mark_and_pin_internal((VALUE)value);
2554
2555 return ST_CONTINUE;
2556}
2557
2558void
2559rb_mark_tbl(st_table *tbl)
2560{
2561 if (!tbl || tbl->num_entries == 0) return;
2562
2563 st_foreach(tbl, rb_mark_tbl_i, 0);
2564}
2565
2566static void
2567gc_mark_tbl_no_pin(st_table *tbl)
2568{
2569 if (!tbl || tbl->num_entries == 0) return;
2570
2571 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
2572}
2573
2574void
2575rb_mark_tbl_no_pin(st_table *tbl)
2576{
2577 gc_mark_tbl_no_pin(tbl);
2578}
2579
2580static enum rb_id_table_iterator_result
2581mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2582{
2583 struct rb_cvar_class_tbl_entry *entry;
2584
2585 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2586
2587 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2588 gc_mark_internal((VALUE)entry->cref);
2589
2590 return ID_TABLE_CONTINUE;
2591}
2592
2593static void
2594mark_cvc_tbl(void *objspace, VALUE klass)
2595{
2596 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
2597 if (tbl) {
2598 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2599 }
2600}
2601
2602static bool
2603gc_declarative_marking_p(const rb_data_type_t *type)
2604{
2605 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
2606}
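/* A sketch of a data type that opts into declarative marking (all names are
 * hypothetical; the RUBY_REF_* macros come from the public typed-data API):
 * instead of a dmark function, `dmark` holds a RUBY_REF_END-terminated list
 * of offsets of VALUE fields, which rb_gc_mark_children() walks directly:
 *
 *     struct blob { VALUE name; VALUE owner; };
 *
 *     static const size_t blob_refs[] = {
 *         RUBY_REF_EDGE(struct blob, name),
 *         RUBY_REF_EDGE(struct blob, owner),
 *         RUBY_REF_END
 *     };
 *
 *     static const rb_data_type_t blob_type = {
 *         .wrap_struct_name = "blob",
 *         .function = { .dmark = RUBY_REFS_LIST_PTR(blob_refs) },
 *         .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */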
2607
2608static enum rb_id_table_iterator_result
2609mark_const_table_i(VALUE value, void *objspace)
2610{
2611 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2612
2613 gc_mark_internal(ce->value);
2614 gc_mark_internal(ce->file);
2615
2616 return ID_TABLE_CONTINUE;
2617}
2618
2619void
2620rb_gc_mark_roots(void *objspace, const char **categoryp)
2621{
2622 rb_execution_context_t *ec = GET_EC();
2623 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2624
2625#define MARK_CHECKPOINT(category) do { \
2626 if (categoryp) *categoryp = category; \
2627} while (0)
2628
2629 MARK_CHECKPOINT("vm");
2630 rb_vm_mark(vm);
2631 if (vm->self) gc_mark_internal(vm->self);
2632
2633 MARK_CHECKPOINT("end_proc");
2634 rb_mark_end_proc();
2635
2636 MARK_CHECKPOINT("global_tbl");
2637 rb_gc_mark_global_tbl();
2638
2639#if USE_YJIT
2640 void rb_yjit_root_mark(void); // in Rust
2641
2642 if (rb_yjit_enabled_p) {
2643 MARK_CHECKPOINT("YJIT");
2644 rb_yjit_root_mark();
2645 }
2646#endif
2647
2648 MARK_CHECKPOINT("machine_context");
2649 mark_current_machine_context(ec);
2650
2651 MARK_CHECKPOINT("global_symbols");
2652 rb_sym_global_symbols_mark();
2653
2654 MARK_CHECKPOINT("finish");
2655
2656#undef MARK_CHECKPOINT
2657}
2658
2659#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA(d)->type->function.dmark
2660
2661void
2662rb_gc_ractor_moved(VALUE dest, VALUE src)
2663{
2664 rb_gc_obj_free(rb_gc_get_objspace(), src);
2665 MEMZERO((void *)src, char, rb_gc_obj_slot_size(src));
2666 RBASIC(src)->flags = T_OBJECT | FL_FREEZE; // Avoid mutations using bind_call, etc.
2667}
2668
2669void
2670rb_gc_mark_children(void *objspace, VALUE obj)
2671{
2672 if (FL_TEST(obj, FL_EXIVAR)) {
2673 rb_mark_generic_ivar(obj);
2674 }
2675
2676 switch (BUILTIN_TYPE(obj)) {
2677 case T_FLOAT:
2678 case T_BIGNUM:
2679 case T_SYMBOL:
2680        /* Not immediates, but these have no references and no singleton class.
2681 *
2682 * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
2683 * ("symbol.c: remove rb_gc_mark_symbols()") */
2684 return;
2685
2686 case T_NIL:
2687 case T_FIXNUM:
2688 rb_bug("rb_gc_mark() called for broken object");
2689 break;
2690
2691 case T_NODE:
2692 UNEXPECTED_NODE(rb_gc_mark);
2693 break;
2694
2695 case T_IMEMO:
2696 rb_imemo_mark_and_move(obj, false);
2697 return;
2698
2699 default:
2700 break;
2701 }
2702
2703 gc_mark_internal(RBASIC(obj)->klass);
2704
2705 switch (BUILTIN_TYPE(obj)) {
2706 case T_CLASS:
2707 if (FL_TEST(obj, FL_SINGLETON)) {
2708 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
2709 }
2710 // Continue to the shared T_CLASS/T_MODULE
2711 case T_MODULE:
2712 if (RCLASS_SUPER(obj)) {
2713 gc_mark_internal(RCLASS_SUPER(obj));
2714 }
2715
2716 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2717 mark_cvc_tbl(objspace, obj);
2718 rb_cc_table_mark(obj);
2719 if (rb_shape_obj_too_complex(obj)) {
2720 gc_mark_tbl_no_pin((st_table *)RCLASS_IVPTR(obj));
2721 }
2722 else {
2723 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
2724 gc_mark_internal(RCLASS_IVPTR(obj)[i]);
2725 }
2726 }
2727
2728 if (RCLASS_CONST_TBL(obj)) {
2729 rb_id_table_foreach_values(RCLASS_CONST_TBL(obj), mark_const_table_i, objspace);
2730 }
2731
2732 gc_mark_internal(RCLASS_EXT(obj)->classpath);
2733 break;
2734
2735 case T_ICLASS:
2736 if (RICLASS_OWNS_M_TBL_P(obj)) {
2737 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2738 }
2739 if (RCLASS_SUPER(obj)) {
2740 gc_mark_internal(RCLASS_SUPER(obj));
2741 }
2742
2743 if (RCLASS_INCLUDER(obj)) {
2744 gc_mark_internal(RCLASS_INCLUDER(obj));
2745 }
2746 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
2747 rb_cc_table_mark(obj);
2748 break;
2749
2750 case T_ARRAY:
2751 if (ARY_SHARED_P(obj)) {
2752 VALUE root = ARY_SHARED_ROOT(obj);
2753 gc_mark_internal(root);
2754 }
2755 else {
2756 long len = RARRAY_LEN(obj);
2757 const VALUE *ptr = RARRAY_CONST_PTR(obj);
2758 for (long i = 0; i < len; i++) {
2759 gc_mark_internal(ptr[i]);
2760 }
2761 }
2762 break;
2763
2764 case T_HASH:
2765 mark_hash(obj);
2766 break;
2767
2768 case T_STRING:
2769 if (STR_SHARED_P(obj)) {
2770 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
2771 /* Embedded shared strings cannot be moved because this string
2772 * points into the slot of the shared string. There may be code
2773 * using the RSTRING_PTR on the stack, which would pin this
2774 * string but not pin the shared string, causing it to move. */
2775 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
2776 }
2777 else {
2778 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
2779 }
2780 }
2781 break;
2782
2783 case T_DATA: {
2784 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
2785
2786 if (ptr) {
2787 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
2788 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
2789
2790 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
2791 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
2792 }
2793 }
2794 else {
2795 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
2796 RTYPEDDATA(obj)->type->function.dmark :
2797 RDATA(obj)->dmark;
2798 if (mark_func) (*mark_func)(ptr);
2799 }
2800 }
2801
2802 break;
2803 }
2804
2805 case T_OBJECT: {
2806 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
2807
2808 if (rb_shape_obj_too_complex(obj)) {
2809 gc_mark_tbl_no_pin(ROBJECT_IV_HASH(obj));
2810 }
2811 else {
2812 const VALUE * const ptr = ROBJECT_IVPTR(obj);
2813
2814 uint32_t len = ROBJECT_IV_COUNT(obj);
2815 for (uint32_t i = 0; i < len; i++) {
2816 gc_mark_internal(ptr[i]);
2817 }
2818 }
2819
2820 if (shape) {
2821 VALUE klass = RBASIC_CLASS(obj);
2822
2823            // Increment max_iv_count if applicable; it is used to determine size pool allocation
2824 attr_index_t num_of_ivs = shape->next_iv_index;
2825 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
2826 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
2827 }
2828 }
2829
2830 break;
2831 }
2832
2833 case T_FILE:
2834 if (RFILE(obj)->fptr) {
2835 gc_mark_internal(RFILE(obj)->fptr->self);
2836 gc_mark_internal(RFILE(obj)->fptr->pathv);
2837 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
2838 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
2839 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
2840 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
2841 gc_mark_internal(RFILE(obj)->fptr->write_lock);
2842 gc_mark_internal(RFILE(obj)->fptr->timeout);
2843 }
2844 break;
2845
2846 case T_REGEXP:
2847 gc_mark_internal(RREGEXP(obj)->src);
2848 break;
2849
2850 case T_MATCH:
2851 gc_mark_internal(RMATCH(obj)->regexp);
2852 if (RMATCH(obj)->str) {
2853 gc_mark_internal(RMATCH(obj)->str);
2854 }
2855 break;
2856
2857 case T_RATIONAL:
2858 gc_mark_internal(RRATIONAL(obj)->num);
2859 gc_mark_internal(RRATIONAL(obj)->den);
2860 break;
2861
2862 case T_COMPLEX:
2863 gc_mark_internal(RCOMPLEX(obj)->real);
2864 gc_mark_internal(RCOMPLEX(obj)->imag);
2865 break;
2866
2867 case T_STRUCT: {
2868 const long len = RSTRUCT_LEN(obj);
2869 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
2870
2871 for (long i = 0; i < len; i++) {
2872 gc_mark_internal(ptr[i]);
2873 }
2874
2875 break;
2876 }
2877
2878 default:
2879 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
2880 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
2881 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
2882 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2883 BUILTIN_TYPE(obj), (void *)obj,
2884 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
2885 }
2886}
2887
2888size_t
2889rb_gc_obj_optimal_size(VALUE obj)
2890{
2891 switch (BUILTIN_TYPE(obj)) {
2892 case T_ARRAY:
2893 return rb_ary_size_as_embedded(obj);
2894
2895 case T_OBJECT:
2896 if (rb_shape_obj_too_complex(obj)) {
2897 return sizeof(struct RObject);
2898 }
2899 else {
2900 return rb_obj_embedded_size(ROBJECT_IV_CAPACITY(obj));
2901 }
2902
2903 case T_STRING:
2904 return rb_str_size_as_embedded(obj);
2905
2906 case T_HASH:
2907 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
2908
2909 default:
2910 return 0;
2911 }
2912}
2913
2914void
2915rb_gc_writebarrier(VALUE a, VALUE b)
2916{
2917 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
2918}
2919
2920void
2921rb_gc_writebarrier_unprotect(VALUE obj)
2922{
2923 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
2924}
2925
2926/*
2927 * remember `obj' if needed.
2928 */
2929void
2930rb_gc_writebarrier_remember(VALUE obj)
2931{
2932 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
2933}
2934
2935void
2936rb_gc_copy_attributes(VALUE dest, VALUE obj)
2937{
2938 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
2939}
2940
2941int
2942rb_gc_modular_gc_loaded_p(void)
2943{
2944#if USE_MODULAR_GC
2945 return rb_gc_functions.modular_gc_loaded_p;
2946#else
2947 return false;
2948#endif
2949}
2950
2951const char *
2952rb_gc_active_gc_name(void)
2953{
2954 const char *gc_name = rb_gc_impl_active_gc_name();
2955
2956 const size_t len = strlen(gc_name);
2957 if (len > RB_GC_MAX_NAME_LEN) {
2958 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
2959 RB_GC_MAX_NAME_LEN, len, gc_name);
2960 }
2961
2962 return gc_name;
2963}
2964
2965struct rb_gc_object_metadata_entry *
2966rb_gc_object_metadata(VALUE obj)
2967{
2968 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
2969}
2970
2971/* GC */
2972
2973void *
2974rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
2975{
2976 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
2977}
2978
2979void
2980rb_gc_ractor_cache_free(void *cache)
2981{
2982 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
2983}
2984
2985void
2986rb_gc_register_mark_object(VALUE obj)
2987{
2988 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
2989 return;
2990
2991 rb_vm_register_global_object(obj);
2992}
2993
2994void
2995rb_gc_register_address(VALUE *addr)
2996{
2997 rb_vm_t *vm = GET_VM();
2998
2999 VALUE obj = *addr;
3000
3001 struct global_object_list *tmp = ALLOC(struct global_object_list);
3002 tmp->next = vm->global_object_list;
3003 tmp->varptr = addr;
3004 vm->global_object_list = tmp;
3005
3006 /*
3007 * Because some C extensions have assignment-then-register bugs,
3008     * we defensively guard `obj` here so that it does not get swept.
3009 */
3010 RB_GC_GUARD(obj);
3011 if (0 && !SPECIAL_CONST_P(obj)) {
3012 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3013 rb_obj_class(obj));
3014 rb_print_backtrace(stderr);
3015 }
3016}
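/* Typical C extension usage, as a sketch (the Init function is
 * hypothetical): the registered address becomes a GC root, so whatever
 * VALUE is stored there stays alive until rb_gc_unregister_address():
 *
 *     static VALUE cache;
 *
 *     void
 *     Init_myext(void)
 *     {
 *         cache = rb_ary_new();
 *         rb_gc_register_address(&cache);
 *     }
 */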
3017
3018void
3019rb_gc_unregister_address(VALUE *addr)
3020{
3021 rb_vm_t *vm = GET_VM();
3022 struct global_object_list *tmp = vm->global_object_list;
3023
3024 if (tmp->varptr == addr) {
3025 vm->global_object_list = tmp->next;
3026 xfree(tmp);
3027 return;
3028 }
3029 while (tmp->next) {
3030 if (tmp->next->varptr == addr) {
3031 struct global_object_list *t = tmp->next;
3032
3033 tmp->next = tmp->next->next;
3034 xfree(t);
3035 break;
3036 }
3037 tmp = tmp->next;
3038 }
3039}
3040
3041void
3042rb_global_variable(VALUE *var)
3043{
3044 rb_gc_register_address(var);
3045}
3046
3047static VALUE
3048gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3049{
3050 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3051
3052 return Qnil;
3053}
3054
3055/*
3056 * rb_objspace_each_objects() is a special C API to walk through the
3057 * Ruby object space. This C API is too difficult to use safely.
3058 * To be frank, you should not use it unless you have read the
3059 * source code of this function and understand exactly what it does.
3060 *
3061 * 'callback' will be called several times (once per heap page,
3062 * in the current implementation) with:
3063 *   vstart: a pointer to the first living object of the heap page.
3064 *   vend: a pointer to just past the valid heap page area.
3065 *   stride: the distance to the next VALUE.
3066 *
3067 * If callback() returns non-zero, the iteration is stopped.
3068 *
3069 * This is sample callback code that iterates over live objects:
3070 *
3071 * static int
3072 * sample_callback(void *vstart, void *vend, int stride, void *data)
3073 * {
3074 * VALUE v = (VALUE)vstart;
3075 * for (; v != (VALUE)vend; v += stride) {
3076 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3077 * // do something with live object 'v'
3078 * }
3079 * }
3080 *         return 0; // continue the iteration
3081 * }
3082 *
3083 * Note: 'vstart' is not the top of the heap page. It points at the first
3084 * living object, in order to grasp at least one object and avoid GC issues.
3085 * This means that you cannot walk through all Ruby object pages,
3086 * including freed object pages.
3087 *
3088 * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3089 *       However, variable values may be passed as 'stride' for
3090 *       various reasons. You must use stride instead of a
3091 *       constant value in the iteration.
3092 */
3093void
3094rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3095{
3096 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3097}
3098
3099static void
3100gc_ref_update_array(void *objspace, VALUE v)
3101{
3102 if (ARY_SHARED_P(v)) {
3103 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3104
3105 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3106
3107 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3108 // If the root is embedded and its location has changed
3109 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3110 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3111 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3112 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3113 }
3114 }
3115 else {
3116 long len = RARRAY_LEN(v);
3117
3118 if (len > 0) {
3119 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3120 for (long i = 0; i < len; i++) {
3121 UPDATE_IF_MOVED(objspace, ptr[i]);
3122 }
3123 }
3124
3125 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3126 if (rb_ary_embeddable_p(v)) {
3127 rb_ary_make_embedded(v);
3128 }
3129 }
3130 }
3131}
3132
3133static void
3134gc_ref_update_object(void *objspace, VALUE v)
3135{
3136 VALUE *ptr = ROBJECT_IVPTR(v);
3137
3138 if (rb_shape_obj_too_complex(v)) {
3139 gc_ref_update_table_values_only(ROBJECT_IV_HASH(v));
3140 return;
3141 }
3142
3143 size_t slot_size = rb_gc_obj_slot_size(v);
3144 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
3145 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3146 // Object can be re-embedded
3147 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
3148 RB_FL_SET_RAW(v, ROBJECT_EMBED);
3149 xfree(ptr);
3150 ptr = ROBJECT(v)->as.ary;
3151 }
3152
3153 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
3154 UPDATE_IF_MOVED(objspace, ptr[i]);
3155 }
3156}
3157
3158void
3159rb_gc_ref_update_table_values_only(st_table *tbl)
3160{
3161 gc_ref_update_table_values_only(tbl);
3162}
3163
3164/* Update MOVED references in a VALUE=>VALUE st_table */
3165void
3166rb_gc_update_tbl_refs(st_table *ptr)
3167{
3168 gc_update_table_refs(ptr);
3169}
3170
3171static void
3172gc_ref_update_hash(void *objspace, VALUE v)
3173{
3174 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3175}
3176
3177static void
3178gc_update_values(void *objspace, long n, VALUE *values)
3179{
3180 for (long i = 0; i < n; i++) {
3181 UPDATE_IF_MOVED(objspace, values[i]);
3182 }
3183}
3184
3185void
3186rb_gc_update_values(long n, VALUE *values)
3187{
3188 gc_update_values(rb_gc_get_objspace(), n, values);
3189}
3190
3191static enum rb_id_table_iterator_result
3192check_id_table_move(VALUE value, void *data)
3193{
3194 void *objspace = (void *)data;
3195
3196 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3197 return ID_TABLE_REPLACE;
3198 }
3199
3200 return ID_TABLE_CONTINUE;
3201}
3202
3203void
3204rb_gc_prepare_heap_process_object(VALUE obj)
3205{
3206 switch (BUILTIN_TYPE(obj)) {
3207 case T_STRING:
3208        // Precompute the string coderange. This both saves time for when it is
3209        // eventually needed, and avoids mutating heap pages after a potential fork.
3210        rb_enc_str_coderange(obj);
3211        break;
3212 default:
3213 break;
3214 }
3215}
3216
3217void
3218rb_gc_prepare_heap(void)
3219{
3220 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3221}
3222
3223size_t
3224rb_gc_heap_id_for_size(size_t size)
3225{
3226 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3227}
3228
3229bool
3230rb_gc_size_allocatable_p(size_t size)
3231{
3232 return rb_gc_impl_size_allocatable_p(size);
3233}
3234
3235static enum rb_id_table_iterator_result
3236update_id_table(VALUE *value, void *data, int existing)
3237{
3238 void *objspace = (void *)data;
3239
3240 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3241 *value = gc_location_internal(objspace, (VALUE)*value);
3242 }
3243
3244 return ID_TABLE_CONTINUE;
3245}
3246
3247static void
3248update_m_tbl(void *objspace, struct rb_id_table *tbl)
3249{
3250 if (tbl) {
3251 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3252 }
3253}
3254
3255static enum rb_id_table_iterator_result
3256update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3257{
3258 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3259 VM_ASSERT(vm_ccs_p(ccs));
3260
3261 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3262 ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
3263 }
3264
3265 for (int i=0; i<ccs->len; i++) {
3266 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3267 ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
3268 }
3269 }
3270
3271 // do not replace
3272 return ID_TABLE_CONTINUE;
3273}
3274
3275static void
3276update_cc_tbl(void *objspace, VALUE klass)
3277{
3278 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
3279 if (tbl) {
3280 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3281 }
3282}
3283
3284static enum rb_id_table_iterator_result
3285update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3286{
3287 struct rb_cvar_class_tbl_entry *entry;
3288
3289 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3290
3291 if (entry->cref) {
3292 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3293 }
3294
3295 entry->class_value = gc_location_internal(objspace, entry->class_value);
3296
3297 return ID_TABLE_CONTINUE;
3298}
3299
3300static void
3301update_cvc_tbl(void *objspace, VALUE klass)
3302{
3303 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
3304 if (tbl) {
3305 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3306 }
3307}
3308
3309static enum rb_id_table_iterator_result
3310update_const_table(VALUE value, void *objspace)
3311{
3312 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3313
3314 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3315 ce->value = gc_location_internal(objspace, ce->value);
3316 }
3317
3318 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3319 ce->file = gc_location_internal(objspace, ce->file);
3320 }
3321
3322 return ID_TABLE_CONTINUE;
3323}
3324
3325static void
3326update_const_tbl(void *objspace, struct rb_id_table *tbl)
3327{
3328 if (!tbl) return;
3329 rb_id_table_foreach_values(tbl, update_const_table, objspace);
3330}
3331
3332static void
3333update_subclass_entries(void *objspace, rb_subclass_entry_t *entry)
3334{
3335 while (entry) {
3336 UPDATE_IF_MOVED(objspace, entry->klass);
3337 entry = entry->next;
3338 }
3339}
3340
3341static void
3342update_class_ext(void *objspace, rb_classext_t *ext)
3343{
3344 UPDATE_IF_MOVED(objspace, ext->origin_);
3345 UPDATE_IF_MOVED(objspace, ext->includer);
3346 UPDATE_IF_MOVED(objspace, ext->refined_class);
3347 update_subclass_entries(objspace, ext->subclasses);
3348}
3349
3350static void
3351update_superclasses(void *objspace, VALUE obj)
3352{
3353 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3354 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
3355 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
3356 }
3357 }
3358}
3359
3360extern rb_symbols_t ruby_global_symbols;
3361#define global_symbols ruby_global_symbols
3362
3363struct global_vm_table_foreach_data {
3364    vm_table_foreach_callback_func callback;
3365 vm_table_update_callback_func update_callback;
3366 void *data;
3367 bool weak_only;
3368};
3369
3370static int
3371vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3372{
3373 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3374
3375 int ret = iter_data->callback((VALUE)key, iter_data->data);
3376
3377 if (!iter_data->weak_only) {
3378 if (ret != ST_CONTINUE) return ret;
3379
3380 ret = iter_data->callback((VALUE)value, iter_data->data);
3381 }
3382
3383 return ret;
3384}
3385
3386static int
3387vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3388{
3389 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3390
3391 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3392
3393 if (!iter_data->weak_only) {
3394 if (ret != ST_CONTINUE) return ret;
3395
3396 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3397 }
3398
3399 return ret;
3400}
3401
3402static int
3403vm_weak_table_str_sym_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3404{
3405 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3406
3407 if (!iter_data->weak_only) {
3408 int ret = iter_data->callback((VALUE)key, iter_data->data);
3409 if (ret != ST_CONTINUE) return ret;
3410 }
3411
3412 if (STATIC_SYM_P(value)) {
3413 return ST_CONTINUE;
3414 }
3415 else {
3416 return iter_data->callback((VALUE)value, iter_data->data);
3417 }
3418}
3419
3420static int
3421vm_weak_table_foreach_update_weak_value(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3422{
3423 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3424
3425 if (!iter_data->weak_only) {
3426 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3427 if (ret != ST_CONTINUE) return ret;
3428 }
3429
3430 return iter_data->update_callback((VALUE *)value, iter_data->data);
3431}
3432
3433static void
3434free_gen_ivtbl(VALUE obj, struct gen_ivtbl *ivtbl)
3435{
3436 if (UNLIKELY(rb_shape_obj_too_complex(obj))) {
3437 st_free_table(ivtbl->as.complex.table);
3438 }
3439
3440 xfree(ivtbl);
3441}
3442
3443static int
3444vm_weak_table_gen_ivar_foreach_too_complex_i(st_data_t _key, st_data_t value, st_data_t data, int error)
3445{
3446 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3447
3448 GC_ASSERT(!iter_data->weak_only);
3449
3450 if (SPECIAL_CONST_P((VALUE)value)) return ST_CONTINUE;
3451
3452 return iter_data->callback((VALUE)value, iter_data->data);
3453}
3454
3455static int
3456vm_weak_table_gen_ivar_foreach_too_complex_replace_i(st_data_t *_key, st_data_t *value, st_data_t data, int existing)
3457{
3458 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3459
3460 GC_ASSERT(!iter_data->weak_only);
3461
3462 return iter_data->update_callback((VALUE *)value, iter_data->data);
3463}
3464
3465struct st_table *rb_generic_ivtbl_get(void);
3466
3467static int
3468vm_weak_table_gen_ivar_foreach(st_data_t key, st_data_t value, st_data_t data)
3469{
3470 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3471
3472 int ret = iter_data->callback((VALUE)key, iter_data->data);
3473
3474 switch (ret) {
3475 case ST_CONTINUE:
3476 break;
3477
3478 case ST_DELETE:
3479 free_gen_ivtbl((VALUE)key, (struct gen_ivtbl *)value);
3480
3481 FL_UNSET((VALUE)key, FL_EXIVAR);
3482 return ST_DELETE;
3483
3484 case ST_REPLACE: {
3485 VALUE new_key = (VALUE)key;
3486 ret = iter_data->update_callback(&new_key, iter_data->data);
3487 if (key != new_key) ret = ST_DELETE;
3488 DURING_GC_COULD_MALLOC_REGION_START();
3489 {
3490 st_insert(rb_generic_ivtbl_get(), (st_data_t)new_key, value);
3491 }
3492 DURING_GC_COULD_MALLOC_REGION_END();
3493 key = (st_data_t)new_key;
3494 break;
3495 }
3496
3497 default:
3498 return ret;
3499 }
3500
3501 if (!iter_data->weak_only) {
3502 struct gen_ivtbl *ivtbl = (struct gen_ivtbl *)value;
3503
3504 if (rb_shape_obj_too_complex((VALUE)key)) {
3505 st_foreach_with_replace(
3506 ivtbl->as.complex.table,
3507 vm_weak_table_gen_ivar_foreach_too_complex_i,
3508 vm_weak_table_gen_ivar_foreach_too_complex_replace_i,
3509 data
3510 );
3511 }
3512 else {
3513 for (uint32_t i = 0; i < ivtbl->as.shape.numiv; i++) {
3514 if (SPECIAL_CONST_P(ivtbl->as.shape.ivptr[i])) continue;
3515
3516 int ivar_ret = iter_data->callback(ivtbl->as.shape.ivptr[i], iter_data->data);
3517 switch (ivar_ret) {
3518 case ST_CONTINUE:
3519 break;
3520 case ST_REPLACE:
3521 iter_data->update_callback(&ivtbl->as.shape.ivptr[i], iter_data->data);
3522 break;
3523 default:
3524 rb_bug("vm_weak_table_gen_ivar_foreach: return value %d not supported", ivar_ret);
3525 }
3526 }
3527 }
3528 }
3529
3530 return ret;
3531}
3532
3533static int
3534vm_weak_table_frozen_strings_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3535{
3536 int retval = vm_weak_table_foreach_weak_key(key, value, data, error);
3537 if (retval == ST_DELETE) {
3538 FL_UNSET((VALUE)key, RSTRING_FSTR);
3539 }
3540 return retval;
3541}
3542
3543void
3544rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
3545 vm_table_update_callback_func update_callback,
3546 void *data,
3547 bool weak_only,
3548 enum rb_gc_vm_weak_tables table)
3549{
3550 rb_vm_t *vm = GET_VM();
3551
3552 struct global_vm_table_foreach_data foreach_data = {
3553 .callback = callback,
3554 .update_callback = update_callback,
3555 .data = data,
3556 .weak_only = weak_only,
3557 };
3558
3559 switch (table) {
3560 case RB_GC_VM_CI_TABLE: {
3561 if (vm->ci_table) {
3562 st_foreach_with_replace(
3563 vm->ci_table,
3564 vm_weak_table_foreach_weak_key,
3565 vm_weak_table_foreach_update_weak_key,
3566 (st_data_t)&foreach_data
3567 );
3568 }
3569 break;
3570 }
3571 case RB_GC_VM_OVERLOADED_CME_TABLE: {
3572 if (vm->overloaded_cme_table) {
3573 st_foreach_with_replace(
3574 vm->overloaded_cme_table,
3575 vm_weak_table_foreach_weak_key,
3576 vm_weak_table_foreach_update_weak_key,
3577 (st_data_t)&foreach_data
3578 );
3579 }
3580 break;
3581 }
3582 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
3583 if (global_symbols.str_sym) {
3584 st_foreach_with_replace(
3585 global_symbols.str_sym,
3586 vm_weak_table_str_sym_foreach,
3587 vm_weak_table_foreach_update_weak_value,
3588 (st_data_t)&foreach_data
3589 );
3590 }
3591 break;
3592 }
3593 case RB_GC_VM_GENERIC_IV_TABLE: {
3594 st_table *generic_iv_tbl = rb_generic_ivtbl_get();
3595 if (generic_iv_tbl) {
3596 st_foreach(
3597 generic_iv_tbl,
3598 vm_weak_table_gen_ivar_foreach,
3599 (st_data_t)&foreach_data
3600 );
3601 }
3602 break;
3603 }
3604 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
3605 if (vm->frozen_strings) {
3606 st_foreach_with_replace(
3607 vm->frozen_strings,
3608 vm_weak_table_frozen_strings_foreach,
3609 vm_weak_table_foreach_update_weak_key,
3610 (st_data_t)&foreach_data
3611 );
3612 }
3613 break;
3614 }
3615 default:
3616 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
3617 }
3618}
3619
3620void
3621rb_gc_update_vm_references(void *objspace)
3622{
3623 rb_execution_context_t *ec = GET_EC();
3624 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3625
3626 rb_vm_update_references(vm);
3627 rb_gc_update_global_tbl();
3628 rb_sym_global_symbols_update_references();
3629
3630#if USE_YJIT
3631 void rb_yjit_root_update_references(void); // in Rust
3632
3633 if (rb_yjit_enabled_p) {
3634 rb_yjit_root_update_references();
3635 }
3636#endif
3637}
3638
3639void
3640rb_gc_update_object_references(void *objspace, VALUE obj)
3641{
3642 switch (BUILTIN_TYPE(obj)) {
3643 case T_CLASS:
3644 if (FL_TEST(obj, FL_SINGLETON)) {
3645 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
3646 }
3647 // Continue to the shared T_CLASS/T_MODULE
3648 case T_MODULE:
3649 if (RCLASS_SUPER((VALUE)obj)) {
3650 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3651 }
3652 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3653 update_cc_tbl(objspace, obj);
3654 update_cvc_tbl(objspace, obj);
3655 update_superclasses(objspace, obj);
3656
3657 if (rb_shape_obj_too_complex(obj)) {
3658 gc_ref_update_table_values_only(RCLASS_IV_HASH(obj));
3659 }
3660 else {
3661 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
3662 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
3663 }
3664 }
3665
3666 update_class_ext(objspace, RCLASS_EXT(obj));
3667 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3668
3669 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
3670 break;
3671
3672 case T_ICLASS:
3673 if (RICLASS_OWNS_M_TBL_P(obj)) {
3674 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3675 }
3676 if (RCLASS_SUPER((VALUE)obj)) {
3677 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3678 }
3679 update_class_ext(objspace, RCLASS_EXT(obj));
3680 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
3681 update_cc_tbl(objspace, obj);
3682 break;
3683
3684 case T_IMEMO:
3685 rb_imemo_mark_and_move(obj, true);
3686 return;
3687
3688 case T_NIL:
3689 case T_FIXNUM:
3690 case T_NODE:
3691 case T_MOVED:
3692 case T_NONE:
3693 /* These can't move */
3694 return;
3695
3696 case T_ARRAY:
3697 gc_ref_update_array(objspace, obj);
3698 break;
3699
3700 case T_HASH:
3701 gc_ref_update_hash(objspace, obj);
3702 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
3703 break;
3704
3705 case T_STRING:
3706 {
3707 if (STR_SHARED_P(obj)) {
3708 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
3709 }
3710
3711            /* If, after the move, the string is not embedded and can fit in
3712             * the slot it has been placed in, then re-embed it. */
3713 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
3714 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
3715 rb_str_make_embedded(obj);
3716 }
3717 }
3718
3719 break;
3720 }
3721 case T_DATA:
3722 /* Call the compaction callback, if it exists */
3723 {
3724 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3725 if (ptr) {
3726 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
3727 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3728
3729 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3730 VALUE *ref = (VALUE *)((char *)ptr + offset);
3731 *ref = gc_location_internal(objspace, *ref);
3732 }
3733 }
3734 else if (RTYPEDDATA_P(obj)) {
3735 RUBY_DATA_FUNC compact_func = RTYPEDDATA(obj)->type->function.dcompact;
3736 if (compact_func) (*compact_func)(ptr);
3737 }
3738 }
3739 }
3740 break;
3741
3742 case T_OBJECT:
3743 gc_ref_update_object(objspace, obj);
3744 break;
3745
3746 case T_FILE:
3747 if (RFILE(obj)->fptr) {
3748 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
3749 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
3750 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
3751 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
3752 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
3753 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
3754 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
3755 }
3756 break;
3757 case T_REGEXP:
3758 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
3759 break;
3760
3761 case T_SYMBOL:
3762 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
3763 break;
3764
3765 case T_FLOAT:
3766 case T_BIGNUM:
3767 break;
3768
3769 case T_MATCH:
3770 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
3771
3772 if (RMATCH(obj)->str) {
3773 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
3774 }
3775 break;
3776
3777 case T_RATIONAL:
3778 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
3779 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
3780 break;
3781
3782 case T_COMPLEX:
3783 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
3784 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
3785
3786 break;
3787
3788 case T_STRUCT:
3789 {
3790 long i, len = RSTRUCT_LEN(obj);
3791 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
3792
3793 for (i = 0; i < len; i++) {
3794 UPDATE_IF_MOVED(objspace, ptr[i]);
3795 }
3796 }
3797 break;
3798 default:
3799 rb_bug("unreachable");
3800 break;
3801 }
3802
3803 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
3804}
3805
3806VALUE
3807rb_gc_start(void)
3808{
3809 rb_gc();
3810 return Qnil;
3811}
3812
3813void
3814rb_gc(void)
3815{
3816 unless_objspace(objspace) { return; }
3817
3818 rb_gc_impl_start(objspace, true, true, true, false);
3819}
3820
3821int
3822rb_during_gc(void)
3823{
3824 unless_objspace(objspace) { return FALSE; }
3825
3826 return rb_gc_impl_during_gc_p(objspace);
3827}
3828
3829size_t
3830rb_gc_count(void)
3831{
3832 return rb_gc_impl_gc_count(rb_gc_get_objspace());
3833}
3834
3835static VALUE
3836gc_count(rb_execution_context_t *ec, VALUE self)
3837{
3838 return SIZET2NUM(rb_gc_count());
3839}
3840
3841VALUE
3842rb_gc_latest_gc_info(VALUE key)
3843{
3844 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
3845 rb_raise(rb_eTypeError, "non-hash or symbol given");
3846 }
3847
3848 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
3849
3850 if (val == Qundef) {
3851 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
3852 }
3853
3854 return val;
3855}
3856
3857static VALUE
3858gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
3859{
3860 if (NIL_P(arg)) {
3861 arg = rb_hash_new();
3862 }
3863 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3864 rb_raise(rb_eTypeError, "non-hash or symbol given");
3865 }
3866
3867 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3868
3869 if (ret == Qundef) {
3870 GC_ASSERT(SYMBOL_P(arg));
3871
3872 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3873 }
3874
3875 return ret;
3876}
3877
3878size_t
3879rb_gc_stat(VALUE arg)
3880{
3881 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3882 rb_raise(rb_eTypeError, "non-hash or symbol given");
3883 }
3884
3885 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3886
3887 if (ret == Qundef) {
3888 GC_ASSERT(SYMBOL_P(arg));
3889
3890 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3891 }
3892
3893 if (SYMBOL_P(arg)) {
3894 return NUM2SIZET(ret);
3895 }
3896 else {
3897 return 0;
3898 }
3899}
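/* Usage sketch: with a Symbol, rb_gc_stat() returns that single statistic;
 * with a Hash, it fills the hash and returns 0:
 *
 *     size_t runs = rb_gc_stat(ID2SYM(rb_intern("count")));
 *
 *     VALUE stats = rb_hash_new();
 *     rb_gc_stat(stats);   // `stats` now holds every statistic
 */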
3900
3901static VALUE
3902gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
3903{
3904 if (NIL_P(arg)) {
3905 arg = rb_hash_new();
3906 }
3907
3908 if (NIL_P(heap_name)) {
3909 if (!RB_TYPE_P(arg, T_HASH)) {
3910 rb_raise(rb_eTypeError, "non-hash given");
3911 }
3912 }
3913 else if (FIXNUM_P(heap_name)) {
3914 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
3915 rb_raise(rb_eTypeError, "non-hash or symbol given");
3916 }
3917 }
3918 else {
3919 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
3920 }
3921
3922 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
3923
3924 if (ret == Qundef) {
3925 GC_ASSERT(SYMBOL_P(arg));
3926
3927 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3928 }
3929
3930 return ret;
3931}
3932
3933static VALUE
3934gc_config_get(rb_execution_context_t *ec, VALUE self)
3935{
3936 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
3937 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
3938
3939 return cfg_hash;
3940}
3941
3942static VALUE
3943gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
3944{
3945 void *objspace = rb_gc_get_objspace();
3946
3947 rb_gc_impl_config_set(objspace, hash);
3948
3949 return rb_gc_impl_config_get(objspace);
3950}
3951
3952static VALUE
3953gc_stress_get(rb_execution_context_t *ec, VALUE self)
3954{
3955 return rb_gc_impl_stress_get(rb_gc_get_objspace());
3956}
3957
3958static VALUE
3959gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
3960{
3961 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
3962
3963 return flag;
3964}
3965
3966void
3967rb_gc_initial_stress_set(VALUE flag)
3968{
3969 initial_stress = flag;
3970}
3971
3972size_t *
3973rb_gc_heap_sizes(void)
3974{
3975 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
3976}
3977
3978VALUE
3979rb_gc_enable(void)
3980{
3981 return rb_objspace_gc_enable(rb_gc_get_objspace());
3982}
3983
3984VALUE
3985rb_objspace_gc_enable(void *objspace)
3986{
3987 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3988 rb_gc_impl_gc_enable(objspace);
3989 return RBOOL(disabled);
3990}
3991
3992static VALUE
3993gc_enable(rb_execution_context_t *ec, VALUE _)
3994{
3995 return rb_gc_enable();
3996}
3997
3998static VALUE
3999gc_disable_no_rest(void *objspace)
4000{
4001 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4002 rb_gc_impl_gc_disable(objspace, false);
4003 return RBOOL(disabled);
4004}
4005
4006VALUE
4007rb_gc_disable_no_rest(void)
4008{
4009 return gc_disable_no_rest(rb_gc_get_objspace());
4010}
4011
4012VALUE
4013rb_gc_disable(void)
4014{
4015 return rb_objspace_gc_disable(rb_gc_get_objspace());
4016}
4017
4018VALUE
4019rb_objspace_gc_disable(void *objspace)
4020{
4021 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4022 rb_gc_impl_gc_disable(objspace, true);
4023 return RBOOL(disabled);
4024}
4025
4026static VALUE
4027gc_disable(rb_execution_context_t *ec, VALUE _)
4028{
4029 return rb_gc_disable();
4030}
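/* Usage sketch: rb_gc_disable() and rb_gc_enable() both return the previous
 * state (true if GC was already disabled), so a caller can restore whatever
 * was in effect around a critical section:
 *
 *     VALUE was_disabled = rb_gc_disable();
 *     // ... allocation-sensitive section ...
 *     if (!RTEST(was_disabled)) rb_gc_enable();
 */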
4031
4032// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4033void
4034ruby_gc_set_params(void)
4035{
4036 rb_gc_impl_set_params(rb_gc_get_objspace());
4037}
4038
4039void
4040rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4041{
4042 RB_VM_LOCK_ENTER();
4043 {
4044        if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4045
4046 if (!RB_SPECIAL_CONST_P(obj)) {
4047 rb_vm_t *vm = GET_VM();
4048 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4049 struct gc_mark_func_data_struct mfd = {
4050 .mark_func = func,
4051 .data = data,
4052 };
4053
4054 vm->gc.mark_func_data = &mfd;
4055 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4056 vm->gc.mark_func_data = prev_mfd;
4057 }
4058 }
4059 RB_VM_LOCK_LEAVE();
4060}
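/* Usage sketch (the callback name is hypothetical): enumerate every object
 * directly referenced by `obj` without triggering a GC:
 *
 *     static void
 *     show_edge(VALUE child, void *data)
 *     {
 *         fprintf(stderr, "  -> %s\n", rb_obj_classname(child));
 *     }
 *
 *     // rb_objspace_reachable_objects_from(obj, show_edge, NULL);
 */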
4061
4062struct root_objects_data {
4063    const char *category;
4064 void (*func)(const char *category, VALUE, void *);
4065 void *data;
4066};
4067
4068static void
4069root_objects_from(VALUE obj, void *ptr)
4070{
4071 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4072 (*data->func)(data->category, obj, data->data);
4073}
4074
4075void
4076rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4077{
4078    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4079
4080 rb_vm_t *vm = GET_VM();
4081
4082 struct root_objects_data data = {
4083 .func = func,
4084 .data = passing_data,
4085 };
4086
4087 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4088 struct gc_mark_func_data_struct mfd = {
4089 .mark_func = root_objects_from,
4090 .data = &data,
4091 };
4092
4093 vm->gc.mark_func_data = &mfd;
4094 rb_gc_save_machine_context();
4095 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4096 vm->gc.mark_func_data = prev_mfd;
4097}
4098
4099/*
4100 ------------------------------ DEBUG ------------------------------
4101*/
4102
4103static const char *
4104type_name(int type, VALUE obj)
4105{
4106 switch (type) {
4107#define TYPE_NAME(t) case (t): return #t;
4108 TYPE_NAME(T_NONE);
4109 TYPE_NAME(T_OBJECT);
4110 TYPE_NAME(T_CLASS);
4111 TYPE_NAME(T_MODULE);
4112 TYPE_NAME(T_FLOAT);
4113 TYPE_NAME(T_STRING);
4114 TYPE_NAME(T_REGEXP);
4115 TYPE_NAME(T_ARRAY);
4116 TYPE_NAME(T_HASH);
4117 TYPE_NAME(T_STRUCT);
4118 TYPE_NAME(T_BIGNUM);
4119 TYPE_NAME(T_FILE);
4120 TYPE_NAME(T_MATCH);
4121 TYPE_NAME(T_COMPLEX);
4122 TYPE_NAME(T_RATIONAL);
4123 TYPE_NAME(T_NIL);
4124 TYPE_NAME(T_TRUE);
4125 TYPE_NAME(T_FALSE);
4126 TYPE_NAME(T_SYMBOL);
4127 TYPE_NAME(T_FIXNUM);
4128 TYPE_NAME(T_UNDEF);
4129 TYPE_NAME(T_IMEMO);
4130 TYPE_NAME(T_ICLASS);
4131 TYPE_NAME(T_MOVED);
4132 TYPE_NAME(T_ZOMBIE);
4133 case T_DATA:
4134 if (obj && rb_objspace_data_type_name(obj)) {
4135 return rb_objspace_data_type_name(obj);
4136 }
4137 return "T_DATA";
4138#undef TYPE_NAME
4139 }
4140 return "unknown";
4141}
4142
4143static const char *
4144obj_type_name(VALUE obj)
4145{
4146 return type_name(TYPE(obj), obj);
4147}
4148
4149const char *
4150rb_method_type_name(rb_method_type_t type)
4151{
4152 switch (type) {
4153 case VM_METHOD_TYPE_ISEQ: return "iseq";
4154    case VM_METHOD_TYPE_ATTRSET: return "attrset";
4155 case VM_METHOD_TYPE_IVAR: return "ivar";
4156 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4157 case VM_METHOD_TYPE_ALIAS: return "alias";
4158 case VM_METHOD_TYPE_REFINED: return "refined";
4159 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4160 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4161 case VM_METHOD_TYPE_MISSING: return "missing";
4162 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4163 case VM_METHOD_TYPE_UNDEF: return "undef";
4164 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4165 }
4166 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4167}
4168
4169static void
4170rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4171{
4172 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4173 VALUE path = rb_iseq_path(iseq);
4174 int n = ISEQ_BODY(iseq)->location.first_lineno;
4175 snprintf(buff, buff_size, " %s@%s:%d",
4176 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4177 RSTRING_PTR(path), n);
4178 }
4179}
4180
4181static int
4182str_len_no_raise(VALUE str)
4183{
4184 long len = RSTRING_LEN(str);
4185 if (len < 0) return 0;
4186 if (len > INT_MAX) return INT_MAX;
4187 return (int)len;
4188}
4189
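/* Helpers for the rb_raw_obj_info_* functions below: pos tracks the write
 * offset into buff, and each APPEND_* macro jumps to the end label instead
 * of writing past buff_size, so truncation is handled in one place. */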
4190#define BUFF_ARGS buff + pos, buff_size - pos
4191#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4192#define APPEND_S(s) do { \
4193        if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4194            goto end; \
4195        } \
4196        else { \
4197            memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); pos += rb_strlen_lit(s); \
4198        } \
4199    } while (0)
4200#define C(c, s) ((c) != 0 ? (s) : " ")
4201
4202static size_t
4203rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4204{
4205 size_t pos = 0;
4206
4207 if (SPECIAL_CONST_P(obj)) {
4208 APPEND_F("%s", obj_type_name(obj));
4209
4210 if (FIXNUM_P(obj)) {
4211 APPEND_F(" %ld", FIX2LONG(obj));
4212 }
4213 else if (SYMBOL_P(obj)) {
4214 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4215 }
4216 }
4217 else {
4218 // const int age = RVALUE_AGE_GET(obj);
4219
4220 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4221 // TODO: fixme
4222 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4223 // (void *)obj, age,
4224 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4225 // C(RVALUE_MARK_BITMAP(obj), "M"),
4226 // C(RVALUE_PIN_BITMAP(obj), "P"),
4227 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4228 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4229 // C(rb_objspace_garbage_object_p(obj), "G"),
4230 // obj_type_name(obj));
4231 }
4232 else {
4233 /* fake */
4234 // APPEND_F("%p [%dXXXX] %s",
4235 // (void *)obj, age,
4236 // obj_type_name(obj));
4237 }
4238
4239 if (internal_object_p(obj)) {
4240 /* ignore */
4241 }
4242 else if (RBASIC(obj)->klass == 0) {
4243 APPEND_S("(temporary internal)");
4244 }
4245 else if (RTEST(RBASIC(obj)->klass)) {
4246 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4247 if (!NIL_P(class_path)) {
4248 APPEND_F("(%s)", RSTRING_PTR(class_path));
4249 }
4250 }
4251 }
4252 end:
4253
4254 return pos;
4255}
4256
4257const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4258
4259static size_t
4260rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4261{
4262 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4263 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4264
4265 switch (type) {
4266 case T_NODE:
4267 UNEXPECTED_NODE(rb_raw_obj_info);
4268 break;
4269 case T_ARRAY:
4270 if (ARY_SHARED_P(obj)) {
4271 APPEND_S("shared -> ");
4272 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4273 }
4274 else if (ARY_EMBED_P(obj)) {
4275 APPEND_F("[%s%s] len: %ld (embed)",
4276 C(ARY_EMBED_P(obj), "E"),
4277 C(ARY_SHARED_P(obj), "S"),
4278 RARRAY_LEN(obj));
4279 }
4280 else {
4281 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
4282 C(ARY_EMBED_P(obj), "E"),
4283 C(ARY_SHARED_P(obj), "S"),
4284 RARRAY_LEN(obj),
4285 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
4286 (void *)RARRAY_CONST_PTR(obj));
4287 }
4288 break;
4289 case T_STRING: {
4290 if (STR_SHARED_P(obj)) {
4291 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4292 }
4293 else {
4294 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4295
4296 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4297 }
4298 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4299 break;
4300 }
4301 case T_SYMBOL: {
4302 VALUE fstr = RSYMBOL(obj)->fstr;
4303 ID id = RSYMBOL(obj)->id;
4304 if (RB_TYPE_P(fstr, T_STRING)) {
4305 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4306 }
4307 else {
4308 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4309 }
4310 break;
4311 }
4312 case T_MOVED: {
4313 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4314 break;
4315 }
4316 case T_HASH: {
4317 APPEND_F("[%c] %"PRIdSIZE,
4318 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4319 RHASH_SIZE(obj));
4320 break;
4321 }
4322 case T_CLASS:
4323 case T_MODULE:
4324 {
4325 VALUE class_path = rb_class_path_cached(obj);
4326 if (!NIL_P(class_path)) {
4327 APPEND_F("%s", RSTRING_PTR(class_path));
4328 }
4329 else {
4330 APPEND_S("(anon)");
4331 }
4332 break;
4333 }
4334 case T_ICLASS:
4335 {
4336 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4337 if (!NIL_P(class_path)) {
4338 APPEND_F("src:%s", RSTRING_PTR(class_path));
4339 }
4340 break;
4341 }
4342 case T_OBJECT:
4343 {
4344 if (rb_shape_obj_too_complex(obj)) {
4345 size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
4346 APPEND_F("(too_complex) len:%zu", hash_len);
4347 }
4348 else {
4349 uint32_t len = ROBJECT_IV_CAPACITY(obj);
4350
4351 if (RBASIC(obj)->flags & ROBJECT_EMBED) {
4352 APPEND_F("(embed) len:%d", len);
4353 }
4354 else {
4355 VALUE *ptr = ROBJECT_IVPTR(obj);
4356 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
4357 }
4358 }
4359 }
4360 break;
4361 case T_DATA: {
4362 const struct rb_block *block;
4363 const rb_iseq_t *iseq;
4364 if (rb_obj_is_proc(obj) &&
4365 (block = vm_proc_block(obj)) != NULL &&
4366 (vm_block_type(block) == block_type_iseq) &&
4367 (iseq = vm_block_iseq(block)) != NULL) {
4368 rb_raw_iseq_info(BUFF_ARGS, iseq);
4369 }
4370 else if (rb_ractor_p(obj)) {
4371 rb_ractor_t *r = (void *)DATA_PTR(obj);
4372 if (r) {
4373 APPEND_F("r:%d", r->pub.id);
4374 }
4375 }
4376 else {
4377 const char * const type_name = rb_objspace_data_type_name(obj);
4378 if (type_name) {
4379 APPEND_F("%s", type_name);
4380 }
4381 }
4382 break;
4383 }
4384 case T_IMEMO: {
4385 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4386
4387 switch (imemo_type(obj)) {
4388 case imemo_ment:
4389 {
4390 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4391
4392 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4393 rb_id2name(me->called_id),
4394 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4395 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4396 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4397 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4398 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4399 me->def ? rb_method_type_name(me->def->type) : "NULL",
4400 me->def ? me->def->aliased : -1,
4401 (void *)me->owner, // obj_info(me->owner),
4402 (void *)me->defined_class); //obj_info(me->defined_class)));
4403
4404 if (me->def) {
4405 switch (me->def->type) {
4406 case VM_METHOD_TYPE_ISEQ:
4407 APPEND_S(" (iseq:");
4408 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4409 APPEND_S(")");
4410 break;
4411 default:
4412 break;
4413 }
4414 }
4415
4416 break;
4417 }
4418 case imemo_iseq: {
4419 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4420 rb_raw_iseq_info(BUFF_ARGS, iseq);
4421 break;
4422 }
4423 case imemo_callinfo:
4424 {
4425 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4426 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4427 rb_id2name(vm_ci_mid(ci)),
4428 vm_ci_flag(ci),
4429 vm_ci_argc(ci),
4430 vm_ci_kwarg(ci) ? "available" : "NULL");
4431 break;
4432 }
4433 case imemo_callcache:
4434 {
4435 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4436 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
4437 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4438
4439 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4440 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4441 cme ? rb_id2name(cme->called_id) : "<NULL>",
4442 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4443 (void *)cme,
4444 (void *)(uintptr_t)vm_cc_call(cc));
4445 break;
4446 }
4447 default:
4448 break;
4449 }
4450 }
4451 default:
4452 break;
4453 }
4454 }
4455 end:
4456
4457 return pos;
4458}
4459
4460#undef C
4461
4462void
4463rb_asan_poison_object(VALUE obj)
4464{
4465 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4466 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
4467}
4468
4469void
4470rb_asan_unpoison_object(VALUE obj, bool newobj_p)
4471{
4472 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4473 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
4474}
4475
4476void *
4477rb_asan_poisoned_object_p(VALUE obj)
4478{
4479 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
4480 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
4481}
4482
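/* Runs the attached block with obj temporarily unpoisoned. The for header
 * executes exactly once: unpoisoning starts out non-NULL, and the increment
 * expression restores the previous poison state and yields NULL, terminating
 * the loop. */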
4483#define asan_unpoisoning_object(obj) \
4484 for (void *poisoned = asan_unpoison_object_temporary(obj), \
4485 *unpoisoning = &poisoned; /* flag to loop just once */ \
4486 unpoisoning; \
4487 unpoisoning = asan_poison_object_restore(obj, poisoned))
4488
4489const char *
4490rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4491{
4492 asan_unpoisoning_object(obj) {
4493 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4494        pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4495 if (pos >= buff_size) {} // truncated
4496 }
4497
4498 return buff;
4499}
4500
4501#undef APPEND_S
4502#undef APPEND_F
4503#undef BUFF_ARGS
4504
4505#if RGENGC_OBJ_INFO
4506#define OBJ_INFO_BUFFERS_NUM 10
4507#define OBJ_INFO_BUFFERS_SIZE 0x100
4508static rb_atomic_t obj_info_buffers_index = 0;
4509static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
4510
4511/* Atomically increments *var, wrapping it back to 0 once maxval is
4512 * reached. Returns the old value reduced modulo maxval (0...maxval). */
4513static rb_atomic_t
4514atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
4515{
4516 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
4517 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
4518 const rb_atomic_t newval = oldval + 1;
4519 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
4520 oldval %= maxval;
4521 }
4522 return oldval;
4523}
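/* For example, with maxval == 4 successive calls return 0, 1, 2, 3, 0, ...;
 * obj_info() below uses this to rotate through its OBJ_INFO_BUFFERS_NUM
 * static buffers. */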
4524
4525static const char *
4526obj_info(VALUE obj)
4527{
4528 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
4529 char *const buff = obj_info_buffers[index];
4530 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
4531}
4532#else
4533static const char *
4534obj_info(VALUE obj)
4535{
4536 return obj_type_name(obj);
4537}
4538#endif
4539
4540/*
4541 ------------------------ Extended allocator ------------------------
4542*/
4543
4544struct gc_raise_tag {
4545    VALUE exc;
4546 const char *fmt;
4547 va_list *ap;
4548};
4549
4550static void *
4551gc_vraise(void *ptr)
4552{
4553 struct gc_raise_tag *argv = ptr;
4554 rb_vraise(argv->exc, argv->fmt, *argv->ap);
4555 UNREACHABLE_RETURN(NULL);
4556}
4557
4558static void
4559gc_raise(VALUE exc, const char *fmt, ...)
4560{
4561 va_list ap;
4562 va_start(ap, fmt);
4563 struct gc_raise_tag argv = {
4564 exc, fmt, &ap,
4565 };
4566
4567 if (ruby_thread_has_gvl_p()) {
4568        gc_vraise(&argv);
4569        UNREACHABLE;
4570    }
4571 else if (ruby_native_thread_p()) {
4572        rb_thread_call_with_gvl(gc_vraise, &argv);
4573        UNREACHABLE;
4574    }
4575 else {
4576 /* Not in a ruby thread */
4577 fprintf(stderr, "%s", "[FATAL] ");
4578 vfprintf(stderr, fmt, ap);
4579 }
4580
4581 va_end(ap);
4582 abort();
4583}
4584
4585NORETURN(static void negative_size_allocation_error(const char *));
4586static void
4587negative_size_allocation_error(const char *msg)
4588{
4589 gc_raise(rb_eNoMemError, "%s", msg);
4590}
4591
4592static void *
4593ruby_memerror_body(void *dummy)
4594{
4595 rb_memerror();
4596 return 0;
4597}
4598
4599NORETURN(static void ruby_memerror(void));
4601static void
4602ruby_memerror(void)
4603{
4604 if (ruby_thread_has_gvl_p()) {
4605 rb_memerror();
4606 }
4607 else {
4608 if (ruby_native_thread_p()) {
4609 rb_thread_call_with_gvl(ruby_memerror_body, 0);
4610 }
4611 else {
4612 /* no ruby thread */
4613 fprintf(stderr, "[FATAL] failed to allocate memory\n");
4614 }
4615 }
4616
4617    /* There is an ongoing discussion about whether we should die here; */
4618    /* we may rethink this decision later. */
4619 exit(EXIT_FAILURE);
4620}
4621
4622void
4623rb_memerror(void)
4624{
4625 /* the `GET_VM()->special_exceptions` below assumes that
4626 * the VM is reachable from the current thread. We should
4627 * definitely make sure of that. */
4628 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
4629
4630 rb_execution_context_t *ec = GET_EC();
4631 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
4632
4633 if (!exc ||
4634 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
4635 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
4636 fprintf(stderr, "[FATAL] failed to allocate memory\n");
4637 exit(EXIT_FAILURE);
4638 }
4639 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
4640 rb_ec_raised_clear(ec);
4641 }
4642 else {
4643 rb_ec_raised_set(ec, RAISED_NOMEMORY);
4644 exc = ruby_vm_special_exception_copy(exc);
4645 }
4646 ec->errinfo = exc;
4647 EC_JUMP_TAG(ec, TAG_RAISE);
4648}
4649
4650bool
4651rb_memerror_reentered(void)
4652{
4653 rb_execution_context_t *ec = GET_EC();
4654 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
4655}
4656
4657void
4658rb_malloc_info_show_results(void)
4659{
4660}
4661
4662static void *
4663handle_malloc_failure(void *ptr)
4664{
4665 if (LIKELY(ptr)) {
4666 return ptr;
4667 }
4668 else {
4669 ruby_memerror();
4670 UNREACHABLE_RETURN(ptr);
4671 }
4672}
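/* Each public ruby_x* allocator below is split into a *_body helper that
 * returns NULL on failure and a thin wrapper that routes the result through
 * handle_malloc_failure(), keeping the NoMemoryError raise out of the
 * allocation fast path. */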
4673
4674static void *ruby_xmalloc_body(size_t size);
4675
4676void *
4677ruby_xmalloc(size_t size)
4678{
4679 return handle_malloc_failure(ruby_xmalloc_body(size));
4680}
4681
4682static void *
4683ruby_xmalloc_body(size_t size)
4684{
4685 if ((ssize_t)size < 0) {
4686 negative_size_allocation_error("too large allocation size");
4687 }
4688
4689 return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
4690}
4691
4692void
4693ruby_malloc_size_overflow(size_t count, size_t elsize)
4694{
4695 rb_raise(rb_eArgError,
4696 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
4697 count, elsize);
4698}
4699
4700void
4701ruby_malloc_add_size_overflow(size_t x, size_t y)
4702{
4703 rb_raise(rb_eArgError,
4704 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
4705 x, y);
4706}
4707
4708static void *ruby_xmalloc2_body(size_t n, size_t size);
4709
4710void *
4711ruby_xmalloc2(size_t n, size_t size)
4712{
4713 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
4714}
4715
4716static void *
4717ruby_xmalloc2_body(size_t n, size_t size)
4718{
4719 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4720}
4721
4722static void *ruby_xcalloc_body(size_t n, size_t size);
4723
4724void *
4725ruby_xcalloc(size_t n, size_t size)
4726{
4727 return handle_malloc_failure(ruby_xcalloc_body(n, size));
4728}
4729
4730static void *
4731ruby_xcalloc_body(size_t n, size_t size)
4732{
4733 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4734}
4735
4736static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
4737
4738#ifdef ruby_sized_xrealloc
4739#undef ruby_sized_xrealloc
4740#endif
4741void *
4742ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
4743{
4744 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
4745}
4746
4747static void *
4748ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
4749{
4750 if ((ssize_t)new_size < 0) {
4751 negative_size_allocation_error("too large allocation size");
4752 }
4753
4754 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
4755}
4756
4757void *
4758ruby_xrealloc(void *ptr, size_t new_size)
4759{
4760 return ruby_sized_xrealloc(ptr, new_size, 0);
4761}
4762
4763static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
4764
4765#ifdef ruby_sized_xrealloc2
4766#undef ruby_sized_xrealloc2
4767#endif
4768void *
4769ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
4770{
4771 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
4772}
4773
4774static void *
4775ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
4776{
4777 size_t len = xmalloc2_size(n, size);
4778 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
4779}
4780
4781void *
4782ruby_xrealloc2(void *ptr, size_t n, size_t size)
4783{
4784 return ruby_sized_xrealloc2(ptr, n, size, 0);
4785}
4786
4787#ifdef ruby_sized_xfree
4788#undef ruby_sized_xfree
4789#endif
4790void
4791ruby_sized_xfree(void *x, size_t size)
4792{
4793 if (LIKELY(x)) {
4794 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
4795 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
4796 * that case. */
4797 if (LIKELY(GET_VM())) {
4798 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
4799 }
4800 else {
4801 ruby_mimfree(x);
4802 }
4803 }
4804}
4805
4806void
4807ruby_xfree(void *x)
4808{
4809 ruby_sized_xfree(x, 0);
4810}
4811
4812void *
4813rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4814{
4815 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4816 return ruby_xmalloc(w);
4817}
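/* A usage sketch (struct point_buf is illustrative, not part of this file):
 * allocating a header plus n trailing elements, with the size arithmetic
 * n * sizeof(double) + sizeof(header) checked for overflow before allocating.
 *
 *     struct point_buf { size_t len; double points[]; };
 *     struct point_buf *p =
 *         rb_xmalloc_mul_add(n, sizeof(double), sizeof(struct point_buf));
 */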
4818
4819void *
4820rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4821{
4822 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4823 return ruby_xcalloc(w, 1);
4824}
4825
4826void *
4827rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
4828{
4829 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4830 return ruby_xrealloc((void *)p, w);
4831}
4832
4833void *
4834rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4835{
4836 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4837 return ruby_xmalloc(u);
4838}
4839
4840void *
4841rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4842{
4843 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4844 return ruby_xcalloc(u, 1);
4845}
4846
4847/* Mimics ruby_xmalloc, but does not require rb_objspace.
4848 * The returned pointer must remain suitable for ruby_xfree.
4849 */
4850void *
4851ruby_mimmalloc(size_t size)
4852{
4853 void *mem;
4854#if CALC_EXACT_MALLOC_SIZE
4855 size += sizeof(struct malloc_obj_info);
4856#endif
4857 mem = malloc(size);
4858#if CALC_EXACT_MALLOC_SIZE
4859 if (!mem) {
4860 return NULL;
4861 }
4862 else
4863 /* set 0 for consistency of allocated_size/allocations */
4864 {
4865 struct malloc_obj_info *info = mem;
4866 info->size = 0;
4867 mem = info + 1;
4868 }
4869#endif
4870 return mem;
4871}
4872
4873void *
4874ruby_mimcalloc(size_t num, size_t size)
4875{
4876 void *mem;
4877#if CALC_EXACT_MALLOC_SIZE
4878 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
4879 if (UNLIKELY(t.left)) {
4880 return NULL;
4881 }
4882 size = t.right + sizeof(struct malloc_obj_info);
4883 mem = calloc1(size);
4884 if (!mem) {
4885 return NULL;
4886 }
4887 else
4888 /* set 0 for consistency of allocated_size/allocations */
4889 {
4890 struct malloc_obj_info *info = mem;
4891 info->size = 0;
4892 mem = info + 1;
4893 }
4894#else
4895 mem = calloc(num, size);
4896#endif
4897 return mem;
4898}
4899
4900void
4901ruby_mimfree(void *ptr)
4902{
4903#if CALC_EXACT_MALLOC_SIZE
4904 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
4905 ptr = info;
4906#endif
4907 free(ptr);
4908}
4909
4910void
4911rb_gc_adjust_memory_usage(ssize_t diff)
4912{
4913 unless_objspace(objspace) { return; }
4914
4915 rb_gc_impl_adjust_memory_usage(objspace, diff);
4916}
4917
4918const char *
4919rb_obj_info(VALUE obj)
4920{
4921 return obj_info(obj);
4922}
4923
4924void
4925rb_obj_info_dump(VALUE obj)
4926{
4927 char buff[0x100];
4928 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
4929}
4930
4931void
4932rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
4933{
4934 char buff[0x100];
4935 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
4936}
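/* rb_obj_info_dump() and rb_obj_info_dump_loc() are intended for ad-hoc
 * debugging, e.g. called from a debugger or temporary instrumentation:
 *
 *     rb_obj_info_dump(obj);
 *     rb_obj_info_dump_loc(obj, __FILE__, __LINE__, __func__);
 */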
4937
4938void
4939rb_gc_before_fork(void)
4940{
4941 rb_gc_impl_before_fork(rb_gc_get_objspace());
4942}
4943
4944void
4945rb_gc_after_fork(rb_pid_t pid)
4946{
4947 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
4948}
4949
4950/*
4951 * Document-module: ObjectSpace
4952 *
4953 * The ObjectSpace module contains a number of routines
4954 * that interact with the garbage collection facility and allow you to
4955 * traverse all living objects with an iterator.
4956 *
4957 * ObjectSpace also provides support for object finalizers, procs that will be
4958 * called after a specific object has been destroyed by garbage collection. See
4959 * the documentation for +ObjectSpace.define_finalizer+ for important
4960 * information on how to use this method correctly.
4961 *
4962 * a = "A"
4963 * b = "B"
4964 *
4965 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
4966 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
4967 *
4968 * a = nil
4969 * b = nil
4970 *
4971 * _produces:_
4972 *
4973 * Finalizer two on 537763470
4974 * Finalizer one on 537763480
4975 */
4976
4977/* Document-class: GC::Profiler
4978 *
4979 * The GC profiler provides access to information on GC runs including time,
4980 * length and object space size.
4981 *
4982 * Example:
4983 *
4984 * GC::Profiler.enable
4985 *
4986 * require 'rdoc/rdoc'
4987 *
4988 * GC::Profiler.report
4989 *
4990 * GC::Profiler.disable
4991 *
4992 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
4993 */
4994
4995#include "gc.rbinc"
4996
4997void
4998Init_GC(void)
4999{
5000#undef rb_intern
5001 malloc_offset = gc_compute_malloc_offset();
5002
5003 rb_mGC = rb_define_module("GC");
5004
5005 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5006
5007 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5008
5009 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5010 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5011
5012 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5013
5014 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5015
5016 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5017 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5018
5019 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5020
5021 rb_gc_impl_init();
5022}
5023
5024// Set a name for the anonymous virtual memory area. `addr` is the starting
5025// address of the area and `size` is its length in bytes. `name` is a
5026// NUL-terminated human-readable string.
5027//
5028// This function is usually called after calling `mmap()`. The human-readable
5029// annotation helps developers identify the call site of `mmap()` that created
5030// the memory mapping.
5031//
5032// This function currently only works on Linux 5.17 or higher. After calling
5033// this function, we can see annotations in the form of "[anon:...]" in
5034// `/proc/self/maps`, where `...` is the content of `name`. This function has
5035// no effect when called on other platforms.
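//
// A usage sketch (the name "Ruby:GC:example" is illustrative): annotating a
// fresh anonymous mapping so it shows up in /proc/self/maps.
//
//     void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//     if (mem != MAP_FAILED) {
//         ruby_annotate_mmap(mem, len, "Ruby:GC:example");
//     }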
5036void
5037ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5038{
5039#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5040 // The name length cannot exceed 80 (including the '\0').
5041 RUBY_ASSERT(strlen(name) < 80);
5042 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5043 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5044 // reasons.
5045 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5046 // 2. addr is an invalid address.
5047 // 3. The string pointed by name is too long.
5048 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5049 // happen if we run the compiled binary on an old kernel. In theory, all
5050 // other errors should result in a failure. But since EINVAL cannot tell
5051 // the first error from others, and this function is mainly used for
5052 // debugging, we silently ignore the error.
5053 errno = 0;
5054#endif
5055}