Ruby 3.5.0dev (2025-04-25 revision 62a7f17157c5c67956d95a2582f8f256df13f9e2)
gc.c (62a7f17157c5c67956d95a2582f8f256df13f9e2)
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
23# include "wasm/setjmp.h"
24# include "wasm/machine.h"
25#else
26# include <setjmp.h>
27#endif
28#include <stdarg.h>
29#include <stdio.h>
30
31/* MALLOC_HEADERS_BEGIN */
32#ifndef HAVE_MALLOC_USABLE_SIZE
33# ifdef _WIN32
34# define HAVE_MALLOC_USABLE_SIZE
35# define malloc_usable_size(a) _msize(a)
36# elif defined HAVE_MALLOC_SIZE
37# define HAVE_MALLOC_USABLE_SIZE
38# define malloc_usable_size(a) malloc_size(a)
39# endif
40#endif
41
42#ifdef HAVE_MALLOC_USABLE_SIZE
43# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
44/* Alternative malloc header is included in ruby/missing.h */
45# elif defined(HAVE_MALLOC_H)
46# include <malloc.h>
47# elif defined(HAVE_MALLOC_NP_H)
48# include <malloc_np.h>
49# elif defined(HAVE_MALLOC_MALLOC_H)
50# include <malloc/malloc.h>
51# endif
52#endif
53
54/* MALLOC_HEADERS_END */
55
56#ifdef HAVE_SYS_TIME_H
57# include <sys/time.h>
58#endif
59
60#ifdef HAVE_SYS_RESOURCE_H
61# include <sys/resource.h>
62#endif
63
64#if defined _WIN32 || defined __CYGWIN__
65# include <windows.h>
66#elif defined(HAVE_POSIX_MEMALIGN)
67#elif defined(HAVE_MEMALIGN)
68# include <malloc.h>
69#endif
70
71#include <sys/types.h>
72
73#ifdef __EMSCRIPTEN__
74#include <emscripten.h>
75#endif
76
77/* For ruby_annotate_mmap */
78#ifdef HAVE_SYS_PRCTL_H
79#include <sys/prctl.h>
80#endif
81
82#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
83
84#include "constant.h"
85#include "darray.h"
86#include "debug_counter.h"
87#include "eval_intern.h"
88#include "gc/gc.h"
89#include "id_table.h"
90#include "internal.h"
91#include "internal/class.h"
92#include "internal/compile.h"
93#include "internal/complex.h"
94#include "internal/cont.h"
95#include "internal/error.h"
96#include "internal/eval.h"
97#include "internal/gc.h"
98#include "internal/hash.h"
99#include "internal/imemo.h"
100#include "internal/io.h"
101#include "internal/numeric.h"
102#include "internal/object.h"
103#include "internal/proc.h"
104#include "internal/rational.h"
105#include "internal/sanitizers.h"
106#include "internal/struct.h"
107#include "internal/symbol.h"
108#include "internal/thread.h"
109#include "internal/variable.h"
110#include "internal/warnings.h"
111#include "probes.h"
112#include "regint.h"
113#include "ruby/debug.h"
114#include "ruby/io.h"
115#include "ruby/re.h"
116#include "ruby/st.h"
117#include "ruby/thread.h"
118#include "ruby/util.h"
119#include "ruby/vm.h"
120#include "ruby_assert.h"
121#include "ruby_atomic.h"
122#include "symbol.h"
123#include "variable.h"
124#include "vm_core.h"
125#include "vm_sync.h"
126#include "vm_callinfo.h"
127#include "ractor_core.h"
128#include "yjit.h"
129
130#include "builtin.h"
131#include "shape.h"
132
133unsigned int
134rb_gc_vm_lock(void)
135{
136 unsigned int lev;
137 RB_VM_LOCK_ENTER_LEV(&lev);
138 return lev;
139}
140
141void
142rb_gc_vm_unlock(unsigned int lev)
143{
144 RB_VM_LOCK_LEAVE_LEV(&lev);
145}
146
147unsigned int
148rb_gc_cr_lock(void)
149{
150 unsigned int lev;
151 RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
152 return lev;
153}
154
155void
156rb_gc_cr_unlock(unsigned int lev)
157{
158 RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
159}
160
161unsigned int
162rb_gc_vm_lock_no_barrier(void)
163{
164 unsigned int lev = 0;
165 RB_VM_LOCK_ENTER_LEV_NB(&lev);
166 return lev;
167}
168
169void
170rb_gc_vm_unlock_no_barrier(unsigned int lev)
171{
172 RB_VM_LOCK_LEAVE_LEV(&lev);
173}
174
175void
176rb_gc_vm_barrier(void)
177{
178 rb_vm_barrier();
179}
180
181#if USE_MODULAR_GC
182void *
183rb_gc_get_ractor_newobj_cache(void)
184{
185 return GET_RACTOR()->newobj_cache;
186}
187
188void
189rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
190{
191 rb_native_mutex_initialize(&context->lock);
192 context->ec = GET_EC();
193}
194
195void
196rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
197{
198 rb_native_mutex_lock(&context->lock);
199
200 GC_ASSERT(rb_current_execution_context(false) == NULL);
201
202#ifdef RB_THREAD_LOCAL_SPECIFIER
203 rb_current_ec_set(context->ec);
204#else
205 native_tls_set(ruby_current_ec_key, context->ec);
206#endif
207}
208
209void
210rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
211{
212 rb_native_mutex_unlock(&context->lock);
213
214 GC_ASSERT(rb_current_execution_context(true) == context->ec);
215
216#ifdef RB_THREAD_LOCAL_SPECIFIER
217 rb_current_ec_set(NULL);
218#else
219 native_tls_set(ruby_current_ec_key, NULL);
220#endif
221}
222#endif
223
224bool
225rb_gc_event_hook_required_p(rb_event_flag_t event)
226{
227 return ruby_vm_event_flags & event;
228}
229
230void
231rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
232{
233 if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
234
235 rb_execution_context_t *ec = GET_EC();
236 if (!ec->cfp) return;
237
238 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
239}
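/* Illustrative note (not in the original source): these hooks are what fire
 * for TracePoints created against internal events. From a C extension, an
 * allocation observer could look like this sketch; `newobj_cb` is a
 * hypothetical callback:
 *
 *   static void newobj_cb(VALUE tpval, void *data) { ... }
 *
 *   VALUE tp = rb_tracepoint_new(0, RUBY_INTERNAL_EVENT_NEWOBJ, newobj_cb, NULL);
 *   rb_tracepoint_enable(tp);
 */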
240
241void *
242rb_gc_get_objspace(void)
243{
244 return GET_VM()->gc.objspace;
245}
246
247
248void
249rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
250{
251 rb_ractor_t *r = NULL;
252 if (RB_LIKELY(ruby_single_main_ractor)) {
253 GC_ASSERT(
254 ccan_list_empty(&GET_VM()->ractor.set) ||
255 (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
256 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
257 );
258
259 func(ruby_single_main_ractor->newobj_cache, data);
260 }
261 else {
262 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
263 func(r->newobj_cache, data);
264 }
265 }
266}
267
268void
269rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
270{
271 volatile struct {
272 VALUE errinfo;
273 VALUE final;
274 rb_control_frame_t *cfp;
275 VALUE *sp;
276 long finished;
277 } saved;
278
279 rb_execution_context_t * volatile ec = GET_EC();
280#define RESTORE_FINALIZER() (\
281 ec->cfp = saved.cfp, \
282 ec->cfp->sp = saved.sp, \
283 ec->errinfo = saved.errinfo)
284
285 saved.errinfo = ec->errinfo;
286 saved.cfp = ec->cfp;
287 saved.sp = ec->cfp->sp;
288 saved.finished = 0;
289 saved.final = Qundef;
290
291 EC_PUSH_TAG(ec);
292 enum ruby_tag_type state = EC_EXEC_TAG();
293 if (state != TAG_NONE) {
294 ++saved.finished; /* skip failed finalizer */
295
296 VALUE failed_final = saved.final;
297 saved.final = Qundef;
298 if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
299 rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
300 rb_ec_error_print(ec, ec->errinfo);
301 }
302 }
303
304 for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
305 saved.final = callback(i, data);
306 rb_check_funcall(saved.final, idCall, 1, &objid);
307 }
308 EC_POP_TAG();
309#undef RESTORE_FINALIZER
310}
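/* Note on the control flow above: EC_EXEC_TAG() returns non-zero when a
 * finalizer raises. The handler counts the failed finalizer as finished,
 * warns, and control falls through to the for-loop, which restores the
 * saved frame state via RESTORE_FINALIZER() and resumes with the next
 * finalizer rather than aborting the whole list. */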
311
312void
313rb_gc_set_pending_interrupt(void)
314{
315 rb_execution_context_t *ec = GET_EC();
316 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
317}
318
319void
320rb_gc_unset_pending_interrupt(void)
321{
322 rb_execution_context_t *ec = GET_EC();
323 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
324}
325
326bool
327rb_gc_multi_ractor_p(void)
328{
329 return rb_multi_ractor_p();
330}
331
332bool rb_obj_is_main_ractor(VALUE gv);
333
334bool
335rb_gc_shutdown_call_finalizer_p(VALUE obj)
336{
337 switch (BUILTIN_TYPE(obj)) {
338 case T_DATA:
339 if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
340 if (rb_obj_is_thread(obj)) return false;
341 if (rb_obj_is_mutex(obj)) return false;
342 if (rb_obj_is_fiber(obj)) return false;
343 if (rb_obj_is_main_ractor(obj)) return false;
344 if (rb_obj_is_fstring_table(obj)) return false;
345
346 return true;
347
348 case T_FILE:
349 return true;
350
351 case T_SYMBOL:
352 if (RSYMBOL(obj)->fstr &&
353 (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
354 BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
355 RSYMBOL(obj)->fstr = 0;
356 }
357 return true;
358
359 case T_NONE:
360 return false;
361
362 default:
363 return ruby_free_at_exit_p();
364 }
365}
366
367uint32_t
368rb_gc_get_shape(VALUE obj)
369{
370 return (uint32_t)rb_shape_get_shape_id(obj);
371}
372
373void
374rb_gc_set_shape(VALUE obj, uint32_t shape_id)
375{
376 rb_shape_set_shape_id(obj, (uint32_t)shape_id);
377}
378
379uint32_t
380rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
381{
382 rb_shape_t *orig_shape = rb_shape_get_shape(obj);
383
384 if (rb_shape_obj_too_complex(obj)) return (uint32_t)OBJ_TOO_COMPLEX_SHAPE_ID;
385
386 rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(heap_id + FIRST_T_OBJECT_SHAPE_ID));
387 rb_shape_t *new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
388
389 if (!new_shape) return 0;
390
391 return (uint32_t)rb_shape_id(new_shape);
392}
393
394void rb_vm_update_references(void *ptr);
395
396#define rb_setjmp(env) RUBY_SETJMP(env)
397#define rb_jmp_buf rb_jmpbuf_t
398#undef rb_data_object_wrap
399
400#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
401#define MAP_ANONYMOUS MAP_ANON
402#endif
403
404#define unless_objspace(objspace) \
405 void *objspace; \
406 rb_vm_t *unless_objspace_vm = GET_VM(); \
407 if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
408 else /* return; or objspace will be warned uninitialized */
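/* Usage sketch for unless_objspace() (hypothetical caller, mirroring the
 * real callers later in this file): the trailing statement binds to the
 * hidden `else`, so it only runs when no VM/objspace exists yet.
 *
 *   void some_gc_entry_point(VALUE obj)
 *   {
 *       unless_objspace(objspace) return;   // no VM yet: nothing to do
 *       rb_gc_impl_mark(objspace, obj);
 *   }
 */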
409
410#define RMOVED(obj) ((struct RMoved *)(obj))
411
412#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
413 if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
414 *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
415 } \
416} while (0)
417
418#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
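/* Usage sketch (field names hypothetical): compaction callbacks patch each
 * reference slot in place once its target object has been moved.
 *
 *   UPDATE_IF_MOVED(objspace, ext->some_ref);                       // VALUE slot
 *   TYPED_UPDATE_IF_MOVED(objspace, rb_method_entry_t *, ext->me);  // typed slot
 */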
419
420#if RUBY_MARK_FREE_DEBUG
421int ruby_gc_debug_indent = 0;
422#endif
423
424#ifndef RGENGC_OBJ_INFO
425# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
426#endif
427
428#ifndef CALC_EXACT_MALLOC_SIZE
429# define CALC_EXACT_MALLOC_SIZE 0
430#endif
431
433
434static size_t malloc_offset = 0;
435#if defined(HAVE_MALLOC_USABLE_SIZE)
436static size_t
437gc_compute_malloc_offset(void)
438{
439 // Different allocators use different metadata storage strategies which result in different
440 // ideal sizes.
441 // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
442 // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
443 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
444 // waste memory.
445 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
446 // no wasted memory.
447 size_t offset = 0;
448 for (offset = 0; offset <= 16; offset += 8) {
449 size_t allocated = (64 - offset);
450 void *test_ptr = malloc(allocated);
451 size_t wasted = malloc_usable_size(test_ptr) - allocated;
452 free(test_ptr);
453
454 if (wasted == 0) {
455 return offset;
456 }
457 }
458 return 0;
459}
460#else
461static size_t
462gc_compute_malloc_offset(void)
463{
464 // If we don't have malloc_usable_size, we use powers of 2.
465 return 0;
466}
467#endif
468
469size_t
470rb_malloc_grow_capa(size_t current, size_t type_size)
471{
472 size_t current_capacity = current;
473 if (current_capacity < 4) {
474 current_capacity = 4;
475 }
476 current_capacity *= type_size;
477
478 // We double the current capacity.
479 size_t new_capacity = (current_capacity * 2);
480
481 // And round up to the next power of 2 if it's not already one.
482 if (rb_popcount64(new_capacity) != 1) {
483 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
484 }
485
486 new_capacity -= malloc_offset;
487 new_capacity /= type_size;
488 if (current > new_capacity) {
489 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
490 }
491 RUBY_ASSERT(new_capacity > current);
492 return new_capacity;
493}
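/* Worked example (assuming type_size == 8 and malloc_offset == 8, as
 * measured above for glibc): current == 5 gives 5 * 8 = 40 payload bytes,
 * doubled to 80, rounded up to 128 (the next power of two), minus the
 * 8-byte offset = 120 bytes, so the function returns a capacity of 15
 * elements and malloc(120) wastes no usable space. */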
494
495static inline struct rbimpl_size_mul_overflow_tag
496size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
497{
498 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
499 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
500 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
501}
502
503static inline struct rbimpl_size_mul_overflow_tag
504size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
505{
506 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
507 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
508 struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
509 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
510}
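/* For reference: struct rbimpl_size_mul_overflow_tag is a { bool left;
 * size_t right; } pair where `left` flags overflow and `right` holds the
 * (possibly wrapped) result. So size_mul_add_overflow(3, 4, 5) yields
 * { false, 17 }, while size_mul_add_overflow(SIZE_MAX, 2, 0) sets `left`;
 * the *_or_raise wrappers below turn that flag into an exception. */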
511
512PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
513
514static inline size_t
515size_mul_or_raise(size_t x, size_t y, VALUE exc)
516{
517 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
518 if (LIKELY(!t.left)) {
519 return t.right;
520 }
521 else if (rb_during_gc()) {
522 rb_memerror(); /* or...? */
523 }
524 else {
525 gc_raise(
526 exc,
527 "integer overflow: %"PRIuSIZE
528 " * %"PRIuSIZE
529 " > %"PRIuSIZE,
530 x, y, (size_t)SIZE_MAX);
531 }
532}
533
534size_t
535rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
536{
537 return size_mul_or_raise(x, y, exc);
538}
539
540static inline size_t
541size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
542{
543 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
544 if (LIKELY(!t.left)) {
545 return t.right;
546 }
547 else if (rb_during_gc()) {
548 rb_memerror(); /* or...? */
549 }
550 else {
551 gc_raise(
552 exc,
553 "integer overflow: %"PRIuSIZE
554 " * %"PRIuSIZE
555 " + %"PRIuSIZE
556 " > %"PRIuSIZE,
557 x, y, z, (size_t)SIZE_MAX);
558 }
559}
560
561size_t
562rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
563{
564 return size_mul_add_or_raise(x, y, z, exc);
565}
566
567static inline size_t
568size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
569{
570 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
571 if (LIKELY(!t.left)) {
572 return t.right;
573 }
574 else if (rb_during_gc()) {
575 rb_memerror(); /* or...? */
576 }
577 else {
578 gc_raise(
579 exc,
580 "integer overflow: %"PRIdSIZE
581 " * %"PRIdSIZE
582 " + %"PRIdSIZE
583 " * %"PRIdSIZE
584 " > %"PRIdSIZE,
585 x, y, z, w, (size_t)SIZE_MAX);
586 }
587}
588
589#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
590/* trick the compiler into thinking an external signal handler uses this */
591volatile VALUE rb_gc_guarded_val;
592volatile VALUE *
593rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
594{
595 rb_gc_guarded_val = val;
596
597 return ptr;
598}
599#endif
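/* This is what RB_GC_GUARD() expands to on these platforms. Typical
 * extension-side use keeps a VALUE (and pointers into it) alive while only
 * the raw pointer is referenced:
 *
 *   VALUE str = rb_str_new_cstr("temp");
 *   const char *p = RSTRING_PTR(str);
 *   ... use p ...
 *   RB_GC_GUARD(str);  // str stays live at least until here
 */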
600
601static const char *obj_type_name(VALUE obj);
602#include "gc/default/default.c"
603
604#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
605# error "Modular GC requires dlopen"
606#elif USE_MODULAR_GC
607#include <dlfcn.h>
608
609typedef struct gc_function_map {
610 // Bootup
611 void *(*objspace_alloc)(void);
612 void (*objspace_init)(void *objspace_ptr);
613 void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
614 void (*set_params)(void *objspace_ptr);
615 void (*init)(void);
616 size_t *(*heap_sizes)(void *objspace_ptr);
617 // Shutdown
618 void (*shutdown_free_objects)(void *objspace_ptr);
619 void (*objspace_free)(void *objspace_ptr);
620 void (*ractor_cache_free)(void *objspace_ptr, void *cache);
621 // GC
622 void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
623 bool (*during_gc_p)(void *objspace_ptr);
624 void (*prepare_heap)(void *objspace_ptr);
625 void (*gc_enable)(void *objspace_ptr);
626 void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
627 bool (*gc_enabled_p)(void *objspace_ptr);
628 VALUE (*config_get)(void *objpace_ptr);
629 void (*config_set)(void *objspace_ptr, VALUE hash);
630 void (*stress_set)(void *objspace_ptr, VALUE flag);
631 VALUE (*stress_get)(void *objspace_ptr);
632 // Object allocation
633 VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
634 size_t (*obj_slot_size)(VALUE obj);
635 size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
636 bool (*size_allocatable_p)(size_t size);
637 // Malloc
638 void *(*malloc)(void *objspace_ptr, size_t size);
639 void *(*calloc)(void *objspace_ptr, size_t size);
640 void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
641 void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
642 void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
643 // Marking
644 void (*mark)(void *objspace_ptr, VALUE obj);
645 void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
646 void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
647 void (*mark_maybe)(void *objspace_ptr, VALUE obj);
648 void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
649 void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
650 // Compaction
651 bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
652 VALUE (*location)(void *objspace_ptr, VALUE value);
653 // Write barriers
654 void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
655 void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
656 void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
657 // Heap walking
658 void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
659 void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
660 // Finalizers
661 void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
662 VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
663 void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
664 void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
665 void (*shutdown_call_finalizer)(void *objspace_ptr);
666 // Object ID
667 VALUE (*object_id)(void *objspace_ptr, VALUE obj);
668 VALUE (*object_id_to_ref)(void *objspace_ptr, VALUE object_id);
669 // Forking
670 void (*before_fork)(void *objspace_ptr);
671 void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
672 // Statistics
673 void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
674 bool (*get_measure_total_time)(void *objspace_ptr);
675 unsigned long long (*get_total_time)(void *objspace_ptr);
676 size_t (*gc_count)(void *objspace_ptr);
677 VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
678 VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
679 VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
680 const char *(*active_gc_name)(void);
681 // Miscellaneous
682 struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
683 bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
684 bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
685 void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
686 void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
687
688 bool modular_gc_loaded_p;
689} rb_gc_function_map_t;
690
691static rb_gc_function_map_t rb_gc_functions;
692
693# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
694# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)
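/* Example (hypothetical library name "mygc"): on a build configured with a
 * modular-GC directory, running
 *
 *   RUBY_GC_LIBRARY=mygc ruby script.rb
 *
 * makes the loader below dlopen MODULAR_GC_DIR "librubygc.mygc" DLEXT
 * instead of using the statically linked default implementation. */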
695
696static void
697ruby_modular_gc_init(void)
698{
699 // Assert that the directory path ends with a /
700 RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
701
702 const char *gc_so_file = getenv(RUBY_GC_LIBRARY);
703
704 rb_gc_function_map_t gc_functions = { 0 };
705
706 char *gc_so_path = NULL;
707 void *handle = NULL;
708 if (gc_so_file) {
709 /* Check to make sure that gc_so_file matches /[A-Za-z0-9_-]+/ so that it does
710 * not load a shared object outside of the directory. */
711 for (size_t i = 0; i < strlen(gc_so_file); i++) {
712 char c = gc_so_file[i];
713 if (isalnum(c)) continue;
714 switch (c) {
715 case '-':
716 case '_':
717 break;
718 default:
719 fprintf(stderr, "Only alphanumeric characters, dashes, and underscores are allowed in "RUBY_GC_LIBRARY"\n");
720 exit(1);
721 }
722 }
723
724 size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
725#ifdef LOAD_RELATIVE
726 Dl_info dli;
727 size_t prefix_len = 0;
728 if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
729 const char *base = strrchr(dli.dli_fname, '/');
730 if (base) {
731 size_t tail = 0;
732# define end_with_p(lit) \
733 (prefix_len >= (tail = rb_strlen_lit(lit)) && \
734 memcmp(base - tail, lit, tail) == 0)
735
736 prefix_len = base - dli.dli_fname;
737 if (end_with_p("/bin") || end_with_p("/lib")) {
738 prefix_len -= tail;
739 }
740 prefix_len += MODULAR_GC_DIR[0] != '/';
741 gc_so_path_size += prefix_len;
742 }
743 }
744#endif
745 gc_so_path = alloca(gc_so_path_size);
746 {
747 size_t gc_so_path_idx = 0;
748#define GC_SO_PATH_APPEND(str) do { \
749 gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
750} while (0)
751#ifdef LOAD_RELATIVE
752 if (prefix_len > 0) {
753 memcpy(gc_so_path, dli.dli_fname, prefix_len);
754 gc_so_path_idx = prefix_len;
755 }
756#endif
757 GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
758 GC_SO_PATH_APPEND(gc_so_file);
759 GC_SO_PATH_APPEND(DLEXT);
760 GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
761#undef GC_SO_PATH_APPEND
762 }
763
764 handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
765 if (!handle) {
766 fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
767 exit(1);
768 }
769
770 gc_functions.modular_gc_loaded_p = true;
771 }
772
773# define load_modular_gc_func(name) do { \
774 if (handle) { \
775 const char *func_name = "rb_gc_impl_" #name; \
776 gc_functions.name = dlsym(handle, func_name); \
777 if (!gc_functions.name) { \
778 fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
779 exit(1); \
780 } \
781 } \
782 else { \
783 gc_functions.name = rb_gc_impl_##name; \
784 } \
785} while (0)
786
787 // Bootup
788 load_modular_gc_func(objspace_alloc);
789 load_modular_gc_func(objspace_init);
790 load_modular_gc_func(ractor_cache_alloc);
791 load_modular_gc_func(set_params);
792 load_modular_gc_func(init);
793 load_modular_gc_func(heap_sizes);
794 // Shutdown
795 load_modular_gc_func(shutdown_free_objects);
796 load_modular_gc_func(objspace_free);
797 load_modular_gc_func(ractor_cache_free);
798 // GC
799 load_modular_gc_func(start);
800 load_modular_gc_func(during_gc_p);
801 load_modular_gc_func(prepare_heap);
802 load_modular_gc_func(gc_enable);
803 load_modular_gc_func(gc_disable);
804 load_modular_gc_func(gc_enabled_p);
805 load_modular_gc_func(config_set);
806 load_modular_gc_func(config_get);
807 load_modular_gc_func(stress_set);
808 load_modular_gc_func(stress_get);
809 // Object allocation
810 load_modular_gc_func(new_obj);
811 load_modular_gc_func(obj_slot_size);
812 load_modular_gc_func(heap_id_for_size);
813 load_modular_gc_func(size_allocatable_p);
814 // Malloc
815 load_modular_gc_func(malloc);
816 load_modular_gc_func(calloc);
817 load_modular_gc_func(realloc);
818 load_modular_gc_func(free);
819 load_modular_gc_func(adjust_memory_usage);
820 // Marking
821 load_modular_gc_func(mark);
822 load_modular_gc_func(mark_and_move);
823 load_modular_gc_func(mark_and_pin);
824 load_modular_gc_func(mark_maybe);
825 load_modular_gc_func(mark_weak);
826 load_modular_gc_func(remove_weak);
827 // Compaction
828 load_modular_gc_func(object_moved_p);
829 load_modular_gc_func(location);
830 // Write barriers
831 load_modular_gc_func(writebarrier);
832 load_modular_gc_func(writebarrier_unprotect);
833 load_modular_gc_func(writebarrier_remember);
834 // Heap walking
835 load_modular_gc_func(each_objects);
836 load_modular_gc_func(each_object);
837 // Finalizers
838 load_modular_gc_func(make_zombie);
839 load_modular_gc_func(define_finalizer);
840 load_modular_gc_func(undefine_finalizer);
841 load_modular_gc_func(copy_finalizer);
842 load_modular_gc_func(shutdown_call_finalizer);
843 // Object ID
844 load_modular_gc_func(object_id);
845 load_modular_gc_func(object_id_to_ref);
846 // Forking
847 load_modular_gc_func(before_fork);
848 load_modular_gc_func(after_fork);
849 // Statistics
850 load_modular_gc_func(set_measure_total_time);
851 load_modular_gc_func(get_measure_total_time);
852 load_modular_gc_func(get_total_time);
853 load_modular_gc_func(gc_count);
854 load_modular_gc_func(latest_gc_info);
855 load_modular_gc_func(stat);
856 load_modular_gc_func(stat_heap);
857 load_modular_gc_func(active_gc_name);
858 // Miscellaneous
859 load_modular_gc_func(object_metadata);
860 load_modular_gc_func(pointer_to_heap_p);
861 load_modular_gc_func(garbage_object_p);
862 load_modular_gc_func(set_event_hook);
863 load_modular_gc_func(copy_attributes);
864
865# undef load_modular_gc_func
866
867 rb_gc_functions = gc_functions;
868}
869
870// Bootup
871# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
872# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
873# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
874# define rb_gc_impl_set_params rb_gc_functions.set_params
875# define rb_gc_impl_init rb_gc_functions.init
876# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
877// Shutdown
878# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
879# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
880# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
881// GC
882# define rb_gc_impl_start rb_gc_functions.start
883# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
884# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
885# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
886# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
887# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
888# define rb_gc_impl_config_get rb_gc_functions.config_get
889# define rb_gc_impl_config_set rb_gc_functions.config_set
890# define rb_gc_impl_stress_set rb_gc_functions.stress_set
891# define rb_gc_impl_stress_get rb_gc_functions.stress_get
892// Object allocation
893# define rb_gc_impl_new_obj rb_gc_functions.new_obj
894# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
895# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
896# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
897// Malloc
898# define rb_gc_impl_malloc rb_gc_functions.malloc
899# define rb_gc_impl_calloc rb_gc_functions.calloc
900# define rb_gc_impl_realloc rb_gc_functions.realloc
901# define rb_gc_impl_free rb_gc_functions.free
902# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
903// Marking
904# define rb_gc_impl_mark rb_gc_functions.mark
905# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
906# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
907# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
908# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
909# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
910// Compaction
911# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
912# define rb_gc_impl_location rb_gc_functions.location
913// Write barriers
914# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
915# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
916# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
917// Heap walking
918# define rb_gc_impl_each_objects rb_gc_functions.each_objects
919# define rb_gc_impl_each_object rb_gc_functions.each_object
920// Finalizers
921# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
922# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
923# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
924# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
925# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
926// Object ID
927# define rb_gc_impl_object_id rb_gc_functions.object_id
928# define rb_gc_impl_object_id_to_ref rb_gc_functions.object_id_to_ref
929// Forking
930# define rb_gc_impl_before_fork rb_gc_functions.before_fork
931# define rb_gc_impl_after_fork rb_gc_functions.after_fork
932// Statistics
933# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
934# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
935# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
936# define rb_gc_impl_gc_count rb_gc_functions.gc_count
937# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
938# define rb_gc_impl_stat rb_gc_functions.stat
939# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
940# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
941// Miscellaneous
942# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
943# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
944# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
945# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
946# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
947#endif
948
949#ifdef RUBY_ASAN_ENABLED
950static void
951asan_death_callback(void)
952{
953 if (GET_VM()) {
954 rb_bug_without_die("ASAN error");
955 }
956}
957#endif
958
959static VALUE initial_stress = Qfalse;
960
961void *
962rb_objspace_alloc(void)
963{
964#if USE_MODULAR_GC
965 ruby_modular_gc_init();
966#endif
967
968 void *objspace = rb_gc_impl_objspace_alloc();
969 ruby_current_vm_ptr->gc.objspace = objspace;
970 rb_gc_impl_objspace_init(objspace);
971 rb_gc_impl_stress_set(objspace, initial_stress);
972
973#ifdef RUBY_ASAN_ENABLED
974 __sanitizer_set_death_callback(asan_death_callback);
975#endif
976
977 return objspace;
978}
979
980void
981rb_objspace_free(void *objspace)
982{
983 rb_gc_impl_objspace_free(objspace);
984}
985
986size_t
987rb_gc_obj_slot_size(VALUE obj)
988{
989 return rb_gc_impl_obj_slot_size(obj);
990}
991
992static inline void
993gc_validate_pc(void)
994{
995#if RUBY_DEBUG
996 rb_execution_context_t *ec = GET_EC();
997 const rb_control_frame_t *cfp = ec->cfp;
998 if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
999 RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
1000 RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
1001 }
1002#endif
1003}
1004
1005static inline VALUE
1006newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
1007{
1008 VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);
1009
1010 gc_validate_pc();
1011
1012 if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
1013 unsigned int lev;
1014 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
1015 {
1016 memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);
1017
1018 /* We must disable GC here because the callback could call xmalloc
1019 * which could potentially trigger a GC, and a lot of code is unsafe
1020 * to trigger a GC right after an object has been allocated because
1022 * it performs initialization for the object and assumes that the
1023 * GC does not trigger before then. */
1023 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1024 {
1025 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
1026 }
1027 if (!gc_disabled) rb_gc_enable();
1028 }
1029 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
1030 }
1031
1032 return obj;
1033}
1034
1035VALUE
1036rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
1037{
1038 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1039 return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
1040}
1041
1042VALUE
1043rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
1044{
1045 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1046 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
1047}
1048
1049#define UNEXPECTED_NODE(func) \
1050 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1051 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1052
1053static inline void
1054rb_data_object_check(VALUE klass)
1055{
1056 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1057 rb_undef_alloc_func(klass);
1058 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1059 }
1060}
1061
1062VALUE
1063rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1064{
1065 RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
1066 if (klass) rb_data_object_check(klass);
1067 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
1068}
1069
1070VALUE
1071rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1072{
1073 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1074 DATA_PTR(obj) = xcalloc(1, size);
1075 return obj;
1076}
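/* These two functions back the classic untyped Data API. A typical
 * extension-side use (struct my_obj, my_mark, and my_free are placeholder
 * names):
 *
 *   struct my_obj *ptr;
 *   VALUE obj = Data_Make_Struct(klass, struct my_obj, my_mark, my_free, ptr);
 *   VALUE obj2 = Data_Wrap_Struct(klass, my_mark, my_free, existing_ptr);
 */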
1077
1078static VALUE
1079typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1080{
1081 RBIMPL_NONNULL_ARG(type);
1082 if (klass) rb_data_object_check(klass);
1083 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1084 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
1085}
1086
1087VALUE
1088rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1089{
1090 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
1091 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1092 }
1093
1094 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1095}
1096
1097VALUE
1098rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1099{
1100 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
1101 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1102 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1103 }
1104
1105 size_t embed_size = offsetof(struct RTypedData, data) + size;
1106 if (rb_gc_size_allocatable_p(embed_size)) {
1107 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1108 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1109 return obj;
1110 }
1111 }
1112
1113 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1114 DATA_PTR(obj) = xcalloc(1, size);
1115 return obj;
1116}
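/* Sketch of a type that opts into embedding (names hypothetical). As
 * enforced above, RUBY_TYPED_EMBEDDABLE is only valid together with
 * RUBY_TYPED_FREE_IMMEDIATELY:
 *
 *   static const rb_data_type_t my_type = {
 *       .wrap_struct_name = "my_type",
 *       .function = { .dmark = NULL, .dfree = RUBY_TYPED_DEFAULT_FREE, .dsize = NULL },
 *       .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE,
 *   };
 *   VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct my_struct), &my_type);
 */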
1117
1118static size_t
1119rb_objspace_data_type_memsize(VALUE obj)
1120{
1121 size_t size = 0;
1122 if (RTYPEDDATA_P(obj)) {
1123 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1124 const void *ptr = RTYPEDDATA_GET_DATA(obj);
1125
1126 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1127#ifdef HAVE_MALLOC_USABLE_SIZE
1128 size += malloc_usable_size((void *)ptr);
1129#endif
1130 }
1131
1132 if (ptr && type->function.dsize) {
1133 size += type->function.dsize(ptr);
1134 }
1135 }
1136
1137 return size;
1138}
1139
1140const char *
1141rb_objspace_data_type_name(VALUE obj)
1142{
1143 if (RTYPEDDATA_P(obj)) {
1144 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1145 }
1146 else {
1147 return 0;
1148 }
1149}
1150
1151static enum rb_id_table_iterator_result
1152cvar_table_free_i(VALUE value, void *ctx)
1153{
1154 xfree((void *)value);
1155 return ID_TABLE_CONTINUE;
1156}
1157
1158static void
1159io_fptr_finalize(void *fptr)
1160{
1161 rb_io_fptr_finalize((struct rb_io *)fptr);
1162}
1163
1164static inline void
1165make_io_zombie(void *objspace, VALUE obj)
1166{
1167 rb_io_t *fptr = RFILE(obj)->fptr;
1168 rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
1169}
1170
1171static bool
1172rb_data_free(void *objspace, VALUE obj)
1173{
1174 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1175 if (data) {
1176 int free_immediately = false;
1177 void (*dfree)(void *);
1178
1179 if (RTYPEDDATA_P(obj)) {
1180 free_immediately = (RTYPEDDATA(obj)->type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1181 dfree = RTYPEDDATA(obj)->type->function.dfree;
1182 }
1183 else {
1184 dfree = RDATA(obj)->dfree;
1185 }
1186
1187 if (dfree) {
1188 if (dfree == RUBY_DEFAULT_FREE) {
1189 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1190 xfree(data);
1191 RB_DEBUG_COUNTER_INC(obj_data_xfree);
1192 }
1193 }
1194 else if (free_immediately) {
1195 (*dfree)(data);
1196 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1197 xfree(data);
1198 }
1199
1200 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1201 }
1202 else {
1203 rb_gc_impl_make_zombie(objspace, obj, dfree, data);
1204 RB_DEBUG_COUNTER_INC(obj_data_zombie);
1205 return FALSE;
1206 }
1207 }
1208 else {
1209 RB_DEBUG_COUNTER_INC(obj_data_empty);
1210 }
1211 }
1212
1213 return true;
1214}
1215
1216void
1217rb_gc_obj_free_vm_weak_references(VALUE obj)
1218{
1219 if (FL_TEST(obj, FL_EXIVAR)) {
1220 rb_free_generic_ivar(obj);
1221 FL_UNSET(obj, FL_EXIVAR);
1222 }
1223
1224 switch (BUILTIN_TYPE(obj)) {
1225 case T_STRING:
1226 if (FL_TEST(obj, RSTRING_FSTR)) {
1227 rb_gc_free_fstring(obj);
1228 }
1229 break;
1230 case T_SYMBOL:
1231 rb_gc_free_dsymbol(obj);
1232 break;
1233 case T_IMEMO:
1234 switch (imemo_type(obj)) {
1235 case imemo_callinfo:
1236 rb_vm_ci_free((const struct rb_callinfo *)obj);
1237 break;
1238 case imemo_ment:
1239 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
1240 break;
1241 default:
1242 break;
1243 }
1244 break;
1245 default:
1246 break;
1247 }
1248}
1249
1250bool
1251rb_gc_obj_free(void *objspace, VALUE obj)
1252{
1253 RB_DEBUG_COUNTER_INC(obj_free);
1254
1255 switch (BUILTIN_TYPE(obj)) {
1256 case T_NIL:
1257 case T_FIXNUM:
1258 case T_TRUE:
1259 case T_FALSE:
1260 rb_bug("obj_free() called for broken object");
1261 break;
1262 default:
1263 break;
1264 }
1265
1266 switch (BUILTIN_TYPE(obj)) {
1267 case T_OBJECT:
1268 if (rb_shape_obj_too_complex(obj)) {
1269 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
1270 st_free_table(ROBJECT_IV_HASH(obj));
1271 }
1272 else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
1273 RB_DEBUG_COUNTER_INC(obj_obj_embed);
1274 }
1275 else {
1276 xfree(ROBJECT(obj)->as.heap.ivptr);
1277 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1278 }
1279 break;
1280 case T_MODULE:
1281 case T_CLASS:
1282 rb_id_table_free(RCLASS_M_TBL(obj));
1283 rb_cc_table_free(obj);
1284 if (rb_shape_obj_too_complex(obj)) {
1285 st_free_table((st_table *)RCLASS_IVPTR(obj));
1286 }
1287 else {
1288 xfree(RCLASS_IVPTR(obj));
1289 }
1290
1291 if (RCLASS_CONST_TBL(obj)) {
1292 rb_free_const_table(RCLASS_CONST_TBL(obj));
1293 }
1294 if (RCLASS_CVC_TBL(obj)) {
1295 rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
1296 rb_id_table_free(RCLASS_CVC_TBL(obj));
1297 }
1298 rb_class_remove_subclass_head(obj);
1299 rb_class_remove_from_module_subclasses(obj);
1300 rb_class_remove_from_super_subclasses(obj);
1301 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
1302 xfree(RCLASS_SUPERCLASSES(obj));
1303 }
1304
1305 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1306 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1307 break;
1308 case T_STRING:
1309 rb_str_free(obj);
1310 break;
1311 case T_ARRAY:
1312 rb_ary_free(obj);
1313 break;
1314 case T_HASH:
1315#if USE_DEBUG_COUNTER
1316 switch (RHASH_SIZE(obj)) {
1317 case 0:
1318 RB_DEBUG_COUNTER_INC(obj_hash_empty);
1319 break;
1320 case 1:
1321 RB_DEBUG_COUNTER_INC(obj_hash_1);
1322 break;
1323 case 2:
1324 RB_DEBUG_COUNTER_INC(obj_hash_2);
1325 break;
1326 case 3:
1327 RB_DEBUG_COUNTER_INC(obj_hash_3);
1328 break;
1329 case 4:
1330 RB_DEBUG_COUNTER_INC(obj_hash_4);
1331 break;
1332 case 5:
1333 case 6:
1334 case 7:
1335 case 8:
1336 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1337 break;
1338 default:
1339 GC_ASSERT(RHASH_SIZE(obj) > 8);
1340 RB_DEBUG_COUNTER_INC(obj_hash_g8);
1341 }
1342
1343 if (RHASH_AR_TABLE_P(obj)) {
1344 if (RHASH_AR_TABLE(obj) == NULL) {
1345 RB_DEBUG_COUNTER_INC(obj_hash_null);
1346 }
1347 else {
1348 RB_DEBUG_COUNTER_INC(obj_hash_ar);
1349 }
1350 }
1351 else {
1352 RB_DEBUG_COUNTER_INC(obj_hash_st);
1353 }
1354#endif
1355
1356 rb_hash_free(obj);
1357 break;
1358 case T_REGEXP:
1359 if (RREGEXP(obj)->ptr) {
1360 onig_free(RREGEXP(obj)->ptr);
1361 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1362 }
1363 break;
1364 case T_DATA:
1365 if (!rb_data_free(objspace, obj)) return false;
1366 break;
1367 case T_MATCH:
1368 {
1369 rb_matchext_t *rm = RMATCH_EXT(obj);
1370#if USE_DEBUG_COUNTER
1371 if (rm->regs.num_regs >= 8) {
1372 RB_DEBUG_COUNTER_INC(obj_match_ge8);
1373 }
1374 else if (rm->regs.num_regs >= 4) {
1375 RB_DEBUG_COUNTER_INC(obj_match_ge4);
1376 }
1377 else if (rm->regs.num_regs >= 1) {
1378 RB_DEBUG_COUNTER_INC(obj_match_under4);
1379 }
1380#endif
1381 onig_region_free(&rm->regs, 0);
1382 xfree(rm->char_offset);
1383
1384 RB_DEBUG_COUNTER_INC(obj_match_ptr);
1385 }
1386 break;
1387 case T_FILE:
1388 if (RFILE(obj)->fptr) {
1389 make_io_zombie(objspace, obj);
1390 RB_DEBUG_COUNTER_INC(obj_file_ptr);
1391 return FALSE;
1392 }
1393 break;
1394 case T_RATIONAL:
1395 RB_DEBUG_COUNTER_INC(obj_rational);
1396 break;
1397 case T_COMPLEX:
1398 RB_DEBUG_COUNTER_INC(obj_complex);
1399 break;
1400 case T_MOVED:
1401 break;
1402 case T_ICLASS:
1403 /* Basically, T_ICLASS shares its method table with the module */
1404 if (RICLASS_OWNS_M_TBL_P(obj)) {
1405 /* Method table is not shared for origin iclasses of classes */
1406 rb_id_table_free(RCLASS_M_TBL(obj));
1407 }
1408 if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
1409 rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
1410 }
1411 rb_class_remove_subclass_head(obj);
1412 rb_cc_table_free(obj);
1413 rb_class_remove_from_module_subclasses(obj);
1414 rb_class_remove_from_super_subclasses(obj);
1415
1416 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1417 break;
1418
1419 case T_FLOAT:
1420 RB_DEBUG_COUNTER_INC(obj_float);
1421 break;
1422
1423 case T_BIGNUM:
1424 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1425 xfree(BIGNUM_DIGITS(obj));
1426 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1427 }
1428 else {
1429 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1430 }
1431 break;
1432
1433 case T_NODE:
1434 UNEXPECTED_NODE(obj_free);
1435 break;
1436
1437 case T_STRUCT:
1438 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1439 RSTRUCT(obj)->as.heap.ptr == NULL) {
1440 RB_DEBUG_COUNTER_INC(obj_struct_embed);
1441 }
1442 else {
1443 xfree((void *)RSTRUCT(obj)->as.heap.ptr);
1444 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1445 }
1446 break;
1447
1448 case T_SYMBOL:
1449 RB_DEBUG_COUNTER_INC(obj_symbol);
1450 break;
1451
1452 case T_IMEMO:
1453 rb_imemo_free((VALUE)obj);
1454 break;
1455
1456 default:
1457 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1458 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1459 }
1460
1461 if (FL_TEST(obj, FL_FINALIZE)) {
1462 rb_gc_impl_make_zombie(objspace, obj, 0, 0);
1463 return FALSE;
1464 }
1465 else {
1466 return TRUE;
1467 }
1468}
1469
1470void
1471rb_objspace_set_event_hook(const rb_event_flag_t event)
1472{
1473 rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1474}
1475
1476static int
1477internal_object_p(VALUE obj)
1478{
1479 void *ptr = asan_unpoison_object_temporary(obj);
1480
1481 if (RBASIC(obj)->flags) {
1482 switch (BUILTIN_TYPE(obj)) {
1483 case T_NODE:
1484 UNEXPECTED_NODE(internal_object_p);
1485 break;
1486 case T_NONE:
1487 case T_MOVED:
1488 case T_IMEMO:
1489 case T_ICLASS:
1490 case T_ZOMBIE:
1491 break;
1492 case T_CLASS:
1493 if (!RBASIC(obj)->klass) break;
1494 if (RCLASS_SINGLETON_P(obj)) {
1495 return rb_singleton_class_internal_p(obj);
1496 }
1497 return 0;
1498 default:
1499 if (!RBASIC(obj)->klass) break;
1500 return 0;
1501 }
1502 }
1503 if (ptr || !RBASIC(obj)->flags) {
1504 rb_asan_poison_object(obj);
1505 }
1506 return 1;
1507}
1508
1509int
1510rb_objspace_internal_object_p(VALUE obj)
1511{
1512 return internal_object_p(obj);
1513}
1514
1515struct os_each_struct {
1516 size_t num;
1517 VALUE of;
1518};
1519
1520static int
1521os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1522{
1523 struct os_each_struct *oes = (struct os_each_struct *)data;
1524
1525 VALUE v = (VALUE)vstart;
1526 for (; v != (VALUE)vend; v += stride) {
1527 if (!internal_object_p(v)) {
1528 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1529 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1530 rb_yield(v);
1531 oes->num++;
1532 }
1533 }
1534 }
1535 }
1536
1537 return 0;
1538}
1539
1540static VALUE
1541os_obj_of(VALUE of)
1542{
1543 struct os_each_struct oes;
1544
1545 oes.num = 0;
1546 oes.of = of;
1547 rb_objspace_each_objects(os_obj_of_i, &oes);
1548 return SIZET2NUM(oes.num);
1549}
1550
1551/*
1552 * call-seq:
1553 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1554 * ObjectSpace.each_object([module]) -> an_enumerator
1555 *
1556 * Calls the block once for each living, nonimmediate object in this
1557 * Ruby process. If <i>module</i> is specified, calls the block
1558 * for only those classes or modules that match (or are a subclass of)
1559 * <i>module</i>. Returns the number of objects found. Immediate
1560 * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
1561 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
1562 * never returned. In the example below, #each_object returns both
1563 * the numbers we defined and several constants defined in the Math
1564 * module.
1565 *
1566 * If no block is given, an enumerator is returned instead.
1567 *
1568 * a = 102.7
1569 * b = 95 # Won't be returned
1570 * c = 12345678987654321
1571 * count = ObjectSpace.each_object(Numeric) {|x| p x }
1572 * puts "Total count: #{count}"
1573 *
1574 * <em>produces:</em>
1575 *
1576 * 12345678987654321
1577 * 102.7
1578 * 2.71828182845905
1579 * 3.14159265358979
1580 * 2.22044604925031e-16
1581 * 1.7976931348623157e+308
1582 * 2.2250738585072e-308
1583 * Total count: 7
1584 *
1585 */
1586
1587static VALUE
1588os_each_obj(int argc, VALUE *argv, VALUE os)
1589{
1590 VALUE of;
1591
1592 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1593 RETURN_ENUMERATOR(os, 1, &of);
1594 return os_obj_of(of);
1595}
1596
1597/*
1598 * call-seq:
1599 * ObjectSpace.undefine_finalizer(obj)
1600 *
1601 * Removes all finalizers for <i>obj</i>.
1602 *
1603 */
1604
1605static VALUE
1606undefine_final(VALUE os, VALUE obj)
1607{
1608 return rb_undefine_finalizer(obj);
1609}
1610
1611VALUE
1612rb_undefine_finalizer(VALUE obj)
1613{
1614 rb_check_frozen(obj);
1615
1616 rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1617
1618 return obj;
1619}
1620
1621static void
1622should_be_callable(VALUE block)
1623{
1624 if (!rb_obj_respond_to(block, idCall, TRUE)) {
1625 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1626 rb_obj_class(block));
1627 }
1628}
1629
1630static void
1631should_be_finalizable(VALUE obj)
1632{
1633 if (!FL_ABLE(obj)) {
1634 rb_raise(rb_eArgError, "cannot define finalizer for %s",
1635 rb_obj_classname(obj));
1636 }
1637 rb_check_frozen(obj);
1638}
1639
1640void
1641rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1642{
1643 rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1644}
1645
1646/*
1647 * call-seq:
1648 * ObjectSpace.define_finalizer(obj, aProc=proc())
1649 *
1650 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1651 * was destroyed. The object ID of the <i>obj</i> will be passed
1652 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
1653 * method, make sure it can be called with a single argument.
1654 *
1655 * The return value is an array <code>[0, aProc]</code>.
1656 *
1657 * The two recommended patterns are to either create the finalizer proc
1658 * in a non-instance method where it can safely capture the needed state,
1659 * or to use a custom callable object that stores the needed state
1660 * explicitly as instance variables.
1661 *
1662 * class Foo
1663 * def initialize(data_needed_for_finalization)
1664 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
1665 * end
1666 *
1667 * def self.create_finalizer(data_needed_for_finalization)
1668 * proc {
1669 * puts "finalizing #{data_needed_for_finalization}"
1670 * }
1671 * end
1672 * end
1673 *
1674 * class Bar
1675 * class Remover
1676 * def initialize(data_needed_for_finalization)
1677 * @data_needed_for_finalization = data_needed_for_finalization
1678 * end
1679 *
1680 * def call(id)
1681 * puts "finalizing #{@data_needed_for_finalization}"
1682 * end
1683 * end
1684 *
1685 * def initialize(data_needed_for_finalization)
1686 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
1687 * end
1688 * end
1689 *
1690 * Note that if your finalizer references the object to be
1691 * finalized it will never be run on GC, although it will still be
1692 * run at exit. You will get a warning if you capture the object
1693 * to be finalized as the receiver of the finalizer.
1694 *
1695 * class CapturesSelf
1696 * def initialize(name)
1697 * ObjectSpace.define_finalizer(self, proc {
1698 * # this finalizer will only be run on exit
1699 * puts "finalizing #{name}"
1700 * })
1701 * end
1702 * end
1703 *
1704 * Also note that finalization can be unpredictable and is never guaranteed
1705 * to be run except on exit.
1706 */
1707
1708static VALUE
1709define_final(int argc, VALUE *argv, VALUE os)
1710{
1711 VALUE obj, block;
1712
1713 rb_scan_args(argc, argv, "11", &obj, &block);
1714 if (argc == 1) {
1715 block = rb_block_proc();
1716 }
1717
1718 if (rb_callable_receiver(block) == obj) {
1719 rb_warn("finalizer references object to be finalized");
1720 }
1721
1722 return rb_define_finalizer(obj, block);
1723}
1724
1725VALUE
1726rb_define_finalizer(VALUE obj, VALUE block)
1727{
1728 should_be_finalizable(obj);
1729 should_be_callable(block);
1730
1731 block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
1732
1733 block = rb_ary_new3(2, INT2FIX(0), block);
1734 OBJ_FREEZE(block);
1735 return block;
1736}
1737
1738void
1739rb_objspace_call_finalizer(void)
1740{
1741 rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
1742}
1743
1744void
1745rb_objspace_free_objects(void *objspace)
1746{
1747 rb_gc_impl_shutdown_free_objects(objspace);
1748}
1749
1750int
1751rb_objspace_garbage_object_p(VALUE obj)
1752{
1753 return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
1754}
1755
1756bool
1757rb_gc_pointer_to_heap_p(VALUE obj)
1758{
1759 return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
1760}
1761
1762/*
1763 * call-seq:
1764 * ObjectSpace._id2ref(object_id) -> an_object
1765 *
1766 * Converts an object id to a reference to the object. May not be
1767 * called on an object id passed as a parameter to a finalizer.
1768 *
1769 * s = "I am a string" #=> "I am a string"
1770 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
1771 * r == s #=> true
1772 *
1773 * In multi-ractor mode, if the object is not shareable, it raises
1774 * RangeError.
1775 */
1776
1777static VALUE
1778id2ref(VALUE objid)
1779{
1780#if SIZEOF_LONG == SIZEOF_VOIDP
1781#define NUM2PTR(x) NUM2ULONG(x)
1782#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1783#define NUM2PTR(x) NUM2ULL(x)
1784#endif
1785 objid = rb_to_int(objid);
1786 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
1787 VALUE ptr = NUM2PTR(objid);
1788 if (SPECIAL_CONST_P(ptr)) {
1789 if (ptr == Qtrue) return Qtrue;
1790 if (ptr == Qfalse) return Qfalse;
1791 if (NIL_P(ptr)) return Qnil;
1792 if (FIXNUM_P(ptr)) return ptr;
1793 if (FLONUM_P(ptr)) return ptr;
1794
1795 if (SYMBOL_P(ptr)) {
1796 // Check that the symbol is valid
1797 if (rb_static_id_valid_p(SYM2ID(ptr))) {
1798 return ptr;
1799 }
1800 else {
1801 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
1802 }
1803 }
1804
1805 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
1806 }
1807 }
1808
1809 VALUE obj = rb_gc_impl_object_id_to_ref(rb_gc_get_objspace(), objid);
1810 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
1811 return obj;
1812 }
1813 else {
1814 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
1815 }
1816}
1817
1818/* :nodoc: */
1819static VALUE
1820os_id2ref(VALUE os, VALUE objid)
1821{
1822 return id2ref(objid);
1823}
1824
1825static VALUE
1826rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(void *, VALUE))
1827{
1828 if (SPECIAL_CONST_P(obj)) {
1829#if SIZEOF_LONG == SIZEOF_VOIDP
1830 return LONG2NUM((SIGNED_VALUE)obj);
1831#else
1832 return LL2NUM((SIGNED_VALUE)obj);
1833#endif
1834 }
1835
1836 return get_heap_object_id(objspace, obj);
1837}
1838
1839static VALUE
1840nonspecial_obj_id(void *_objspace, VALUE obj)
1841{
1842#if SIZEOF_LONG == SIZEOF_VOIDP
1843 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
1844#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1845 return LL2NUM((SIGNED_VALUE)(obj) / 2);
1846#else
1847# error not supported
1848#endif
1849}
1850
1851VALUE
1852rb_memory_id(VALUE obj)
1853{
1854 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
1855}
1856
1857/*
1858 * Document-method: __id__
1859 * Document-method: object_id
1860 *
1861 * call-seq:
1862 * obj.__id__ -> integer
1863 * obj.object_id -> integer
1864 *
1865 * Returns an integer identifier for +obj+.
1866 *
1867 * The same number will be returned on all calls to +object_id+ for a given
1868 * object, and no two active objects will share an id.
1869 *
1870 * Note that some objects of builtin classes are reused for optimization.
1871 * This is the case for immediate values and frozen string literals.
1872 *
1873 * BasicObject implements +__id__+, Kernel implements +object_id+.
1874 *
1875 * Immediate values are not passed by reference but are passed by value:
1876 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
1877 *
1878 * Object.new.object_id == Object.new.object_id # => false
1879 * (21 * 2).object_id == (21 * 2).object_id # => true
1880 * "hello".object_id == "hello".object_id # => false
1881 * "hi".freeze.object_id == "hi".freeze.object_id # => true
1882 */
1883
1884VALUE
1885rb_obj_id(VALUE obj)
1886{
1887 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
1888 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
1889 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
1890 * any immediates. */
1891 return rb_find_object_id(rb_gc_get_objspace(), obj, rb_gc_impl_object_id);
1892}
1893
1894static enum rb_id_table_iterator_result
1895cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
1896{
1897 size_t *total_size = data_ptr;
1898 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
1899 *total_size += sizeof(*ccs);
1900 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
1901 return ID_TABLE_CONTINUE;
1902}
1903
1904static size_t
1905cc_table_memsize(struct rb_id_table *cc_table)
1906{
1907 size_t total = rb_id_table_memsize(cc_table);
1908 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
1909 return total;
1910}
1911
1912size_t
1913rb_obj_memsize_of(VALUE obj)
1914{
1915 size_t size = 0;
1916
1917 if (SPECIAL_CONST_P(obj)) {
1918 return 0;
1919 }
1920
1921 if (FL_TEST(obj, FL_EXIVAR)) {
1922 size += rb_generic_ivar_memsize(obj);
1923 }
1924
1925 switch (BUILTIN_TYPE(obj)) {
1926 case T_OBJECT:
1927 if (rb_shape_obj_too_complex(obj)) {
1928 size += rb_st_memsize(ROBJECT_IV_HASH(obj));
1929 }
1930 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
1931 size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
1932 }
1933 break;
1934 case T_MODULE:
1935 case T_CLASS:
1936 if (RCLASS_M_TBL(obj)) {
1937 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1938 }
1939 // class IV sizes are allocated as powers of two
1940 size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
1941 if (RCLASS_CVC_TBL(obj)) {
1942 size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
1943 }
1944 if (RCLASS_EXT(obj)->const_tbl) {
1945 size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
1946 }
1947 if (RCLASS_CC_TBL(obj)) {
1948 size += cc_table_memsize(RCLASS_CC_TBL(obj));
1949 }
1950 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
1951 size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
1952 }
1953 break;
1954 case T_ICLASS:
1955 if (RICLASS_OWNS_M_TBL_P(obj)) {
1956 if (RCLASS_M_TBL(obj)) {
1957 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1958 }
1959 }
1960 if (RCLASS_CC_TBL(obj)) {
1961 size += cc_table_memsize(RCLASS_CC_TBL(obj));
1962 }
1963 break;
1964 case T_STRING:
1965 size += rb_str_memsize(obj);
1966 break;
1967 case T_ARRAY:
1968 size += rb_ary_memsize(obj);
1969 break;
1970 case T_HASH:
1971 if (RHASH_ST_TABLE_P(obj)) {
1972 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
1973 /* st_table is in the slot */
1974 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
1975 }
1976 break;
1977 case T_REGEXP:
1978 if (RREGEXP_PTR(obj)) {
1979 size += onig_memsize(RREGEXP_PTR(obj));
1980 }
1981 break;
1982 case T_DATA:
1983 size += rb_objspace_data_type_memsize(obj);
1984 break;
1985 case T_MATCH:
1986 {
1987 rb_matchext_t *rm = RMATCH_EXT(obj);
1988 size += onig_region_memsize(&rm->regs);
1989 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
1990 }
1991 break;
1992 case T_FILE:
1993 if (RFILE(obj)->fptr) {
1994 size += rb_io_memsize(RFILE(obj)->fptr);
1995 }
1996 break;
1997 case T_RATIONAL:
1998 case T_COMPLEX:
1999 break;
2000 case T_IMEMO:
2001 size += rb_imemo_memsize(obj);
2002 break;
2003
2004 case T_FLOAT:
2005 case T_SYMBOL:
2006 break;
2007
2008 case T_BIGNUM:
2009 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2010 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2011 }
2012 break;
2013
2014 case T_NODE:
2015 UNEXPECTED_NODE(obj_memsize_of);
2016 break;
2017
2018 case T_STRUCT:
2019 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2020 RSTRUCT(obj)->as.heap.ptr) {
2021 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2022 }
2023 break;
2024
2025 case T_ZOMBIE:
2026 case T_MOVED:
2027 break;
2028
2029 default:
2030 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2031 BUILTIN_TYPE(obj), (void*)obj);
2032 }
2033
2034 return size + rb_gc_obj_slot_size(obj);
2035}
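/* Usage note: rb_obj_memsize_of() backs ObjectSpace.memsize_of in the
 * bundled objspace extension. A sketch:
 *
 *     require "objspace"
 *     ObjectSpace.memsize_of([1] * 1000)  # slot size + external buffer
 *
 * The value is a hint rather than an exact accounting; as the case arms
 * above show, memory merely shared with other objects is not counted. */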
2036
2037static int
2038set_zero(st_data_t key, st_data_t val, st_data_t arg)
2039{
2040 VALUE k = (VALUE)key;
2041 VALUE hash = (VALUE)arg;
2042 rb_hash_aset(hash, k, INT2FIX(0));
2043 return ST_CONTINUE;
2044}
2045
2046struct count_objects_data {
2047 size_t counts[T_MASK+1];
2048 size_t freed;
2049 size_t total;
2050};
2051
2052static void
2053count_objects_i(VALUE obj, void *d)
2054{
2055 struct count_objects_data *data = (struct count_objects_data *)d;
2056
2057 if (RBASIC(obj)->flags) {
2058 data->counts[BUILTIN_TYPE(obj)]++;
2059 }
2060 else {
2061 data->freed++;
2062 }
2063
2064 data->total++;
2065}
2066
2067/*
2068 * call-seq:
2069 * ObjectSpace.count_objects([result_hash]) -> hash
2070 *
2071 * Counts all objects grouped by type.
2072 *
2073 * It returns a hash, such as:
2074 * {
2075 * :TOTAL=>10000,
2076 * :FREE=>3011,
2077 * :T_OBJECT=>6,
2078 * :T_CLASS=>404,
2079 * # ...
2080 * }
2081 *
2082 * The contents of the returned hash are implementation specific
2083 * and may change in the future.
2084 *
2085 * Keys starting with +:T_+ count live objects.
2086 * For example, +:T_ARRAY+ is the number of arrays.
2087 * +:FREE+ counts object slots which are not currently in use.
2088 * +:TOTAL+ is the sum of the above.
2089 *
2090 * If the optional argument +result_hash+ is given,
2091 * it is overwritten and returned. This is intended to avoid the probe effect.
2092 *
2093 * h = {}
2094 * ObjectSpace.count_objects(h)
2095 * puts h
2096 * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2097 *
2098 * This method is only expected to work on C Ruby.
2099 *
2100 */
2101
2102static VALUE
2103count_objects(int argc, VALUE *argv, VALUE os)
2104{
2105 struct count_objects_data data = { 0 };
2106 VALUE hash = Qnil;
2107
2108 if (rb_check_arity(argc, 0, 1) == 1) {
2109 hash = argv[0];
2110 if (!RB_TYPE_P(hash, T_HASH))
2111 rb_raise(rb_eTypeError, "non-hash given");
2112 }
2113
2114 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2115
2116 if (NIL_P(hash)) {
2117 hash = rb_hash_new();
2118 }
2119 else if (!RHASH_EMPTY_P(hash)) {
2120 rb_hash_stlike_foreach(hash, set_zero, hash);
2121 }
2122 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2123 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2124
2125 for (size_t i = 0; i <= T_MASK; i++) {
2126 VALUE type = type_sym(i);
2127 if (data.counts[i])
2128 rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
2129 }
2130
2131 return hash;
2132}
2133
2134#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2135
2136#define STACK_START (ec->machine.stack_start)
2137#define STACK_END (ec->machine.stack_end)
2138#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2139
2140#if STACK_GROW_DIRECTION < 0
2141# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2142#elif STACK_GROW_DIRECTION > 0
2143# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2144#else
2145# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2146 : (size_t)(STACK_END - STACK_START + 1))
2147#endif
2148#if !STACK_GROW_DIRECTION
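/* Runtime probe sketch: when the build cannot pin STACK_GROW_DIRECTION at
 * compile time, the direction is detected by comparing a stack address
 * supplied by the caller (addr) with one taken in this deeper frame (end):
 * if the deeper frame's address is higher, the machine stack grows upward. */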
2149int ruby_stack_grow_direction;
2150int
2151ruby_get_stack_grow_direction(volatile VALUE *addr)
2152{
2153 VALUE *end;
2154 SET_MACHINE_STACK_END(&end);
2155
2156 if (end > addr) return ruby_stack_grow_direction = 1;
2157 return ruby_stack_grow_direction = -1;
2158}
2159#endif
2160
2161size_t
2162ruby_stack_length(VALUE **p)
2163{
2164 rb_execution_context_t *ec = GET_EC();
2165 SET_STACK_END;
2166 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2167 return STACK_LENGTH;
2168}
2169
2170#define PREVENT_STACK_OVERFLOW 1
2171#ifndef PREVENT_STACK_OVERFLOW
2172#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2173# define PREVENT_STACK_OVERFLOW 1
2174#else
2175# define PREVENT_STACK_OVERFLOW 0
2176#endif
2177#endif
2178#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2179static int
2180stack_check(rb_execution_context_t *ec, int water_mark)
2181{
2182 SET_STACK_END;
2183
2184 size_t length = STACK_LENGTH;
2185 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2186
2187 return length > maximum_length;
2188}
2189#else
2190#define stack_check(ec, water_mark) FALSE
2191#endif
2192
2193#define STACKFRAME_FOR_CALL_CFUNC 2048
2194
2195int
2196rb_ec_stack_check(rb_execution_context_t *ec)
2197{
2198 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2199}
2200
2201int
2202ruby_stack_check(void)
2203{
2204 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2205}
2206
2207/* ==================== Marking ==================== */
2208
2209#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2210 if (!RB_SPECIAL_CONST_P(obj)) { \
2211 rb_vm_t *vm = GET_VM(); \
2212 void *objspace = vm->gc.objspace; \
2213 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2214 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2215 (func)(objspace, (obj_or_ptr)); \
2216 } \
2217 else if (check_obj ? \
2218 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2219 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2220 true) { \
2221 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2222 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2223 vm->gc.mark_func_data = NULL; \
2224 mark_func_data->mark_func((obj), mark_func_data->data); \
2225 vm->gc.mark_func_data = mark_func_data; \
2226 } \
2227 } \
2228} while (0)
2229
2230static inline void
2231gc_mark_internal(VALUE obj)
2232{
2233 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2234}
2235
2236void
2237rb_gc_mark_movable(VALUE obj)
2238{
2239 gc_mark_internal(obj);
2240}
2241
2242void
2243rb_gc_mark_and_move(VALUE *ptr)
2244{
2245 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2246}
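/* Usage sketch: compaction-aware mark functions pass the address of each
 * VALUE slot, so the slot can be marked during marking and rewritten to the
 * object's new location during reference updating:
 *
 *     rb_gc_mark_and_move(&ptr->field);  // rather than rb_gc_mark(ptr->field)
 *
 * (ptr->field is a hypothetical VALUE slot in a GC-managed structure.) */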
2247
2248static inline void
2249gc_mark_and_pin_internal(VALUE obj)
2250{
2251 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2252}
2253
2254void
2255rb_gc_mark(VALUE obj)
2256{
2257 gc_mark_and_pin_internal(obj);
2258}
2259
2260static inline void
2261gc_mark_maybe_internal(VALUE obj)
2262{
2263 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2264}
2265
2266void
2267rb_gc_mark_maybe(VALUE obj)
2268{
2269 gc_mark_maybe_internal(obj);
2270}
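/* rb_gc_mark_maybe() is the conservative variant: the argument may or may
 * not be a live object reference (e.g. a word read off the machine stack).
 * Words that do not point into the GC heap, or that point at garbage
 * objects, are skipped; see the check_obj path of RB_GC_MARK_OR_TRAVERSE
 * above for the traversal case. */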
2271
2272void
2273rb_gc_mark_weak(VALUE *ptr)
2274{
2275 if (RB_SPECIAL_CONST_P(*ptr)) return;
2276
2277 rb_vm_t *vm = GET_VM();
2278 void *objspace = vm->gc.objspace;
2279 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2280 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2281
2282 rb_gc_impl_mark_weak(objspace, ptr);
2283 }
2284 else {
2285 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2286 }
2287}
2288
2289void
2290rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2291{
2292 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2293}
2294
2295ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2296static void
2297each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2298{
2299 VALUE v;
2300 while (n--) {
2301 v = *x;
2302 cb(v, data);
2303 x++;
2304 }
2305}
2306
2307static void
2308each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2309{
2310 if (end <= start) return;
2311 each_location(start, end - start, cb, data);
2312}
2313
2314static void
2315gc_mark_maybe_each_location(VALUE obj, void *data)
2316{
2317 gc_mark_maybe_internal(obj);
2318}
2319
2320void
2321rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2322{
2323 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2324}
2325
2326void
2327rb_gc_mark_values(long n, const VALUE *values)
2328{
2329 for (long i = 0; i < n; i++) {
2330 gc_mark_internal(values[i]);
2331 }
2332}
2333
2334void
2335rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2336{
2337 for (long i = 0; i < n; i++) {
2338 gc_mark_and_pin_internal(values[i]);
2339 }
2340}
2341
2342static int
2343mark_key(st_data_t key, st_data_t value, st_data_t data)
2344{
2345 gc_mark_and_pin_internal((VALUE)key);
2346
2347 return ST_CONTINUE;
2348}
2349
2350void
2351rb_mark_set(st_table *tbl)
2352{
2353 if (!tbl) return;
2354
2355 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2356}
2357
2358static int
2359mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2360{
2361 gc_mark_internal((VALUE)key);
2362 gc_mark_internal((VALUE)value);
2363
2364 return ST_CONTINUE;
2365}
2366
2367static int
2368pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2369{
2370 gc_mark_and_pin_internal((VALUE)key);
2371 gc_mark_and_pin_internal((VALUE)value);
2372
2373 return ST_CONTINUE;
2374}
2375
2376static int
2377pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2378{
2379 gc_mark_and_pin_internal((VALUE)key);
2380 gc_mark_internal((VALUE)value);
2381
2382 return ST_CONTINUE;
2383}
2384
2385static void
2386mark_hash(VALUE hash)
2387{
2388 if (rb_hash_compare_by_id_p(hash)) {
2389 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2390 }
2391 else {
2392 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2393 }
2394
2395 gc_mark_internal(RHASH(hash)->ifnone);
2396}
2397
2398void
2399rb_mark_hash(st_table *tbl)
2400{
2401 if (!tbl) return;
2402
2403 st_foreach(tbl, pin_key_pin_value, 0);
2404}
2405
2406static enum rb_id_table_iterator_result
2407mark_method_entry_i(VALUE me, void *objspace)
2408{
2409 gc_mark_internal(me);
2410
2411 return ID_TABLE_CONTINUE;
2412}
2413
2414static void
2415mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2416{
2417 if (tbl) {
2418 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2419 }
2420}
2421
2422#if STACK_GROW_DIRECTION < 0
2423#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2424#elif STACK_GROW_DIRECTION > 0
2425#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2426#else
2427#define GET_STACK_BOUNDS(start, end, appendix) \
2428 ((STACK_END < STACK_START) ? \
2429 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2430#endif
2431
2432static void
2433gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2434{
2435 gc_mark_maybe_internal(obj);
2436
2437#ifdef RUBY_ASAN_ENABLED
2438 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2439 void *fake_frame_start;
2440 void *fake_frame_end;
2441 bool is_fake_frame = asan_get_fake_stack_extents(
2442 ec->machine.asan_fake_stack_handle, obj,
2443 ec->machine.stack_start, ec->machine.stack_end,
2444 &fake_frame_start, &fake_frame_end
2445 );
2446 if (is_fake_frame) {
2447 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2448 }
2449#endif
2450}
2451
2452static VALUE
2453gc_location_internal(void *objspace, VALUE value)
2454{
2455 if (SPECIAL_CONST_P(value)) {
2456 return value;
2457 }
2458
2459 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2460
2461 return rb_gc_impl_location(objspace, value);
2462}
2463
2464VALUE
2465rb_gc_location(VALUE value)
2466{
2467 return gc_location_internal(rb_gc_get_objspace(), value);
2468}
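/* Usage sketch: a T_DATA dcompact callback calls rb_gc_location() to chase
 * each stored reference to its possibly-moved address (my_data/my_compact
 * are hypothetical):
 *
 *     static void
 *     my_compact(void *p)
 *     {
 *         struct my_data *d = p;
 *         d->obj = rb_gc_location(d->obj);
 *     }
 */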
2469
2470#if defined(__wasm__)
2471
2472
2473static VALUE *rb_stack_range_tmp[2];
2474
2475static void
2476rb_mark_locations(void *begin, void *end)
2477{
2478 rb_stack_range_tmp[0] = begin;
2479 rb_stack_range_tmp[1] = end;
2480}
2481
2482void
2483rb_gc_save_machine_context(void)
2484{
2485 // no-op
2486}
2487
2488# if defined(__EMSCRIPTEN__)
2489
2490static void
2491mark_current_machine_context(const rb_execution_context_t *ec)
2492{
2493 emscripten_scan_stack(rb_mark_locations);
2494 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2495
2496 emscripten_scan_registers(rb_mark_locations);
2497 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2498}
2499# else // use Asyncify version
2500
2501static void
2502mark_current_machine_context(rb_execution_context_t *ec)
2503{
2504 VALUE *stack_start, *stack_end;
2505 SET_STACK_END;
2506 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2507 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2508
2509 rb_wasm_scan_locals(rb_mark_locations);
2510 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2511}
2512
2513# endif
2514
2515#else // !defined(__wasm__)
2516
2517void
2518rb_gc_save_machine_context(void)
2519{
2520 rb_thread_t *thread = GET_THREAD();
2521
2522 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2523}
2524
2525
2526static void
2527mark_current_machine_context(const rb_execution_context_t *ec)
2528{
2529 rb_gc_mark_machine_context(ec);
2530}
2531#endif
2532
2533void
2534rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2535{
2536 VALUE *stack_start, *stack_end;
2537
2538 GET_STACK_BOUNDS(stack_start, stack_end, 0);
2539 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2540
2541 void *data =
2542#ifdef RUBY_ASAN_ENABLED
2543 /* gc_mark_machine_stack_location_maybe() uses data as const */
2544 (void *)ec;
2545#else
2546 NULL;
2547#endif
2548
2549 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2550 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2551 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2552}
2553
2554static int
2555rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2556{
2557 gc_mark_and_pin_internal((VALUE)value);
2558
2559 return ST_CONTINUE;
2560}
2561
2562void
2563rb_mark_tbl(st_table *tbl)
2564{
2565 if (!tbl || tbl->num_entries == 0) return;
2566
2567 st_foreach(tbl, rb_mark_tbl_i, 0);
2568}
2569
2570static void
2571gc_mark_tbl_no_pin(st_table *tbl)
2572{
2573 if (!tbl || tbl->num_entries == 0) return;
2574
2575 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
2576}
2577
2578void
2579rb_mark_tbl_no_pin(st_table *tbl)
2580{
2581 gc_mark_tbl_no_pin(tbl);
2582}
2583
2584static enum rb_id_table_iterator_result
2585mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2586{
2587 struct rb_cvar_class_tbl_entry *entry;
2588
2589 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2590
2591 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2592 gc_mark_internal((VALUE)entry->cref);
2593
2594 return ID_TABLE_CONTINUE;
2595}
2596
2597static void
2598mark_cvc_tbl(void *objspace, VALUE klass)
2599{
2600 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
2601 if (tbl) {
2602 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2603 }
2604}
2605
2606static bool
2607gc_declarative_marking_p(const rb_data_type_t *type)
2608{
2609 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
2610}
2611
2612static enum rb_id_table_iterator_result
2613mark_const_table_i(VALUE value, void *objspace)
2614{
2615 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2616
2617 gc_mark_internal(ce->value);
2618 gc_mark_internal(ce->file);
2619
2620 return ID_TABLE_CONTINUE;
2621}
2622
2623void
2624rb_gc_mark_roots(void *objspace, const char **categoryp)
2625{
2626 rb_execution_context_t *ec = GET_EC();
2627 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2628
2629#define MARK_CHECKPOINT(category) do { \
2630 if (categoryp) *categoryp = category; \
2631} while (0)
2632
2633 MARK_CHECKPOINT("vm");
2634 rb_vm_mark(vm);
2635 if (vm->self) gc_mark_internal(vm->self);
2636
2637 MARK_CHECKPOINT("end_proc");
2638 rb_mark_end_proc();
2639
2640 MARK_CHECKPOINT("global_tbl");
2641 rb_gc_mark_global_tbl();
2642
2643#if USE_YJIT
2644 void rb_yjit_root_mark(void); // in Rust
2645
2646 if (rb_yjit_enabled_p) {
2647 MARK_CHECKPOINT("YJIT");
2648 rb_yjit_root_mark();
2649 }
2650#endif
2651
2652 MARK_CHECKPOINT("machine_context");
2653 mark_current_machine_context(ec);
2654
2655 MARK_CHECKPOINT("global_symbols");
2656 rb_sym_global_symbols_mark();
2657
2658 MARK_CHECKPOINT("finish");
2659
2660#undef MARK_CHECKPOINT
2661}
2662
2663#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA(d)->type->function.dmark
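/* Declarative marking sketch (hypothetical type): with RUBY_TYPED_DECL_MARKING
 * set in rb_data_type_t::flags, the dmark slot is reinterpreted as a
 * RUBY_REF_END-terminated list of VALUE offsets, which rb_gc_mark_children()
 * below walks instead of calling a mark function:
 *
 *     struct my_data { VALUE a; VALUE b; };
 *     static const size_t my_refs[] = {
 *         offsetof(struct my_data, a),
 *         offsetof(struct my_data, b),
 *         RUBY_REF_END,
 *     };
 *     // in the rb_data_type_t:
 *     //   .function.dmark = (RUBY_DATA_FUNC)(uintptr_t)my_refs,
 *     //   .flags = RUBY_TYPED_DECL_MARKING,
 */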
2664
2665void
2666rb_gc_mark_children(void *objspace, VALUE obj)
2667{
2668 if (FL_TEST(obj, FL_EXIVAR)) {
2669 rb_mark_generic_ivar(obj);
2670 }
2671
2672 switch (BUILTIN_TYPE(obj)) {
2673 case T_FLOAT:
2674 case T_BIGNUM:
2675 case T_SYMBOL:
2676 /* Not immediates, but they have no references or singleton class.
2677 *
2678 * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
2679 * ("symbol.c: remove rb_gc_mark_symbols()") */
2680 return;
2681
2682 case T_NIL:
2683 case T_FIXNUM:
2684 rb_bug("rb_gc_mark() called for broken object");
2685 break;
2686
2687 case T_NODE:
2688 UNEXPECTED_NODE(rb_gc_mark);
2689 break;
2690
2691 case T_IMEMO:
2692 rb_imemo_mark_and_move(obj, false);
2693 return;
2694
2695 default:
2696 break;
2697 }
2698
2699 gc_mark_internal(RBASIC(obj)->klass);
2700
2701 switch (BUILTIN_TYPE(obj)) {
2702 case T_CLASS:
2703 if (FL_TEST(obj, FL_SINGLETON)) {
2704 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
2705 }
2706 // Continue to the shared T_CLASS/T_MODULE
2707 case T_MODULE:
2708 if (RCLASS_SUPER(obj)) {
2709 gc_mark_internal(RCLASS_SUPER(obj));
2710 }
2711
2712 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2713 mark_cvc_tbl(objspace, obj);
2714 rb_cc_table_mark(obj);
2715 if (rb_shape_obj_too_complex(obj)) {
2716 gc_mark_tbl_no_pin((st_table *)RCLASS_IVPTR(obj));
2717 }
2718 else {
2719 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
2720 gc_mark_internal(RCLASS_IVPTR(obj)[i]);
2721 }
2722 }
2723
2724 if (RCLASS_CONST_TBL(obj)) {
2725 rb_id_table_foreach_values(RCLASS_CONST_TBL(obj), mark_const_table_i, objspace);
2726 }
2727
2728 gc_mark_internal(RCLASS_EXT(obj)->classpath);
2729 break;
2730
2731 case T_ICLASS:
2732 if (RICLASS_OWNS_M_TBL_P(obj)) {
2733 mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2734 }
2735 if (RCLASS_SUPER(obj)) {
2736 gc_mark_internal(RCLASS_SUPER(obj));
2737 }
2738
2739 if (RCLASS_INCLUDER(obj)) {
2740 gc_mark_internal(RCLASS_INCLUDER(obj));
2741 }
2742 mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
2743 rb_cc_table_mark(obj);
2744 break;
2745
2746 case T_ARRAY:
2747 if (ARY_SHARED_P(obj)) {
2748 VALUE root = ARY_SHARED_ROOT(obj);
2749 gc_mark_internal(root);
2750 }
2751 else {
2752 long len = RARRAY_LEN(obj);
2753 const VALUE *ptr = RARRAY_CONST_PTR(obj);
2754 for (long i = 0; i < len; i++) {
2755 gc_mark_internal(ptr[i]);
2756 }
2757 }
2758 break;
2759
2760 case T_HASH:
2761 mark_hash(obj);
2762 break;
2763
2764 case T_STRING:
2765 if (STR_SHARED_P(obj)) {
2766 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
2767 /* Embedded shared strings cannot be moved because this string
2768 * points into the slot of the shared string. There may be code
2769 * holding an RSTRING_PTR() to it on the stack, which would pin this
2770 * string but not the shared string, allowing the latter to move. */
2771 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
2772 }
2773 else {
2774 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
2775 }
2776 }
2777 break;
2778
2779 case T_DATA: {
2780 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
2781
2782 if (ptr) {
2783 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
2784 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
2785
2786 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
2787 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
2788 }
2789 }
2790 else {
2791 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
2792 RTYPEDDATA(obj)->type->function.dmark :
2793 RDATA(obj)->dmark;
2794 if (mark_func) (*mark_func)(ptr);
2795 }
2796 }
2797
2798 break;
2799 }
2800
2801 case T_OBJECT: {
2802 rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
2803
2804 if (rb_shape_obj_too_complex(obj)) {
2805 gc_mark_tbl_no_pin(ROBJECT_IV_HASH(obj));
2806 }
2807 else {
2808 const VALUE * const ptr = ROBJECT_IVPTR(obj);
2809
2810 uint32_t len = ROBJECT_IV_COUNT(obj);
2811 for (uint32_t i = 0; i < len; i++) {
2812 gc_mark_internal(ptr[i]);
2813 }
2814 }
2815
2816 if (shape) {
2817 VALUE klass = RBASIC_CLASS(obj);
2818
2819 // Increment max_iv_count if applicable, used to determine size pool allocation
2820 attr_index_t num_of_ivs = shape->next_iv_index;
2821 if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
2822 RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
2823 }
2824 }
2825
2826 break;
2827 }
2828
2829 case T_FILE:
2830 if (RFILE(obj)->fptr) {
2831 gc_mark_internal(RFILE(obj)->fptr->self);
2832 gc_mark_internal(RFILE(obj)->fptr->pathv);
2833 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
2834 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
2835 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
2836 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
2837 gc_mark_internal(RFILE(obj)->fptr->write_lock);
2838 gc_mark_internal(RFILE(obj)->fptr->timeout);
2839 }
2840 break;
2841
2842 case T_REGEXP:
2843 gc_mark_internal(RREGEXP(obj)->src);
2844 break;
2845
2846 case T_MATCH:
2847 gc_mark_internal(RMATCH(obj)->regexp);
2848 if (RMATCH(obj)->str) {
2849 gc_mark_internal(RMATCH(obj)->str);
2850 }
2851 break;
2852
2853 case T_RATIONAL:
2854 gc_mark_internal(RRATIONAL(obj)->num);
2855 gc_mark_internal(RRATIONAL(obj)->den);
2856 break;
2857
2858 case T_COMPLEX:
2859 gc_mark_internal(RCOMPLEX(obj)->real);
2860 gc_mark_internal(RCOMPLEX(obj)->imag);
2861 break;
2862
2863 case T_STRUCT: {
2864 const long len = RSTRUCT_LEN(obj);
2865 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
2866
2867 for (long i = 0; i < len; i++) {
2868 gc_mark_internal(ptr[i]);
2869 }
2870
2871 break;
2872 }
2873
2874 default:
2875 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
2876 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
2877 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
2878 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2879 BUILTIN_TYPE(obj), (void *)obj,
2880 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
2881 }
2882}
2883
2884size_t
2885rb_gc_obj_optimal_size(VALUE obj)
2886{
2887 switch (BUILTIN_TYPE(obj)) {
2888 case T_ARRAY:
2889 return rb_ary_size_as_embedded(obj);
2890
2891 case T_OBJECT:
2892 if (rb_shape_obj_too_complex(obj)) {
2893 return sizeof(struct RObject);
2894 }
2895 else {
2896 return rb_obj_embedded_size(ROBJECT_IV_CAPACITY(obj));
2897 }
2898
2899 case T_STRING:
2900 return rb_str_size_as_embedded(obj);
2901
2902 case T_HASH:
2903 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
2904
2905 default:
2906 return 0;
2907 }
2908}
2909
2910void
2911rb_gc_writebarrier(VALUE a, VALUE b)
2912{
2913 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
2914}
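/* Sketch: C code rarely calls this directly. RB_OBJ_WRITE(a, slot, b)
 * stores b into *slot and then fires the barrier, letting the GC record
 * the a -> b edge for generational/incremental marking:
 *
 *     RB_OBJ_WRITE(parent, &ptr->child, child_value);  // hypothetical slot
 */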
2915
2916void
2917rb_gc_writebarrier_unprotect(VALUE obj)
2918{
2919 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
2920}
2921
2922/*
2923 * remember `obj' if needed.
2924 */
2925void
2926rb_gc_writebarrier_remember(VALUE obj)
2927{
2928 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
2929}
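/* Sketch: useful after bulk-updating many slots of `obj` without firing
 * per-slot write barriers; remembering the object makes the GC rescan all
 * of its references on the next cycle rather than trusting its recorded
 * barrier state. */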
2930
2931void
2932rb_gc_copy_attributes(VALUE dest, VALUE obj)
2933{
2934 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
2935}
2936
2937int
2938rb_gc_modular_gc_loaded_p(void)
2939{
2940#if USE_MODULAR_GC
2941 return rb_gc_functions.modular_gc_loaded_p;
2942#else
2943 return false;
2944#endif
2945}
2946
2947const char *
2948rb_gc_active_gc_name(void)
2949{
2950 const char *gc_name = rb_gc_impl_active_gc_name();
2951
2952 const size_t len = strlen(gc_name);
2953 if (len > RB_GC_MAX_NAME_LEN) {
2954 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
2955 RB_GC_MAX_NAME_LEN, len, gc_name);
2956 }
2957
2958 return gc_name;
2959}
2960
2961struct rb_gc_object_metadata_entry *
2962rb_gc_object_metadata(VALUE obj)
2963{
2964 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
2965}
2966
2967/* GC */
2968
2969void *
2970rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
2971{
2972 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
2973}
2974
2975void
2976rb_gc_ractor_cache_free(void *cache)
2977{
2978 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
2979}
2980
2981void
2982rb_gc_register_mark_object(VALUE obj)
2983{
2984 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
2985 return;
2986
2987 rb_vm_register_global_object(obj);
2988}
2989
2990void
2991rb_gc_register_address(VALUE *addr)
2992{
2993 rb_vm_t *vm = GET_VM();
2994
2995 VALUE obj = *addr;
2996
2997 struct global_object_list *tmp = ALLOC(struct global_object_list);
2998 tmp->next = vm->global_object_list;
2999 tmp->varptr = addr;
3000 vm->global_object_list = tmp;
3001
3002 /*
3003 * Because some C extensions have assignment-then-register bugs,
3004     * we defensively guard `obj` here so that it does not get swept.
3005 */
3006 RB_GC_GUARD(obj);
3007 if (0 && !SPECIAL_CONST_P(obj)) {
3008 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3009 rb_obj_class(obj));
3010 rb_print_backtrace(stderr);
3011 }
3012}
3013
3014void
3015rb_gc_unregister_address(VALUE *addr)
3016{
3017 rb_vm_t *vm = GET_VM();
3018 struct global_object_list *tmp = vm->global_object_list;
3019
3020 if (tmp->varptr == addr) {
3021 vm->global_object_list = tmp->next;
3022 xfree(tmp);
3023 return;
3024 }
3025 while (tmp->next) {
3026 if (tmp->next->varptr == addr) {
3027 struct global_object_list *t = tmp->next;
3028
3029 tmp->next = tmp->next->next;
3030 xfree(t);
3031 break;
3032 }
3033 tmp = tmp->next;
3034 }
3035}
3036
3037void
3038rb_global_variable(VALUE *var)
3039{
3040 rb_gc_register_address(var);
3041}
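/* Usage sketch for C extensions: register the address of a long-lived
 * VALUE so the GC treats it as a root:
 *
 *     static VALUE my_cache;  // hypothetical
 *
 *     void
 *     Init_my_ext(void)
 *     {
 *         my_cache = rb_hash_new();
 *         rb_global_variable(&my_cache);
 *     }
 */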
3042
3043static VALUE
3044gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3045{
3046 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3047
3048 return Qnil;
3049}
3050
3051/*
3052 * rb_objspace_each_objects() is a special C API for walking through the
3053 * Ruby object space. This C API is difficult to use correctly.
3054 * Frankly, you should not use it; if you must, read the source code of
3055 * this function first and understand exactly what it does.
3056 *
3057 * 'callback' will be called several times (once per heap page, in the
3058 * current implementation) with:
3059 * vstart: a pointer to the first living object of the heap_page.
3060 * vend: a pointer just past the valid heap_page area.
3061 * stride: the distance to the next VALUE.
3062 *
3063 * If callback() returns non-zero, the iteration will be stopped.
3064 *
3065 * This is a sample callback that iterates over living objects:
3066 *
3067 * static int
3068 * sample_callback(void *vstart, void *vend, int stride, void *data)
3069 * {
3070 * VALUE v = (VALUE)vstart;
3071 * for (; v != (VALUE)vend; v += stride) {
3072 * if (!rb_objspace_internal_object_p(v)) { // liveness check
3073 * // do something with live object 'v'
3074 * }
3075 * }
3076 * return 0; // continue the iteration
3077 * }
3078 *
3079 * Note: 'vstart' is not the top of the heap_page. It points at the first
3080 * living object, in order to grab at least one object and avoid GC issues.
3081 * This means that you cannot walk through every Ruby object page,
3082 * including pages of freed objects.
3083 *
3084 * Note: in this implementation, 'stride' is the same as sizeof(RVALUE).
3085 * However, variable values may be passed as 'stride' for various
3086 * reasons, so you must use 'stride' instead of some constant value in
3087 * the iteration.
3088 */
3089void
3090rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3091{
3092 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3093}
3094
3095static void
3096gc_ref_update_array(void *objspace, VALUE v)
3097{
3098 if (ARY_SHARED_P(v)) {
3099 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3100
3101 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3102
3103 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3104 // If the root is embedded and its location has changed
3105 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3106 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3107 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3108 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3109 }
3110 }
3111 else {
3112 long len = RARRAY_LEN(v);
3113
3114 if (len > 0) {
3115 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3116 for (long i = 0; i < len; i++) {
3117 UPDATE_IF_MOVED(objspace, ptr[i]);
3118 }
3119 }
3120
3121 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3122 if (rb_ary_embeddable_p(v)) {
3123 rb_ary_make_embedded(v);
3124 }
3125 }
3126 }
3127}
3128
3129static void
3130gc_ref_update_object(void *objspace, VALUE v)
3131{
3132 VALUE *ptr = ROBJECT_IVPTR(v);
3133
3134 if (rb_shape_obj_too_complex(v)) {
3135 gc_ref_update_table_values_only(ROBJECT_IV_HASH(v));
3136 return;
3137 }
3138
3139 size_t slot_size = rb_gc_obj_slot_size(v);
3140 size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
3141 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3142 // Object can be re-embedded
3143 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
3144 RB_FL_SET_RAW(v, ROBJECT_EMBED);
3145 xfree(ptr);
3146 ptr = ROBJECT(v)->as.ary;
3147 }
3148
3149 for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
3150 UPDATE_IF_MOVED(objspace, ptr[i]);
3151 }
3152}
3153
3154void
3155rb_gc_ref_update_table_values_only(st_table *tbl)
3156{
3157 gc_ref_update_table_values_only(tbl);
3158}
3159
3160/* Update MOVED references in a VALUE=>VALUE st_table */
3161void
3162rb_gc_update_tbl_refs(st_table *ptr)
3163{
3164 gc_update_table_refs(ptr);
3165}
3166
3167static void
3168gc_ref_update_hash(void *objspace, VALUE v)
3169{
3170 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3171}
3172
3173static void
3174gc_update_values(void *objspace, long n, VALUE *values)
3175{
3176 for (long i = 0; i < n; i++) {
3177 UPDATE_IF_MOVED(objspace, values[i]);
3178 }
3179}
3180
3181void
3182rb_gc_update_values(long n, VALUE *values)
3183{
3184 gc_update_values(rb_gc_get_objspace(), n, values);
3185}
3186
3187static enum rb_id_table_iterator_result
3188check_id_table_move(VALUE value, void *data)
3189{
3190 void *objspace = (void *)data;
3191
3192 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3193 return ID_TABLE_REPLACE;
3194 }
3195
3196 return ID_TABLE_CONTINUE;
3197}
3198
3199void
3200rb_gc_prepare_heap_process_object(VALUE obj)
3201{
3202 switch (BUILTIN_TYPE(obj)) {
3203 case T_STRING:
3204 // Precompute the string coderange. This both saves time for when it will
3205 // eventually be needed, and avoids mutating heap pages after a potential fork.
3206 rb_enc_str_coderange(obj);
3207 break;
3208 default:
3209 break;
3210 }
3211}
3212
3213void
3214rb_gc_prepare_heap(void)
3215{
3216 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3217}
3218
3219size_t
3220rb_gc_heap_id_for_size(size_t size)
3221{
3222 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3223}
3224
3225bool
3226rb_gc_size_allocatable_p(size_t size)
3227{
3228 return rb_gc_impl_size_allocatable_p(size);
3229}
3230
3231static enum rb_id_table_iterator_result
3232update_id_table(VALUE *value, void *data, int existing)
3233{
3234 void *objspace = (void *)data;
3235
3236 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3237 *value = gc_location_internal(objspace, (VALUE)*value);
3238 }
3239
3240 return ID_TABLE_CONTINUE;
3241}
3242
3243static void
3244update_m_tbl(void *objspace, struct rb_id_table *tbl)
3245{
3246 if (tbl) {
3247 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3248 }
3249}
3250
3251static enum rb_id_table_iterator_result
3252update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3253{
3254 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3255 VM_ASSERT(vm_ccs_p(ccs));
3256
3257 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3258 ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
3259 }
3260
3261 for (int i=0; i<ccs->len; i++) {
3262 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3263 ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
3264 }
3265 }
3266
3267 // do not replace
3268 return ID_TABLE_CONTINUE;
3269}
3270
3271static void
3272update_cc_tbl(void *objspace, VALUE klass)
3273{
3274 struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
3275 if (tbl) {
3276 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3277 }
3278}
3279
3280static enum rb_id_table_iterator_result
3281update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3282{
3283 struct rb_cvar_class_tbl_entry *entry;
3284
3285 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3286
3287 if (entry->cref) {
3288 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3289 }
3290
3291 entry->class_value = gc_location_internal(objspace, entry->class_value);
3292
3293 return ID_TABLE_CONTINUE;
3294}
3295
3296static void
3297update_cvc_tbl(void *objspace, VALUE klass)
3298{
3299 struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
3300 if (tbl) {
3301 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3302 }
3303}
3304
3305static enum rb_id_table_iterator_result
3306update_const_table(VALUE value, void *objspace)
3307{
3308 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3309
3310 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3311 ce->value = gc_location_internal(objspace, ce->value);
3312 }
3313
3314 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3315 ce->file = gc_location_internal(objspace, ce->file);
3316 }
3317
3318 return ID_TABLE_CONTINUE;
3319}
3320
3321static void
3322update_const_tbl(void *objspace, struct rb_id_table *tbl)
3323{
3324 if (!tbl) return;
3325 rb_id_table_foreach_values(tbl, update_const_table, objspace);
3326}
3327
3328static void
3329update_subclass_entries(void *objspace, rb_subclass_entry_t *entry)
3330{
3331 while (entry) {
3332 UPDATE_IF_MOVED(objspace, entry->klass);
3333 entry = entry->next;
3334 }
3335}
3336
3337static void
3338update_class_ext(void *objspace, rb_classext_t *ext)
3339{
3340 UPDATE_IF_MOVED(objspace, ext->origin_);
3341 UPDATE_IF_MOVED(objspace, ext->includer);
3342 UPDATE_IF_MOVED(objspace, ext->refined_class);
3343 update_subclass_entries(objspace, ext->subclasses);
3344}
3345
3346static void
3347update_superclasses(void *objspace, VALUE obj)
3348{
3349 if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3350 for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
3351 UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
3352 }
3353 }
3354}
3355
3356extern rb_symbols_t ruby_global_symbols;
3357#define global_symbols ruby_global_symbols
3358
3359struct global_vm_table_foreach_data {
3360 vm_table_foreach_callback_func callback;
3361 vm_table_update_callback_func update_callback;
3362 void *data;
3363 bool weak_only;
3364};
3365
3366static int
3367vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3368{
3369 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3370
3371 int ret = iter_data->callback((VALUE)key, iter_data->data);
3372
3373 if (!iter_data->weak_only) {
3374 if (ret != ST_CONTINUE) return ret;
3375
3376 ret = iter_data->callback((VALUE)value, iter_data->data);
3377 }
3378
3379 return ret;
3380}
3381
3382static int
3383vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3384{
3385 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3386
3387 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3388
3389 if (!iter_data->weak_only) {
3390 if (ret != ST_CONTINUE) return ret;
3391
3392 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3393 }
3394
3395 return ret;
3396}
3397
3398static int
3399vm_weak_table_str_sym_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3400{
3401 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3402
3403 if (!iter_data->weak_only) {
3404 int ret = iter_data->callback((VALUE)key, iter_data->data);
3405 if (ret != ST_CONTINUE) return ret;
3406 }
3407
3408 if (STATIC_SYM_P(value)) {
3409 return ST_CONTINUE;
3410 }
3411 else {
3412 return iter_data->callback((VALUE)value, iter_data->data);
3413 }
3414}
3415
3416static int
3417vm_weak_table_foreach_update_weak_value(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3418{
3419 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3420
3421 if (!iter_data->weak_only) {
3422 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3423 if (ret != ST_CONTINUE) return ret;
3424 }
3425
3426 return iter_data->update_callback((VALUE *)value, iter_data->data);
3427}
3428
3429static void
3430free_gen_ivtbl(VALUE obj, struct gen_ivtbl *ivtbl)
3431{
3432 if (UNLIKELY(rb_shape_obj_too_complex(obj))) {
3433 st_free_table(ivtbl->as.complex.table);
3434 }
3435
3436 xfree(ivtbl);
3437}
3438
3439static int
3440vm_weak_table_gen_ivar_foreach_too_complex_i(st_data_t _key, st_data_t value, st_data_t data, int error)
3441{
3442 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3443
3444 GC_ASSERT(!iter_data->weak_only);
3445
3446 if (SPECIAL_CONST_P((VALUE)value)) return ST_CONTINUE;
3447
3448 return iter_data->callback((VALUE)value, iter_data->data);
3449}
3450
3451static int
3452vm_weak_table_gen_ivar_foreach_too_complex_replace_i(st_data_t *_key, st_data_t *value, st_data_t data, int existing)
3453{
3454 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3455
3456 GC_ASSERT(!iter_data->weak_only);
3457
3458 return iter_data->update_callback((VALUE *)value, iter_data->data);
3459}
3460
3461struct st_table *rb_generic_ivtbl_get(void);
3462
3463static int
3464vm_weak_table_gen_ivar_foreach(st_data_t key, st_data_t value, st_data_t data)
3465{
3466 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3467
3468 int ret = iter_data->callback((VALUE)key, iter_data->data);
3469
3470 switch (ret) {
3471 case ST_CONTINUE:
3472 break;
3473
3474 case ST_DELETE:
3475 free_gen_ivtbl((VALUE)key, (struct gen_ivtbl *)value);
3476
3477 FL_UNSET((VALUE)key, FL_EXIVAR);
3478 return ST_DELETE;
3479
3480 case ST_REPLACE: {
3481 VALUE new_key = (VALUE)key;
3482 ret = iter_data->update_callback(&new_key, iter_data->data);
3483 if (key != new_key) ret = ST_DELETE;
3484 DURING_GC_COULD_MALLOC_REGION_START();
3485 {
3486 st_insert(rb_generic_ivtbl_get(), (st_data_t)new_key, value);
3487 }
3488 DURING_GC_COULD_MALLOC_REGION_END();
3489 key = (st_data_t)new_key;
3490 break;
3491 }
3492
3493 default:
3494 return ret;
3495 }
3496
3497 if (!iter_data->weak_only) {
3498 struct gen_ivtbl *ivtbl = (struct gen_ivtbl *)value;
3499
3500 if (rb_shape_obj_too_complex((VALUE)key)) {
3501 st_foreach_with_replace(
3502 ivtbl->as.complex.table,
3503 vm_weak_table_gen_ivar_foreach_too_complex_i,
3504 vm_weak_table_gen_ivar_foreach_too_complex_replace_i,
3505 data
3506 );
3507 }
3508 else {
3509 for (uint32_t i = 0; i < ivtbl->as.shape.numiv; i++) {
3510 if (SPECIAL_CONST_P(ivtbl->as.shape.ivptr[i])) continue;
3511
3512 int ivar_ret = iter_data->callback(ivtbl->as.shape.ivptr[i], iter_data->data);
3513 switch (ivar_ret) {
3514 case ST_CONTINUE:
3515 break;
3516 case ST_REPLACE:
3517 iter_data->update_callback(&ivtbl->as.shape.ivptr[i], iter_data->data);
3518 break;
3519 default:
3520 rb_bug("vm_weak_table_gen_ivar_foreach: return value %d not supported", ivar_ret);
3521 }
3522 }
3523 }
3524 }
3525
3526 return ret;
3527}
3528
3529static int
3530vm_weak_table_frozen_strings_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3531{
3532 int retval = vm_weak_table_foreach_weak_key(key, value, data, error);
3533 if (retval == ST_DELETE) {
3534 FL_UNSET((VALUE)key, RSTRING_FSTR);
3535 }
3536 return retval;
3537}
3538
3539void rb_fstring_foreach_with_replace(st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg);
3540void
3541rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
3542 vm_table_update_callback_func update_callback,
3543 void *data,
3544 bool weak_only,
3545 enum rb_gc_vm_weak_tables table)
3546{
3547 rb_vm_t *vm = GET_VM();
3548
3549 struct global_vm_table_foreach_data foreach_data = {
3550 .callback = callback,
3551 .update_callback = update_callback,
3552 .data = data,
3553 .weak_only = weak_only,
3554 };
3555
3556 switch (table) {
3557 case RB_GC_VM_CI_TABLE: {
3558 if (vm->ci_table) {
3559 st_foreach_with_replace(
3560 vm->ci_table,
3561 vm_weak_table_foreach_weak_key,
3562 vm_weak_table_foreach_update_weak_key,
3563 (st_data_t)&foreach_data
3564 );
3565 }
3566 break;
3567 }
3568 case RB_GC_VM_OVERLOADED_CME_TABLE: {
3569 if (vm->overloaded_cme_table) {
3570 st_foreach_with_replace(
3571 vm->overloaded_cme_table,
3572 vm_weak_table_foreach_weak_key,
3573 vm_weak_table_foreach_update_weak_key,
3574 (st_data_t)&foreach_data
3575 );
3576 }
3577 break;
3578 }
3579 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
3580 if (global_symbols.str_sym) {
3581 st_foreach_with_replace(
3582 global_symbols.str_sym,
3583 vm_weak_table_str_sym_foreach,
3584 vm_weak_table_foreach_update_weak_value,
3585 (st_data_t)&foreach_data
3586 );
3587 }
3588 break;
3589 }
3590 case RB_GC_VM_GENERIC_IV_TABLE: {
3591 st_table *generic_iv_tbl = rb_generic_ivtbl_get();
3592 if (generic_iv_tbl) {
3593 st_foreach(
3594 generic_iv_tbl,
3595 vm_weak_table_gen_ivar_foreach,
3596 (st_data_t)&foreach_data
3597 );
3598 }
3599 break;
3600 }
3601 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
3602 rb_fstring_foreach_with_replace(
3603 vm_weak_table_frozen_strings_foreach,
3604 vm_weak_table_foreach_update_weak_key,
3605 (st_data_t)&foreach_data
3606 );
3607 break;
3608 }
3609 default:
3610 rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
3611 }
3612}
3613
3614void
3615rb_gc_update_vm_references(void *objspace)
3616{
3617 rb_execution_context_t *ec = GET_EC();
3618 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3619
3620 rb_vm_update_references(vm);
3621 rb_gc_update_global_tbl();
3622 rb_sym_global_symbols_update_references();
3623
3624#if USE_YJIT
3625 void rb_yjit_root_update_references(void); // in Rust
3626
3627 if (rb_yjit_enabled_p) {
3628 rb_yjit_root_update_references();
3629 }
3630#endif
3631}
3632
3633void
3634rb_gc_update_object_references(void *objspace, VALUE obj)
3635{
3636 switch (BUILTIN_TYPE(obj)) {
3637 case T_CLASS:
3638 if (FL_TEST(obj, FL_SINGLETON)) {
3639 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
3640 }
3641 // Continue to the shared T_CLASS/T_MODULE
3642 case T_MODULE:
3643 if (RCLASS_SUPER((VALUE)obj)) {
3644 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3645 }
3646 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3647 update_cc_tbl(objspace, obj);
3648 update_cvc_tbl(objspace, obj);
3649 update_superclasses(objspace, obj);
3650
3651 if (rb_shape_obj_too_complex(obj)) {
3652 gc_ref_update_table_values_only(RCLASS_IV_HASH(obj));
3653 }
3654 else {
3655 for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
3656 UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
3657 }
3658 }
3659
3660 update_class_ext(objspace, RCLASS_EXT(obj));
3661 update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3662
3663 UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
3664 break;
3665
3666 case T_ICLASS:
3667 if (RICLASS_OWNS_M_TBL_P(obj)) {
3668 update_m_tbl(objspace, RCLASS_M_TBL(obj));
3669 }
3670 if (RCLASS_SUPER((VALUE)obj)) {
3671 UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3672 }
3673 update_class_ext(objspace, RCLASS_EXT(obj));
3674 update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
3675 update_cc_tbl(objspace, obj);
3676 break;
3677
3678 case T_IMEMO:
3679 rb_imemo_mark_and_move(obj, true);
3680 return;
3681
3682 case T_NIL:
3683 case T_FIXNUM:
3684 case T_NODE:
3685 case T_MOVED:
3686 case T_NONE:
3687 /* These can't move */
3688 return;
3689
3690 case T_ARRAY:
3691 gc_ref_update_array(objspace, obj);
3692 break;
3693
3694 case T_HASH:
3695 gc_ref_update_hash(objspace, obj);
3696 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
3697 break;
3698
3699 case T_STRING:
3700 {
3701 if (STR_SHARED_P(obj)) {
3702 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
3703 }
3704
3705 /* If, after the move, the string is not embedded but can fit in the
3706 * slot it has been placed in, then re-embed it. */
3707 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
3708 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
3709 rb_str_make_embedded(obj);
3710 }
3711 }
3712
3713 break;
3714 }
3715 case T_DATA:
3716 /* Call the compaction callback, if it exists */
3717 {
3718 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3719 if (ptr) {
3720 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
3721 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3722
3723 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3724 VALUE *ref = (VALUE *)((char *)ptr + offset);
3725 *ref = gc_location_internal(objspace, *ref);
3726 }
3727 }
3728 else if (RTYPEDDATA_P(obj)) {
3729 RUBY_DATA_FUNC compact_func = RTYPEDDATA(obj)->type->function.dcompact;
3730 if (compact_func) (*compact_func)(ptr);
3731 }
3732 }
3733 }
3734 break;
3735
3736 case T_OBJECT:
3737 gc_ref_update_object(objspace, obj);
3738 break;
3739
3740 case T_FILE:
3741 if (RFILE(obj)->fptr) {
3742 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
3743 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
3744 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
3745 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
3746 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
3747 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
3748 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
3749 }
3750 break;
3751 case T_REGEXP:
3752 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
3753 break;
3754
3755 case T_SYMBOL:
3756 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
3757 break;
3758
3759 case T_FLOAT:
3760 case T_BIGNUM:
3761 break;
3762
3763 case T_MATCH:
3764 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
3765
3766 if (RMATCH(obj)->str) {
3767 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
3768 }
3769 break;
3770
3771 case T_RATIONAL:
3772 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
3773 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
3774 break;
3775
3776 case T_COMPLEX:
3777 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
3778 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
3779
3780 break;
3781
3782 case T_STRUCT:
3783 {
3784 long i, len = RSTRUCT_LEN(obj);
3785 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
3786
3787 for (i = 0; i < len; i++) {
3788 UPDATE_IF_MOVED(objspace, ptr[i]);
3789 }
3790 }
3791 break;
3792 default:
3793 rb_bug("unreachable");
3794 break;
3795 }
3796
3797 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
3798}
3799
3800VALUE
3801rb_gc_start(void)
3802{
3803 rb_gc();
3804 return Qnil;
3805}
3806
3807void
3808rb_gc(void)
3809{
3810 unless_objspace(objspace) { return; }
3811
3812 rb_gc_impl_start(objspace, true, true, true, false);
3813}
3814
3815int
3816rb_during_gc(void)
3817{
3818 unless_objspace(objspace) { return FALSE; }
3819
3820 return rb_gc_impl_during_gc_p(objspace);
3821}
3822
3823size_t
3824rb_gc_count(void)
3825{
3826 return rb_gc_impl_gc_count(rb_gc_get_objspace());
3827}
3828
3829static VALUE
3830gc_count(rb_execution_context_t *ec, VALUE self)
3831{
3832 return SIZET2NUM(rb_gc_count());
3833}
3834
3835VALUE
3836rb_gc_latest_gc_info(VALUE key)
3837{
3838 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
3839 rb_raise(rb_eTypeError, "non-hash or symbol given");
3840 }
3841
3842 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
3843
3844 if (val == Qundef) {
3845 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
3846 }
3847
3848 return val;
3849}
3850
3851static VALUE
3852gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
3853{
3854 if (NIL_P(arg)) {
3855 arg = rb_hash_new();
3856 }
3857 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3858 rb_raise(rb_eTypeError, "non-hash or symbol given");
3859 }
3860
3861 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3862
3863 if (ret == Qundef) {
3864 GC_ASSERT(SYMBOL_P(arg));
3865
3866 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3867 }
3868
3869 return ret;
3870}
3871
3872size_t
3873rb_gc_stat(VALUE arg)
3874{
3875 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3876 rb_raise(rb_eTypeError, "non-hash or symbol given");
3877 }
3878
3879 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3880
3881 if (ret == Qundef) {
3882 GC_ASSERT(SYMBOL_P(arg));
3883
3884 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3885 }
3886
3887 if (SYMBOL_P(arg)) {
3888 return NUM2SIZET(ret);
3889 }
3890 else {
3891 return 0;
3892 }
3893}
3894
3895static VALUE
3896gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
3897{
3898 if (NIL_P(arg)) {
3899 arg = rb_hash_new();
3900 }
3901
3902 if (NIL_P(heap_name)) {
3903 if (!RB_TYPE_P(arg, T_HASH)) {
3904 rb_raise(rb_eTypeError, "non-hash given");
3905 }
3906 }
3907 else if (FIXNUM_P(heap_name)) {
3908 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
3909 rb_raise(rb_eTypeError, "non-hash or symbol given");
3910 }
3911 }
3912 else {
3913 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
3914 }
3915
3916 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
3917
3918 if (ret == Qundef) {
3919 GC_ASSERT(SYMBOL_P(arg));
3920
3921 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3922 }
3923
3924 return ret;
3925}
3926
3927static VALUE
3928gc_config_get(rb_execution_context_t *ec, VALUE self)
3929{
3930 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
3931 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
3932
3933 return cfg_hash;
3934}
3935
3936static VALUE
3937gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
3938{
3939 void *objspace = rb_gc_get_objspace();
3940
3941 rb_gc_impl_config_set(objspace, hash);
3942
3943 return rb_gc_impl_config_get(objspace);
3944}
3945
3946static VALUE
3947gc_stress_get(rb_execution_context_t *ec, VALUE self)
3948{
3949 return rb_gc_impl_stress_get(rb_gc_get_objspace());
3950}
3951
3952static VALUE
3953gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
3954{
3955 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
3956
3957 return flag;
3958}
3959
3960void
3961rb_gc_initial_stress_set(VALUE flag)
3962{
3963 initial_stress = flag;
3964}
3965
3966size_t *
3967rb_gc_heap_sizes(void)
3968{
3969 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
3970}
3971
3972VALUE
3973rb_gc_enable(void)
3974{
3975 return rb_objspace_gc_enable(rb_gc_get_objspace());
3976}
3977
3978VALUE
3979rb_objspace_gc_enable(void *objspace)
3980{
3981 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3982 rb_gc_impl_gc_enable(objspace);
3983 return RBOOL(disabled);
3984}
3985
3986static VALUE
3987gc_enable(rb_execution_context_t *ec, VALUE _)
3988{
3989 return rb_gc_enable();
3990}
3991
3992static VALUE
3993gc_disable_no_rest(void *objspace)
3994{
3995 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3996 rb_gc_impl_gc_disable(objspace, false);
3997 return RBOOL(disabled);
3998}
3999
4000VALUE
4001rb_gc_disable_no_rest(void)
4002{
4003 return gc_disable_no_rest(rb_gc_get_objspace());
4004}
4005
4006VALUE
4007rb_gc_disable(void)
4008{
4009 return rb_objspace_gc_disable(rb_gc_get_objspace());
4010}
4011
4012VALUE
4013rb_objspace_gc_disable(void *objspace)
4014{
4015 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4016 rb_gc_impl_gc_disable(objspace, true);
4017 return RBOOL(disabled);
4018}
4019
4020static VALUE
4021gc_disable(rb_execution_context_t *ec, VALUE _)
4022{
4023 return rb_gc_disable();
4024}
4025
4026// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4027void
4028ruby_gc_set_params(void)
4029{
4030 rb_gc_impl_set_params(rb_gc_get_objspace());
4031}
4032
4033void
4034rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4035{
4036 RB_VM_LOCK_ENTER();
4037 {
4038 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
4039
4040 if (!RB_SPECIAL_CONST_P(obj)) {
4041 rb_vm_t *vm = GET_VM();
4042 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4043 struct gc_mark_func_data_struct mfd = {
4044 .mark_func = func,
4045 .data = data,
4046 };
4047
4048 vm->gc.mark_func_data = &mfd;
4049 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4050 vm->gc.mark_func_data = prev_mfd;
4051 }
4052 }
4053 RB_VM_LOCK_LEAVE();
4054}
4055
4056struct root_objects_data {
4057 const char *category;
4058 void (*func)(const char *category, VALUE, void *);
4059 void *data;
4060};
4061
4062static void
4063root_objects_from(VALUE obj, void *ptr)
4064{
4065 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4066 (*data->func)(data->category, obj, data->data);
4067}
4068
4069void
4070rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4071{
4072 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4073
4074 rb_vm_t *vm = GET_VM();
4075
4076 struct root_objects_data data = {
4077 .func = func,
4078 .data = passing_data,
4079 };
4080
4081 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4082 struct gc_mark_func_data_struct mfd = {
4083 .mark_func = root_objects_from,
4084 .data = &data,
4085 };
4086
4087 vm->gc.mark_func_data = &mfd;
4088 rb_gc_save_machine_context();
4089 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4090 vm->gc.mark_func_data = prev_mfd;
4091}
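
/* rb_objspace_reachable_objects_from_root() walks the VM root set rather
 * than a single object; the callback additionally receives the category
 * string that rb_gc_mark_roots() stores through data.category. A hedged
 * sketch that prints each root (the callback name is illustrative):
 *
 *     static void
 *     print_root(const char *category, VALUE obj, void *data)
 *     {
 *         fprintf(stderr, "%s: %p\n", category, (void *)obj);
 *     }
 *
 *     rb_objspace_reachable_objects_from_root(print_root, NULL);
 */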

/*
  ------------------------------ DEBUG ------------------------------
*/

static const char *
type_name(int type, VALUE obj)
{
    switch (type) {
#define TYPE_NAME(t) case (t): return #t;
        TYPE_NAME(T_NONE);
        TYPE_NAME(T_OBJECT);
        TYPE_NAME(T_CLASS);
        TYPE_NAME(T_MODULE);
        TYPE_NAME(T_FLOAT);
        TYPE_NAME(T_STRING);
        TYPE_NAME(T_REGEXP);
        TYPE_NAME(T_ARRAY);
        TYPE_NAME(T_HASH);
        TYPE_NAME(T_STRUCT);
        TYPE_NAME(T_BIGNUM);
        TYPE_NAME(T_FILE);
        TYPE_NAME(T_MATCH);
        TYPE_NAME(T_COMPLEX);
        TYPE_NAME(T_RATIONAL);
        TYPE_NAME(T_NIL);
        TYPE_NAME(T_TRUE);
        TYPE_NAME(T_FALSE);
        TYPE_NAME(T_SYMBOL);
        TYPE_NAME(T_FIXNUM);
        TYPE_NAME(T_UNDEF);
        TYPE_NAME(T_IMEMO);
        TYPE_NAME(T_ICLASS);
        TYPE_NAME(T_MOVED);
        TYPE_NAME(T_ZOMBIE);
      case T_DATA:
        if (obj && rb_objspace_data_type_name(obj)) {
            return rb_objspace_data_type_name(obj);
        }
        return "T_DATA";
#undef TYPE_NAME
    }
    return "unknown";
}

static const char *
obj_type_name(VALUE obj)
{
    return type_name(TYPE(obj), obj);
}

const char *
rb_method_type_name(rb_method_type_t type)
{
    switch (type) {
      case VM_METHOD_TYPE_ISEQ: return "iseq";
      case VM_METHOD_TYPE_ATTRSET: return "attrset";
      case VM_METHOD_TYPE_IVAR: return "ivar";
      case VM_METHOD_TYPE_BMETHOD: return "bmethod";
      case VM_METHOD_TYPE_ALIAS: return "alias";
      case VM_METHOD_TYPE_REFINED: return "refined";
      case VM_METHOD_TYPE_CFUNC: return "cfunc";
      case VM_METHOD_TYPE_ZSUPER: return "zsuper";
      case VM_METHOD_TYPE_MISSING: return "missing";
      case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
      case VM_METHOD_TYPE_UNDEF: return "undef";
      case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
    }
    rb_bug("rb_method_type_name: unreachable (type: %d)", type);
}

static void
rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
{
    if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
        VALUE path = rb_iseq_path(iseq);
        int n = ISEQ_BODY(iseq)->location.first_lineno;
        snprintf(buff, buff_size, " %s@%s:%d",
                 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
                 RSTRING_PTR(path), n);
    }
}

static int
str_len_no_raise(VALUE str)
{
    long len = RSTRING_LEN(str);
    if (len < 0) return 0;
    if (len > INT_MAX) return INT_MAX;
    return (int)len;
}

#define BUFF_ARGS buff + pos, buff_size - pos
#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
#define APPEND_S(s) do { \
        if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
            goto end; \
        } \
        else { \
            memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
        } \
    } while (0)
#define C(c, s) ((c) != 0 ? (s) : " ")
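
/* The APPEND_* macros accumulate formatted output into the enclosing
 * function's buff, advancing a local pos and jumping to a function-local
 * `end` label once the buffer is full, so the rb_raw_obj_info_* helpers
 * below truncate instead of overflowing. Expanded by hand, APPEND_F("x:%d", v)
 * behaves roughly like:
 *
 *     pos += snprintf(buff + pos, buff_size - pos, "x:%d", v);
 *     if (pos >= buff_size) goto end;  // snprintf reports the would-be length
 */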

static size_t
rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
{
    size_t pos = 0;

    if (SPECIAL_CONST_P(obj)) {
        APPEND_F("%s", obj_type_name(obj));

        if (FIXNUM_P(obj)) {
            APPEND_F(" %ld", FIX2LONG(obj));
        }
        else if (SYMBOL_P(obj)) {
            APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
        }
    }
    else {
        // const int age = RVALUE_AGE_GET(obj);

        if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
            // TODO: fixme
            // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
            //          (void *)obj, age,
            //          C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
            //          C(RVALUE_MARK_BITMAP(obj), "M"),
            //          C(RVALUE_PIN_BITMAP(obj), "P"),
            //          C(RVALUE_MARKING_BITMAP(obj), "R"),
            //          C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
            //          C(rb_objspace_garbage_object_p(obj), "G"),
            //          obj_type_name(obj));
        }
        else {
            /* fake */
            // APPEND_F("%p [%dXXXX] %s",
            //          (void *)obj, age,
            //          obj_type_name(obj));
        }

        if (internal_object_p(obj)) {
            /* ignore */
        }
        else if (RBASIC(obj)->klass == 0) {
            APPEND_S("(temporary internal)");
        }
        else if (RTEST(RBASIC(obj)->klass)) {
            VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
            if (!NIL_P(class_path)) {
                APPEND_F("(%s)", RSTRING_PTR(class_path));
            }
        }
    }
  end:

    return pos;
}

const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);

static size_t
rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
{
    if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
        const enum ruby_value_type type = BUILTIN_TYPE(obj);

        switch (type) {
          case T_NODE:
            UNEXPECTED_NODE(rb_raw_obj_info);
            break;
          case T_ARRAY:
            if (ARY_SHARED_P(obj)) {
                APPEND_S("shared -> ");
                rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
            }
            else if (ARY_EMBED_P(obj)) {
                APPEND_F("[%s%s] len: %ld (embed)",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj));
            }
            else {
                APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
                         C(ARY_EMBED_P(obj), "E"),
                         C(ARY_SHARED_P(obj), "S"),
                         RARRAY_LEN(obj),
                         ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
                         (void *)RARRAY_CONST_PTR(obj));
            }
            break;
          case T_STRING: {
            if (STR_SHARED_P(obj)) {
                APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
            }
            else {
                if (STR_EMBED_P(obj)) APPEND_S(" [embed]");

                APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
            }
            APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
            break;
          }
          case T_SYMBOL: {
            VALUE fstr = RSYMBOL(obj)->fstr;
            ID id = RSYMBOL(obj)->id;
            if (RB_TYPE_P(fstr, T_STRING)) {
                APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
            }
            else {
                APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
            }
            break;
          }
          case T_MOVED: {
            APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
            break;
          }
          case T_HASH: {
            APPEND_F("[%c] %"PRIdSIZE,
                     RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
                     RHASH_SIZE(obj));
            break;
          }
          case T_CLASS:
          case T_MODULE:
            {
                VALUE class_path = rb_class_path_cached(obj);
                if (!NIL_P(class_path)) {
                    APPEND_F("%s", RSTRING_PTR(class_path));
                }
                else {
                    APPEND_S("(anon)");
                }
                break;
            }
          case T_ICLASS:
            {
                VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
                if (!NIL_P(class_path)) {
                    APPEND_F("src:%s", RSTRING_PTR(class_path));
                }
                break;
            }
          case T_OBJECT:
            {
                if (rb_shape_obj_too_complex(obj)) {
                    size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
                    APPEND_F("(too_complex) len:%zu", hash_len);
                }
                else {
                    uint32_t len = ROBJECT_IV_CAPACITY(obj);

                    if (RBASIC(obj)->flags & ROBJECT_EMBED) {
                        APPEND_F("(embed) len:%d", len);
                    }
                    else {
                        VALUE *ptr = ROBJECT_IVPTR(obj);
                        APPEND_F("len:%d ptr:%p", len, (void *)ptr);
                    }
                }
            }
            break;
          case T_DATA: {
            const struct rb_block *block;
            const rb_iseq_t *iseq;
            if (rb_obj_is_proc(obj) &&
                (block = vm_proc_block(obj)) != NULL &&
                (vm_block_type(block) == block_type_iseq) &&
                (iseq = vm_block_iseq(block)) != NULL) {
                rb_raw_iseq_info(BUFF_ARGS, iseq);
            }
            else if (rb_ractor_p(obj)) {
                rb_ractor_t *r = (void *)DATA_PTR(obj);
                if (r) {
                    APPEND_F("r:%d", r->pub.id);
                }
            }
            else {
                const char * const type_name = rb_objspace_data_type_name(obj);
                if (type_name) {
                    APPEND_F("%s", type_name);
                }
            }
            break;
          }
          case T_IMEMO: {
            APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));

            switch (imemo_type(obj)) {
              case imemo_ment:
                {
                    const rb_method_entry_t *me = (const rb_method_entry_t *)obj;

                    APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
                             rb_id2name(me->called_id),
                             METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
                             METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
                             METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
                             METHOD_ENTRY_CACHED(me) ? ",cc" : "",
                             METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
                             me->def ? rb_method_type_name(me->def->type) : "NULL",
                             me->def ? me->def->aliased : -1,
                             (void *)me->owner, // obj_info(me->owner),
                             (void *)me->defined_class); //obj_info(me->defined_class)));

                    if (me->def) {
                        switch (me->def->type) {
                          case VM_METHOD_TYPE_ISEQ:
                            APPEND_S(" (iseq:");
                            rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
                            APPEND_S(")");
                            break;
                          default:
                            break;
                        }
                    }

                    break;
                }
              case imemo_iseq: {
                const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
                rb_raw_iseq_info(BUFF_ARGS, iseq);
                break;
              }
              case imemo_callinfo:
                {
                    const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
                    APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
                             rb_id2name(vm_ci_mid(ci)),
                             vm_ci_flag(ci),
                             vm_ci_argc(ci),
                             vm_ci_kwarg(ci) ? "available" : "NULL");
                    break;
                }
              case imemo_callcache:
                {
                    const struct rb_callcache *cc = (const struct rb_callcache *)obj;
                    VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
                    const rb_callable_method_entry_t *cme = vm_cc_cme(cc);

                    APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
                             NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
                             cme ? rb_id2name(cme->called_id) : "<NULL>",
                             cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
                             (void *)cme,
                             (void *)(uintptr_t)vm_cc_call(cc));
                    break;
                }
              default:
                break;
            }
          }
          default:
            break;
        }
    }
  end:

    return pos;
}

#undef C

void
rb_asan_poison_object(VALUE obj)
{
    MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
    asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
}

void
rb_asan_unpoison_object(VALUE obj, bool newobj_p)
{
    MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
    asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
}

void *
rb_asan_poisoned_object_p(VALUE obj)
{
    MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
    return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
}

#define asan_unpoisoning_object(obj) \
    for (void *poisoned = asan_unpoison_object_temporary(obj), \
              *unpoisoning = &poisoned; /* flag to loop just once */ \
         unpoisoning; \
         unpoisoning = asan_poison_object_restore(obj, poisoned))
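
/* asan_unpoisoning_object() abuses a for-statement so the block that follows
 * it runs exactly once between an unpoison/re-poison pair: the loop body is
 * entered because `unpoisoning` starts out non-NULL, and the restore call in
 * the increment expression returns NULL, terminating after one pass. Usage,
 * as in rb_raw_obj_info() below:
 *
 *     asan_unpoisoning_object(obj) {
 *         // obj's slot is readable here even under ASan
 *     }
 */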

const char *
rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
{
    asan_unpoisoning_object(obj) {
        size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
        pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
        if (pos >= buff_size) {} // truncated
    }

    return buff;
}

#undef APPEND_S
#undef APPEND_F
#undef BUFF_ARGS

#if RGENGC_OBJ_INFO
#define OBJ_INFO_BUFFERS_NUM  10
#define OBJ_INFO_BUFFERS_SIZE 0x100
static rb_atomic_t obj_info_buffers_index = 0;
static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];

/* Increments *var atomically and resets *var to 0 when maxval is
 * reached. Returns the old *var value, wrapped around to 0...maxval. */
static rb_atomic_t
atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
{
    rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
    if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
        const rb_atomic_t newval = oldval + 1;
        RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
        oldval %= maxval;
    }
    return oldval;
}

static const char *
obj_info(VALUE obj)
{
    rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
    char *const buff = obj_info_buffers[index];
    return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
}
#else
static const char *
obj_info(VALUE obj)
{
    return obj_type_name(obj);
}
#endif

/*
  ------------------------ Extended allocator ------------------------
*/

struct gc_raise_tag {
    VALUE exc;
    const char *fmt;
    va_list *ap;
};

static void *
gc_vraise(void *ptr)
{
    struct gc_raise_tag *argv = ptr;
    rb_vraise(argv->exc, argv->fmt, *argv->ap);
    UNREACHABLE_RETURN(NULL);
}

static void
gc_raise(VALUE exc, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    struct gc_raise_tag argv = {
        exc, fmt, &ap,
    };

    if (ruby_thread_has_gvl_p()) {
        gc_vraise(&argv);
        UNREACHABLE;
    }
    else if (ruby_native_thread_p()) {
        rb_thread_call_with_gvl(gc_vraise, &argv);
        UNREACHABLE;
    }
    else {
        /* Not in a ruby thread */
        fprintf(stderr, "%s", "[FATAL] ");
        vfprintf(stderr, fmt, ap);
    }

    va_end(ap);
    abort();
}

NORETURN(static void negative_size_allocation_error(const char *));
static void
negative_size_allocation_error(const char *msg)
{
    gc_raise(rb_eNoMemError, "%s", msg);
}

static void *
ruby_memerror_body(void *dummy)
{
    rb_memerror();
    return 0;
}

NORETURN(static void ruby_memerror(void));
static void
ruby_memerror(void)
{
    if (ruby_thread_has_gvl_p()) {
        rb_memerror();
    }
    else {
        if (ruby_native_thread_p()) {
            rb_thread_call_with_gvl(ruby_memerror_body, 0);
        }
        else {
            /* no ruby thread */
            fprintf(stderr, "[FATAL] failed to allocate memory\n");
        }
    }

    /* There have been discussions about whether we should die here; */
    /* we may rethink this later. */
    exit(EXIT_FAILURE);
}

void
rb_memerror(void)
{
    /* the `GET_VM()->special_exceptions` below assumes that
     * the VM is reachable from the current thread. We should
     * definitely make sure of that. */
    RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());

    rb_execution_context_t *ec = GET_EC();
    VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];

    if (!exc ||
        rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
        rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
        fprintf(stderr, "[FATAL] failed to allocate memory\n");
        exit(EXIT_FAILURE);
    }
    if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
        rb_ec_raised_clear(ec);
    }
    else {
        rb_ec_raised_set(ec, RAISED_NOMEMORY);
        exc = ruby_vm_special_exception_copy(exc);
    }
    ec->errinfo = exc;
    EC_JUMP_TAG(ec, TAG_RAISE);
}

bool
rb_memerror_reentered(void)
{
    rb_execution_context_t *ec = GET_EC();
    return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
}

void
rb_malloc_info_show_results(void)
{
}

static void *
handle_malloc_failure(void *ptr)
{
    if (LIKELY(ptr)) {
        return ptr;
    }
    else {
        ruby_memerror();
        UNREACHABLE_RETURN(ptr);
    }
}

static void *ruby_xmalloc_body(size_t size);

void *
ruby_xmalloc(size_t size)
{
    return handle_malloc_failure(ruby_xmalloc_body(size));
}

static void *
ruby_xmalloc_body(size_t size)
{
    if ((ssize_t)size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
}

void
ruby_malloc_size_overflow(size_t count, size_t elsize)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
             count, elsize);
}

void
ruby_malloc_add_size_overflow(size_t x, size_t y)
{
    rb_raise(rb_eArgError,
             "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
             x, y);
}
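
/* ruby_malloc_size_overflow() and ruby_malloc_add_size_overflow() are the
 * raise paths for overflow-checked size arithmetic such as xmalloc2_size().
 * A minimal sketch of the kind of check that ends up calling them (the
 * helper below is illustrative, not the actual xmalloc2_size() definition):
 *
 *     static size_t
 *     checked_mul_size(size_t n, size_t size)
 *     {
 *         if (n != 0 && size > SIZE_MAX / n) ruby_malloc_size_overflow(n, size);
 *         return n * size;
 *     }
 */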

static void *ruby_xmalloc2_body(size_t n, size_t size);

void *
ruby_xmalloc2(size_t n, size_t size)
{
    return handle_malloc_failure(ruby_xmalloc2_body(n, size));
}

static void *
ruby_xmalloc2_body(size_t n, size_t size)
{
    return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
}

static void *ruby_xcalloc_body(size_t n, size_t size);

void *
ruby_xcalloc(size_t n, size_t size)
{
    return handle_malloc_failure(ruby_xcalloc_body(n, size));
}

static void *
ruby_xcalloc_body(size_t n, size_t size)
{
    return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
}

static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);

#ifdef ruby_sized_xrealloc
#undef ruby_sized_xrealloc
#endif
void *
ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
{
    return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
}

static void *
ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
{
    if ((ssize_t)new_size < 0) {
        negative_size_allocation_error("too large allocation size");
    }

    return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
}

void *
ruby_xrealloc(void *ptr, size_t new_size)
{
    return ruby_sized_xrealloc(ptr, new_size, 0);
}

static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);

#ifdef ruby_sized_xrealloc2
#undef ruby_sized_xrealloc2
#endif
void *
ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
{
    return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
}

static void *
ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
{
    size_t len = xmalloc2_size(n, size);
    return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
}

void *
ruby_xrealloc2(void *ptr, size_t n, size_t size)
{
    return ruby_sized_xrealloc2(ptr, n, size, 0);
}

#ifdef ruby_sized_xfree
#undef ruby_sized_xfree
#endif
void
ruby_sized_xfree(void *x, size_t size)
{
    if (LIKELY(x)) {
        /* It's possible for a C extension's pthread destructor function set by pthread_key_create
         * to be called after ruby_vm_destruct and attempt to free memory. Fall back to ruby_mimfree
         * in that case. */
        if (LIKELY(GET_VM())) {
            rb_gc_impl_free(rb_gc_get_objspace(), x, size);
        }
        else {
            ruby_mimfree(x);
        }
    }
}

void
ruby_xfree(void *x)
{
    ruby_sized_xfree(x, 0);
}

void *
rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xmalloc(w);
}

void *
rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xcalloc(w, 1);
}

void *
rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
{
    size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
    return ruby_xrealloc((void *)p, w);
}

void *
rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xmalloc(u);
}

void *
rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
{
    size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
    return ruby_xcalloc(u, 1);
}
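
/* The *_mul_add allocators cover the common "header plus flexible array"
 * layout, where the total size x * y + z must be computed without silently
 * wrapping. A hedged sketch (the struct and field names are hypothetical):
 *
 *     struct table {
 *         size_t len;
 *         int slots[];  // flexible array member
 *     };
 *
 *     struct table *t = rb_xmalloc_mul_add(n, sizeof(int), sizeof(struct table));
 *     t->len = n;       // raises ArgumentError instead of overflowing
 */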

/* Mimics ruby_xmalloc, but does not require rb_objspace.
 * Returns a pointer suitable for ruby_xfree.
 */
void *
ruby_mimmalloc(size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    size += sizeof(struct malloc_obj_info);
#endif
    mem = malloc(size);
#if CALC_EXACT_MALLOC_SIZE
    if (!mem) {
        return NULL;
    }
    else
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
        mem = info + 1;
    }
#endif
    return mem;
}

void *
ruby_mimcalloc(size_t num, size_t size)
{
    void *mem;
#if CALC_EXACT_MALLOC_SIZE
    struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
    if (UNLIKELY(t.left)) {
        return NULL;
    }
    size = t.right + sizeof(struct malloc_obj_info);
    mem = calloc1(size);
    if (!mem) {
        return NULL;
    }
    else
    /* set 0 for consistency of allocated_size/allocations */
    {
        struct malloc_obj_info *info = mem;
        info->size = 0;
        mem = info + 1;
    }
#else
    mem = calloc(num, size);
#endif
    return mem;
}

void
ruby_mimfree(void *ptr)
{
#if CALC_EXACT_MALLOC_SIZE
    struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
    ptr = info;
#endif
    free(ptr);
}

void
rb_gc_adjust_memory_usage(ssize_t diff)
{
    unless_objspace(objspace) { return; }

    rb_gc_impl_adjust_memory_usage(objspace, diff);
}
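
/* rb_gc_adjust_memory_usage() lets C extensions report memory that Ruby did
 * not allocate itself (for example, buffers owned by a third-party library),
 * so the GC can account for the pressure. A hedged sketch for a wrapped
 * buffer (the struct and fields are illustrative):
 *
 *     struct wrapper { void *buf; size_t buflen; };
 *
 *     // after acquiring an external buffer:
 *     rb_gc_adjust_memory_usage((ssize_t)w->buflen);
 *     // ... and before releasing it:
 *     rb_gc_adjust_memory_usage(-(ssize_t)w->buflen);
 */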

const char *
rb_obj_info(VALUE obj)
{
    return obj_info(obj);
}

void
rb_obj_info_dump(VALUE obj)
{
    char buff[0x100];
    fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
}

void
rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
{
    char buff[0x100];
    fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
}

void
rb_gc_before_fork(void)
{
    rb_gc_impl_before_fork(rb_gc_get_objspace());
}

void
rb_gc_after_fork(rb_pid_t pid)
{
    rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
}
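
/* rb_gc_before_fork()/rb_gc_after_fork() bracket a fork(2) so the GC
 * implementation can release and re-establish any per-process state. A
 * hedged sketch of the call order, assuming a POSIX fork:
 *
 *     rb_gc_before_fork();
 *     pid_t pid = fork();
 *     rb_gc_after_fork(pid);  // typically runs in both parent and child
 */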

/*
 * Document-module: ObjectSpace
 *
 * The ObjectSpace module contains a number of routines
 * that interact with the garbage collection facility and allow you to
 * traverse all living objects with an iterator.
 *
 * ObjectSpace also provides support for object finalizers, procs that will be
 * called after a specific object has been destroyed by garbage collection. See
 * the documentation for +ObjectSpace.define_finalizer+ for important
 * information on how to use this method correctly.
 *
 *    a = "A"
 *    b = "B"
 *
 *    ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
 *    ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
 *
 *    a = nil
 *    b = nil
 *
 * _produces:_
 *
 *    Finalizer two on 537763470
 *    Finalizer one on 537763480
 */

/*  Document-class: GC::Profiler
 *
 *  The GC profiler provides access to information on GC runs including time,
 *  length and object space size.
 *
 *  Example:
 *
 *    GC::Profiler.enable
 *
 *    require 'rdoc/rdoc'
 *
 *    GC::Profiler.report
 *
 *    GC::Profiler.disable
 *
 *  See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
 */

#include "gc.rbinc"

void
Init_GC(void)
{
#undef rb_intern
    malloc_offset = gc_compute_malloc_offset();

    rb_mGC = rb_define_module("GC");

    VALUE rb_mObjSpace = rb_define_module("ObjectSpace");

    rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);

    rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
    rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);

    rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);

    rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");

    rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
    rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);

    rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);

    rb_gc_impl_init();
}

// Set a name for the anonymous virtual memory area. `addr` is the starting
// address of the area and `size` is its length in bytes. `name` is a
// NUL-terminated human-readable string.
//
// This function is usually called after calling `mmap()`. The human-readable
// annotation helps developers identify the call site of `mmap()` that created
// the memory mapping.
//
// This function currently only works on Linux 5.17 or higher. After calling
// this function, we can see annotations in the form of "[anon:...]" in
// `/proc/self/maps`, where `...` is the content of `name`. This function has
// no effect when called on other platforms.
void
ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
{
#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
    // The name length cannot exceed 80 (including the '\0').
    RUBY_ASSERT(strlen(name) < 80);
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
    // We ignore errors in prctl. prctl may set errno to EINVAL for several
    // reasons.
    // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
    // 2. addr is an invalid address.
    // 3. The string pointed to by name is too long.
    // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
    // happen if we run the compiled binary on an old kernel. In theory, all
    // other errors should result in a failure. But since EINVAL cannot
    // distinguish the first error from the others, and this function is mainly
    // used for debugging, we silently ignore the error.
    errno = 0;
#endif
}
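
/* A hedged usage sketch for ruby_annotate_mmap(), assuming a plain anonymous
 * mmap(2) on Linux (the label string is illustrative):
 *
 *     void *pages = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     if (pages != MAP_FAILED) {
 *         ruby_annotate_mmap(pages, len, "Ruby:my_subsystem");
 *         // shows up as "[anon:Ruby:my_subsystem]" in /proc/self/maps
 *     }
 */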