Ruby 3.5.0dev (2025-05-16 revision 06a56a7ffcb053d5bc45b9a984082d9301d6819c)
gc.c (06a56a7ffcb053d5bc45b9a984082d9301d6819c)
1/**********************************************************************
2
3 gc.c -
4
5 $Author$
6 created at: Tue Oct 5 09:44:46 JST 1993
7
8 Copyright (C) 1993-2007 Yukihiro Matsumoto
9 Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10 Copyright (C) 2000 Information-technology Promotion Agency, Japan
11
12**********************************************************************/
13
14#define rb_data_object_alloc rb_data_object_alloc
15#define rb_data_typed_object_alloc rb_data_typed_object_alloc
16
17#include "ruby/internal/config.h"
18#ifdef _WIN32
19# include "ruby/ruby.h"
20#endif
21
22#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
23# include "wasm/setjmp.h"
24# include "wasm/machine.h"
25#else
26# include <setjmp.h>
27#endif
28#include <stdarg.h>
29#include <stdio.h>
30
31/* MALLOC_HEADERS_BEGIN */
32#ifndef HAVE_MALLOC_USABLE_SIZE
33# ifdef _WIN32
34# define HAVE_MALLOC_USABLE_SIZE
35# define malloc_usable_size(a) _msize(a)
36# elif defined HAVE_MALLOC_SIZE
37# define HAVE_MALLOC_USABLE_SIZE
38# define malloc_usable_size(a) malloc_size(a)
39# endif
40#endif
41
42#ifdef HAVE_MALLOC_USABLE_SIZE
43# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
44/* Alternative malloc header is included in ruby/missing.h */
45# elif defined(HAVE_MALLOC_H)
46# include <malloc.h>
47# elif defined(HAVE_MALLOC_NP_H)
48# include <malloc_np.h>
49# elif defined(HAVE_MALLOC_MALLOC_H)
50# include <malloc/malloc.h>
51# endif
52#endif
53
54/* MALLOC_HEADERS_END */
55
56#ifdef HAVE_SYS_TIME_H
57# include <sys/time.h>
58#endif
59
60#ifdef HAVE_SYS_RESOURCE_H
61# include <sys/resource.h>
62#endif
63
64#if defined _WIN32 || defined __CYGWIN__
65# include <windows.h>
66#elif defined(HAVE_POSIX_MEMALIGN)
67#elif defined(HAVE_MEMALIGN)
68# include <malloc.h>
69#endif
70
71#include <sys/types.h>
72
73#ifdef __EMSCRIPTEN__
74#include <emscripten.h>
75#endif
76
77/* For ruby_annotate_mmap */
78#ifdef HAVE_SYS_PRCTL_H
79#include <sys/prctl.h>
80#endif
81
82#undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
83
84#include "constant.h"
85#include "darray.h"
86#include "debug_counter.h"
87#include "eval_intern.h"
88#include "gc/gc.h"
89#include "id_table.h"
90#include "internal.h"
91#include "internal/class.h"
92#include "internal/compile.h"
93#include "internal/complex.h"
94#include "internal/cont.h"
95#include "internal/error.h"
96#include "internal/eval.h"
97#include "internal/gc.h"
98#include "internal/hash.h"
99#include "internal/imemo.h"
100#include "internal/io.h"
101#include "internal/numeric.h"
102#include "internal/object.h"
103#include "internal/proc.h"
104#include "internal/rational.h"
105#include "internal/sanitizers.h"
106#include "internal/struct.h"
107#include "internal/symbol.h"
108#include "internal/thread.h"
109#include "internal/variable.h"
110#include "internal/warnings.h"
111#include "probes.h"
112#include "regint.h"
113#include "ruby/debug.h"
114#include "ruby/io.h"
115#include "ruby/re.h"
116#include "ruby/st.h"
117#include "ruby/thread.h"
118#include "ruby/util.h"
119#include "ruby/vm.h"
120#include "ruby_assert.h"
121#include "ruby_atomic.h"
122#include "symbol.h"
123#include "variable.h"
124#include "vm_core.h"
125#include "vm_sync.h"
126#include "vm_callinfo.h"
127#include "ractor_core.h"
128#include "yjit.h"
129
130#include "builtin.h"
131#include "shape.h"
132
133unsigned int
134rb_gc_vm_lock(void)
135{
136 unsigned int lev;
137 RB_VM_LOCK_ENTER_LEV(&lev);
138 return lev;
139}
140
141void
142rb_gc_vm_unlock(unsigned int lev)
143{
144 RB_VM_LOCK_LEAVE_LEV(&lev);
145}
146
147unsigned int
148rb_gc_cr_lock(void)
149{
150 unsigned int lev;
151 RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
152 return lev;
153}
154
155void
156rb_gc_cr_unlock(unsigned int lev)
157{
158 RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
159}
160
161unsigned int
162rb_gc_vm_lock_no_barrier(void)
163{
164 unsigned int lev = 0;
165 RB_VM_LOCK_ENTER_LEV_NB(&lev);
166 return lev;
167}
168
169void
170rb_gc_vm_unlock_no_barrier(unsigned int lev)
171{
172 RB_VM_LOCK_LEAVE_LEV(&lev);
173}
174
175void
176rb_gc_vm_barrier(void)
177{
178 rb_vm_barrier();
179}
180
181#if USE_MODULAR_GC
182void *
183rb_gc_get_ractor_newobj_cache(void)
184{
185 return GET_RACTOR()->newobj_cache;
186}
187
188void
189rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
190{
191 rb_native_mutex_initialize(&context->lock);
192 context->ec = GET_EC();
193}
194
195void
196rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
197{
198 rb_native_mutex_lock(&context->lock);
199
200 GC_ASSERT(rb_current_execution_context(false) == NULL);
201
202#ifdef RB_THREAD_LOCAL_SPECIFIER
203 rb_current_ec_set(context->ec);
204#else
205 native_tls_set(ruby_current_ec_key, context->ec);
206#endif
207}
208
209void
210rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
211{
212 rb_native_mutex_unlock(&context->lock);
213
214 GC_ASSERT(rb_current_execution_context(true) == context->ec);
215
216#ifdef RB_THREAD_LOCAL_SPECIFIER
217 rb_current_ec_set(NULL);
218#else
219 native_tls_set(ruby_current_ec_key, NULL);
220#endif
221}
222#endif
223
224bool
225rb_gc_event_hook_required_p(rb_event_flag_t event)
226{
227 return ruby_vm_event_flags & event;
228}
229
230void
231rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
232{
233 if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
234
235 rb_execution_context_t *ec = GET_EC();
236 if (!ec->cfp) return;
237
238 EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
239}
240
241void *
242rb_gc_get_objspace(void)
243{
244 return GET_VM()->gc.objspace;
245}
246
247
248void
249rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
250{
251 rb_ractor_t *r = NULL;
252 if (RB_LIKELY(ruby_single_main_ractor)) {
253 GC_ASSERT(
254 ccan_list_empty(&GET_VM()->ractor.set) ||
255 (ccan_list_top(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor &&
256 ccan_list_tail(&GET_VM()->ractor.set, rb_ractor_t, vmlr_node) == ruby_single_main_ractor)
257 );
258
259 func(ruby_single_main_ractor->newobj_cache, data);
260 }
261 else {
262 ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
263 func(r->newobj_cache, data);
264 }
265 }
266}
267
268void
269rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
270{
271 volatile struct {
272 VALUE errinfo;
273 VALUE final;
275 VALUE *sp;
276 long finished;
277 } saved;
278
279 rb_execution_context_t * volatile ec = GET_EC();
280#define RESTORE_FINALIZER() (\
281 ec->cfp = saved.cfp, \
282 ec->cfp->sp = saved.sp, \
283 ec->errinfo = saved.errinfo)
284
285 saved.errinfo = ec->errinfo;
286 saved.cfp = ec->cfp;
287 saved.sp = ec->cfp->sp;
288 saved.finished = 0;
289 saved.final = Qundef;
290
291 EC_PUSH_TAG(ec);
292 enum ruby_tag_type state = EC_EXEC_TAG();
293 if (state != TAG_NONE) {
294 ++saved.finished; /* skip failed finalizer */
295
296 VALUE failed_final = saved.final;
297 saved.final = Qundef;
298 if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
299 rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
300 rb_ec_error_print(ec, ec->errinfo);
301 }
302 }
303
304 for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
305 saved.final = callback(i, data);
306 rb_check_funcall(saved.final, idCall, 1, &objid);
307 }
308 EC_POP_TAG();
309#undef RESTORE_FINALIZER
310}
311
312void
313rb_gc_set_pending_interrupt(void)
314{
315 rb_execution_context_t *ec = GET_EC();
316 ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
317}
318
319void
320rb_gc_unset_pending_interrupt(void)
321{
322 rb_execution_context_t *ec = GET_EC();
323 ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
324}
325
326bool
327rb_gc_multi_ractor_p(void)
328{
329 return rb_multi_ractor_p();
330}
331
332bool rb_obj_is_main_ractor(VALUE gv);
333
334bool
335rb_gc_shutdown_call_finalizer_p(VALUE obj)
336{
337 switch (BUILTIN_TYPE(obj)) {
338 case T_DATA:
339 if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
340 if (rb_obj_is_thread(obj)) return false;
341 if (rb_obj_is_mutex(obj)) return false;
342 if (rb_obj_is_fiber(obj)) return false;
343 if (rb_obj_is_main_ractor(obj)) return false;
344 if (rb_obj_is_fstring_table(obj)) return false;
345
346 return true;
347
348 case T_FILE:
349 return true;
350
351 case T_SYMBOL:
352 if (RSYMBOL(obj)->fstr &&
353 (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
354 BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
355 RSYMBOL(obj)->fstr = 0;
356 }
357 return true;
358
359 case T_NONE:
360 return false;
361
362 default:
363 return ruby_free_at_exit_p();
364 }
365}
366
367uint32_t
368rb_gc_get_shape(VALUE obj)
369{
370 return (uint32_t)rb_obj_shape_id(obj);
371}
372
373void
374rb_gc_set_shape(VALUE obj, uint32_t shape_id)
375{
376 rb_shape_set_shape_id(obj, (uint32_t)shape_id);
377}
378
379uint32_t
380rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
381{
382 shape_id_t orig_shape_id = rb_obj_shape_id(obj);
383 if (rb_shape_id_too_complex_p(orig_shape_id)) {
384 return (uint32_t)orig_shape_id;
385 }
386
387 shape_id_t initial_shape_id = rb_shape_root(heap_id);
388 shape_id_t new_shape_id = rb_shape_traverse_from_new_root(initial_shape_id, orig_shape_id);
389
390 if (new_shape_id == INVALID_SHAPE_ID) {
391 return 0;
392 }
393
394 return (uint32_t)new_shape_id;
395}
396
397void rb_vm_update_references(void *ptr);
398
399#define rb_setjmp(env) RUBY_SETJMP(env)
400#define rb_jmp_buf rb_jmpbuf_t
401#undef rb_data_object_wrap
402
403#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
404#define MAP_ANONYMOUS MAP_ANON
405#endif
406
407#define unless_objspace(objspace) \
408 void *objspace; \
409 rb_vm_t *unless_objspace_vm = GET_VM(); \
410 if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
411 else /* return; or objspace will be warned uninitialized */
412
413#define RMOVED(obj) ((struct RMoved *)(obj))
414
415#define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
416 if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
417 *(_type *)&(_thing) = (_type)gc_location_internal(_objspace, (VALUE)_thing); \
418 } \
419} while (0)
420
421#define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
422
423#if RUBY_MARK_FREE_DEBUG
424int ruby_gc_debug_indent = 0;
425#endif
426
427#ifndef RGENGC_OBJ_INFO
428# define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
429#endif
430
431#ifndef CALC_EXACT_MALLOC_SIZE
432# define CALC_EXACT_MALLOC_SIZE 0
433#endif
434
436
437static size_t malloc_offset = 0;
438#if defined(HAVE_MALLOC_USABLE_SIZE)
439static size_t
440gc_compute_malloc_offset(void)
441{
442 // Different allocators use different metadata storage strategies which result in different
443 // ideal sizes.
444 // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
445 // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
446 // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
447 // waste memory.
448 // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it result in
449 // no wasted memory.
450 size_t offset = 0;
451 for (offset = 0; offset <= 16; offset += 8) {
452 size_t allocated = (64 - offset);
453 void *test_ptr = malloc(allocated);
454 size_t wasted = malloc_usable_size(test_ptr) - allocated;
455 free(test_ptr);
456
457 if (wasted == 0) {
458 return offset;
459 }
460 }
461 return 0;
462}
463#else
464static size_t
465gc_compute_malloc_offset(void)
466{
467 // If we don't have malloc_usable_size, we use powers of 2.
468 return 0;
469}
470#endif
471
472size_t
473rb_malloc_grow_capa(size_t current, size_t type_size)
474{
475 size_t current_capacity = current;
476 if (current_capacity < 4) {
477 current_capacity = 4;
478 }
479 current_capacity *= type_size;
480
481 // We double the current capacity.
482 size_t new_capacity = (current_capacity * 2);
483
484 // And round up to the next power of 2 if it's not already one.
485 if (rb_popcount64(new_capacity) != 1) {
486 new_capacity = (size_t)(1 << (64 - nlz_int64(new_capacity)));
487 }
488
489 new_capacity -= malloc_offset;
490 new_capacity /= type_size;
491 if (current > new_capacity) {
492 rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
493 }
494 RUBY_ASSERT(new_capacity > current);
495 return new_capacity;
496}
497
498static inline struct rbimpl_size_mul_overflow_tag
499size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
500{
501 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
502 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_add_overflow(t.right, z);
503 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
504}
505
506static inline struct rbimpl_size_mul_overflow_tag
507size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
508{
509 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
510 struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
511 struct rbimpl_size_mul_overflow_tag v = rbimpl_size_add_overflow(t.right, u.right);
512 return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
513}
514
515PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
516
517static inline size_t
518size_mul_or_raise(size_t x, size_t y, VALUE exc)
519{
520 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
521 if (LIKELY(!t.left)) {
522 return t.right;
523 }
524 else if (rb_during_gc()) {
525 rb_memerror(); /* or...? */
526 }
527 else {
528 gc_raise(
529 exc,
530 "integer overflow: %"PRIuSIZE
531 " * %"PRIuSIZE
532 " > %"PRIuSIZE,
533 x, y, (size_t)SIZE_MAX);
534 }
535}
536
537size_t
538rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
539{
540 return size_mul_or_raise(x, y, exc);
541}
542
543static inline size_t
544size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
545{
546 struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
547 if (LIKELY(!t.left)) {
548 return t.right;
549 }
550 else if (rb_during_gc()) {
551 rb_memerror(); /* or...? */
552 }
553 else {
554 gc_raise(
555 exc,
556 "integer overflow: %"PRIuSIZE
557 " * %"PRIuSIZE
558 " + %"PRIuSIZE
559 " > %"PRIuSIZE,
560 x, y, z, (size_t)SIZE_MAX);
561 }
562}
563
564size_t
565rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
566{
567 return size_mul_add_or_raise(x, y, z, exc);
568}
569
570static inline size_t
571size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
572{
573 struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
574 if (LIKELY(!t.left)) {
575 return t.right;
576 }
577 else if (rb_during_gc()) {
578 rb_memerror(); /* or...? */
579 }
580 else {
581 gc_raise(
582 exc,
583 "integer overflow: %"PRIdSIZE
584 " * %"PRIdSIZE
585 " + %"PRIdSIZE
586 " * %"PRIdSIZE
587 " > %"PRIdSIZE,
588 x, y, z, w, (size_t)SIZE_MAX);
589 }
590}
591
592#if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
593/* trick the compiler into thinking a external signal handler uses this */
594volatile VALUE rb_gc_guarded_val;
595volatile VALUE *
596rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
597{
598 rb_gc_guarded_val = val;
599
600 return ptr;
601}
602#endif
603
604static const char *obj_type_name(VALUE obj);
605#include "gc/default/default.c"
606
607#if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
608# error "Modular GC requires dlopen"
609#elif USE_MODULAR_GC
610#include <dlfcn.h>
611
612typedef struct gc_function_map {
613 // Bootup
614 void *(*objspace_alloc)(void);
615 void (*objspace_init)(void *objspace_ptr);
616 void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
617 void (*set_params)(void *objspace_ptr);
618 void (*init)(void);
619 size_t *(*heap_sizes)(void *objspace_ptr);
620 // Shutdown
621 void (*shutdown_free_objects)(void *objspace_ptr);
622 void (*objspace_free)(void *objspace_ptr);
623 void (*ractor_cache_free)(void *objspace_ptr, void *cache);
624 // GC
625 void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
626 bool (*during_gc_p)(void *objspace_ptr);
627 void (*prepare_heap)(void *objspace_ptr);
628 void (*gc_enable)(void *objspace_ptr);
629 void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
630 bool (*gc_enabled_p)(void *objspace_ptr);
631 VALUE (*config_get)(void *objpace_ptr);
632 void (*config_set)(void *objspace_ptr, VALUE hash);
633 void (*stress_set)(void *objspace_ptr, VALUE flag);
634 VALUE (*stress_get)(void *objspace_ptr);
635 // Object allocation
636 VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
637 size_t (*obj_slot_size)(VALUE obj);
638 size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
639 bool (*size_allocatable_p)(size_t size);
640 // Malloc
641 void *(*malloc)(void *objspace_ptr, size_t size);
642 void *(*calloc)(void *objspace_ptr, size_t size);
643 void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
644 void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
645 void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
646 // Marking
647 void (*mark)(void *objspace_ptr, VALUE obj);
648 void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
649 void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
650 void (*mark_maybe)(void *objspace_ptr, VALUE obj);
651 void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
652 void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
653 // Compaction
654 bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
655 VALUE (*location)(void *objspace_ptr, VALUE value);
656 // Write barriers
657 void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
658 void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
659 void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
660 // Heap walking
661 void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
662 void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
663 // Finalizers
664 void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
665 VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
666 void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
667 void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
668 void (*shutdown_call_finalizer)(void *objspace_ptr);
669 // Object ID
670 VALUE (*object_id)(void *objspace_ptr, VALUE obj);
671 VALUE (*object_id_to_ref)(void *objspace_ptr, VALUE object_id);
672 // Forking
673 void (*before_fork)(void *objspace_ptr);
674 void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
675 // Statistics
676 void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
677 bool (*get_measure_total_time)(void *objspace_ptr);
678 unsigned long long (*get_total_time)(void *objspace_ptr);
679 size_t (*gc_count)(void *objspace_ptr);
680 VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
681 VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
682 VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
683 const char *(*active_gc_name)(void);
684 // Miscellaneous
685 struct rb_gc_object_metadata_entry *(*object_metadata)(void *objspace_ptr, VALUE obj);
686 bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
687 bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
688 void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
689 void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
690
691 bool modular_gc_loaded_p;
692} rb_gc_function_map_t;
693
694static rb_gc_function_map_t rb_gc_functions;
695
696# define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
697# define MODULAR_GC_DIR STRINGIZE(modular_gc_dir)
698
699static void
700ruby_modular_gc_init(void)
701{
702 // Assert that the directory path ends with a /
703 RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
704
705 const char *gc_so_file = getenv(RUBY_GC_LIBRARY);
706
707 rb_gc_function_map_t gc_functions = { 0 };
708
709 char *gc_so_path = NULL;
710 void *handle = NULL;
711 if (gc_so_file) {
712 /* Check to make sure that gc_so_file matches /[\w-_]+/ so that it does
713 * not load a shared object outside of the directory. */
714 for (size_t i = 0; i < strlen(gc_so_file); i++) {
715 char c = gc_so_file[i];
716 if (isalnum(c)) continue;
717 switch (c) {
718 case '-':
719 case '_':
720 break;
721 default:
722 fprintf(stderr, "Only alphanumeric, dash, and underscore is allowed in "RUBY_GC_LIBRARY"\n");
723 exit(1);
724 }
725 }
726
727 size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
728#ifdef LOAD_RELATIVE
729 Dl_info dli;
730 size_t prefix_len = 0;
731 if (dladdr((void *)(uintptr_t)ruby_modular_gc_init, &dli)) {
732 const char *base = strrchr(dli.dli_fname, '/');
733 if (base) {
734 size_t tail = 0;
735# define end_with_p(lit) \
736 (prefix_len >= (tail = rb_strlen_lit(lit)) && \
737 memcmp(base - tail, lit, tail) == 0)
738
739 prefix_len = base - dli.dli_fname;
740 if (end_with_p("/bin") || end_with_p("/lib")) {
741 prefix_len -= tail;
742 }
743 prefix_len += MODULAR_GC_DIR[0] != '/';
744 gc_so_path_size += prefix_len;
745 }
746 }
747#endif
748 gc_so_path = alloca(gc_so_path_size);
749 {
750 size_t gc_so_path_idx = 0;
751#define GC_SO_PATH_APPEND(str) do { \
752 gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
753} while (0)
754#ifdef LOAD_RELATIVE
755 if (prefix_len > 0) {
756 memcpy(gc_so_path, dli.dli_fname, prefix_len);
757 gc_so_path_idx = prefix_len;
758 }
759#endif
760 GC_SO_PATH_APPEND(MODULAR_GC_DIR "librubygc.");
761 GC_SO_PATH_APPEND(gc_so_file);
762 GC_SO_PATH_APPEND(DLEXT);
763 GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
764#undef GC_SO_PATH_APPEND
765 }
766
767 handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
768 if (!handle) {
769 fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
770 exit(1);
771 }
772
773 gc_functions.modular_gc_loaded_p = true;
774 }
775
776# define load_modular_gc_func(name) do { \
777 if (handle) { \
778 const char *func_name = "rb_gc_impl_" #name; \
779 gc_functions.name = dlsym(handle, func_name); \
780 if (!gc_functions.name) { \
781 fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
782 exit(1); \
783 } \
784 } \
785 else { \
786 gc_functions.name = rb_gc_impl_##name; \
787 } \
788} while (0)
789
790 // Bootup
791 load_modular_gc_func(objspace_alloc);
792 load_modular_gc_func(objspace_init);
793 load_modular_gc_func(ractor_cache_alloc);
794 load_modular_gc_func(set_params);
795 load_modular_gc_func(init);
796 load_modular_gc_func(heap_sizes);
797 // Shutdown
798 load_modular_gc_func(shutdown_free_objects);
799 load_modular_gc_func(objspace_free);
800 load_modular_gc_func(ractor_cache_free);
801 // GC
802 load_modular_gc_func(start);
803 load_modular_gc_func(during_gc_p);
804 load_modular_gc_func(prepare_heap);
805 load_modular_gc_func(gc_enable);
806 load_modular_gc_func(gc_disable);
807 load_modular_gc_func(gc_enabled_p);
808 load_modular_gc_func(config_set);
809 load_modular_gc_func(config_get);
810 load_modular_gc_func(stress_set);
811 load_modular_gc_func(stress_get);
812 // Object allocation
813 load_modular_gc_func(new_obj);
814 load_modular_gc_func(obj_slot_size);
815 load_modular_gc_func(heap_id_for_size);
816 load_modular_gc_func(size_allocatable_p);
817 // Malloc
818 load_modular_gc_func(malloc);
819 load_modular_gc_func(calloc);
820 load_modular_gc_func(realloc);
821 load_modular_gc_func(free);
822 load_modular_gc_func(adjust_memory_usage);
823 // Marking
824 load_modular_gc_func(mark);
825 load_modular_gc_func(mark_and_move);
826 load_modular_gc_func(mark_and_pin);
827 load_modular_gc_func(mark_maybe);
828 load_modular_gc_func(mark_weak);
829 load_modular_gc_func(remove_weak);
830 // Compaction
831 load_modular_gc_func(object_moved_p);
832 load_modular_gc_func(location);
833 // Write barriers
834 load_modular_gc_func(writebarrier);
835 load_modular_gc_func(writebarrier_unprotect);
836 load_modular_gc_func(writebarrier_remember);
837 // Heap walking
838 load_modular_gc_func(each_objects);
839 load_modular_gc_func(each_object);
840 // Finalizers
841 load_modular_gc_func(make_zombie);
842 load_modular_gc_func(define_finalizer);
843 load_modular_gc_func(undefine_finalizer);
844 load_modular_gc_func(copy_finalizer);
845 load_modular_gc_func(shutdown_call_finalizer);
846 // Forking
847 load_modular_gc_func(before_fork);
848 load_modular_gc_func(after_fork);
849 // Statistics
850 load_modular_gc_func(set_measure_total_time);
851 load_modular_gc_func(get_measure_total_time);
852 load_modular_gc_func(get_total_time);
853 load_modular_gc_func(gc_count);
854 load_modular_gc_func(latest_gc_info);
855 load_modular_gc_func(stat);
856 load_modular_gc_func(stat_heap);
857 load_modular_gc_func(active_gc_name);
858 // Miscellaneous
859 load_modular_gc_func(object_metadata);
860 load_modular_gc_func(pointer_to_heap_p);
861 load_modular_gc_func(garbage_object_p);
862 load_modular_gc_func(set_event_hook);
863 load_modular_gc_func(copy_attributes);
864
865# undef load_modular_gc_func
866
867 rb_gc_functions = gc_functions;
868}
869
870// Bootup
871# define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
872# define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
873# define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
874# define rb_gc_impl_set_params rb_gc_functions.set_params
875# define rb_gc_impl_init rb_gc_functions.init
876# define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
877// Shutdown
878# define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
879# define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
880# define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
881// GC
882# define rb_gc_impl_start rb_gc_functions.start
883# define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
884# define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
885# define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
886# define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
887# define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
888# define rb_gc_impl_config_get rb_gc_functions.config_get
889# define rb_gc_impl_config_set rb_gc_functions.config_set
890# define rb_gc_impl_stress_set rb_gc_functions.stress_set
891# define rb_gc_impl_stress_get rb_gc_functions.stress_get
892// Object allocation
893# define rb_gc_impl_new_obj rb_gc_functions.new_obj
894# define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
895# define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
896# define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
897// Malloc
898# define rb_gc_impl_malloc rb_gc_functions.malloc
899# define rb_gc_impl_calloc rb_gc_functions.calloc
900# define rb_gc_impl_realloc rb_gc_functions.realloc
901# define rb_gc_impl_free rb_gc_functions.free
902# define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
903// Marking
904# define rb_gc_impl_mark rb_gc_functions.mark
905# define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
906# define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
907# define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
908# define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
909# define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
910// Compaction
911# define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
912# define rb_gc_impl_location rb_gc_functions.location
913// Write barriers
914# define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
915# define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
916# define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
917// Heap walking
918# define rb_gc_impl_each_objects rb_gc_functions.each_objects
919# define rb_gc_impl_each_object rb_gc_functions.each_object
920// Finalizers
921# define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
922# define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
923# define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
924# define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
925# define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
926// Forking
927# define rb_gc_impl_before_fork rb_gc_functions.before_fork
928# define rb_gc_impl_after_fork rb_gc_functions.after_fork
929// Statistics
930# define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
931# define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
932# define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
933# define rb_gc_impl_gc_count rb_gc_functions.gc_count
934# define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
935# define rb_gc_impl_stat rb_gc_functions.stat
936# define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
937# define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
938// Miscellaneous
939# define rb_gc_impl_object_metadata rb_gc_functions.object_metadata
940# define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
941# define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
942# define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
943# define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
944#endif
945
946#ifdef RUBY_ASAN_ENABLED
947static void
948asan_death_callback(void)
949{
950 if (GET_VM()) {
951 rb_bug_without_die("ASAN error");
952 }
953}
954#endif
955
956static VALUE initial_stress = Qfalse;
957
958void *
959rb_objspace_alloc(void)
960{
961#if USE_MODULAR_GC
962 ruby_modular_gc_init();
963#endif
964
965 void *objspace = rb_gc_impl_objspace_alloc();
966 ruby_current_vm_ptr->gc.objspace = objspace;
967 rb_gc_impl_objspace_init(objspace);
968 rb_gc_impl_stress_set(objspace, initial_stress);
969
970#ifdef RUBY_ASAN_ENABLED
971 __sanitizer_set_death_callback(asan_death_callback);
972#endif
973
974 return objspace;
975}
976
977void
978rb_objspace_free(void *objspace)
979{
980 rb_gc_impl_objspace_free(objspace);
981}
982
983size_t
984rb_gc_obj_slot_size(VALUE obj)
985{
986 return rb_gc_impl_obj_slot_size(obj);
987}
988
989static inline void
990gc_validate_pc(void)
991{
992#if RUBY_DEBUG
993 rb_execution_context_t *ec = GET_EC();
994 const rb_control_frame_t *cfp = ec->cfp;
995 if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
996 RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
997 RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
998 }
999#endif
1000}
1001
1002static inline VALUE
1003newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
1004{
1005 VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);
1006
1007 gc_validate_pc();
1008
1009 if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
1010 unsigned int lev;
1011 RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
1012 {
1013 memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);
1014
1015 /* We must disable GC here because the callback could call xmalloc
1016 * which could potentially trigger a GC, and a lot of code is unsafe
1017 * to trigger a GC right after an object has been allocated because
1018 * they perform initialization for the object and assume that the
1019 * GC does not trigger before then. */
1020 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1021 {
1022 rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
1023 }
1024 if (!gc_disabled) rb_gc_enable();
1025 }
1026 RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
1027 }
1028
1029 return obj;
1030}
1031
1032VALUE
1033rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
1034{
1035 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1036 return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
1037}
1038
1039VALUE
1040rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
1041{
1042 GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1043 return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
1044}
1045
1046#define UNEXPECTED_NODE(func) \
1047 rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1048 BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1049
1050static inline void
1051rb_data_object_check(VALUE klass)
1052{
1053 if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1054 rb_undef_alloc_func(klass);
1055 rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1056 }
1057}
1058
1059VALUE
1060rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1061{
1063 if (klass) rb_data_object_check(klass);
1064 return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)datap, (VALUE)dfree, !dmark, sizeof(struct RTypedData));
1065}
1066
1067VALUE
1068rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1069{
1070 VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1071 DATA_PTR(obj) = xcalloc(1, size);
1072 return obj;
1073}
1074
1075static VALUE
1076typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1077{
1078 RBIMPL_NONNULL_ARG(type);
1079 if (klass) rb_data_object_check(klass);
1080 bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1081 return newobj_of(GET_RACTOR(), klass, T_DATA, ((VALUE)type) | IS_TYPED_DATA | typed_flag, (VALUE)datap, 0, wb_protected, size);
1082}
1083
1084VALUE
1085rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1086{
1087 if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
1088 rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1089 }
1090
1091 return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1092}
1093
1094VALUE
1095rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1096{
1097 if (type->flags & RUBY_TYPED_EMBEDDABLE) {
1098 if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1099 rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1100 }
1101
1102 size_t embed_size = offsetof(struct RTypedData, data) + size;
1103 if (rb_gc_size_allocatable_p(embed_size)) {
1104 VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1105 memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1106 return obj;
1107 }
1108 }
1109
1110 VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1111 DATA_PTR(obj) = xcalloc(1, size);
1112 return obj;
1113}
1114
1115static size_t
1116rb_objspace_data_type_memsize(VALUE obj)
1117{
1118 size_t size = 0;
1119 if (RTYPEDDATA_P(obj)) {
1120 const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1121 const void *ptr = RTYPEDDATA_GET_DATA(obj);
1122
1123 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1124#ifdef HAVE_MALLOC_USABLE_SIZE
1125 size += malloc_usable_size((void *)ptr);
1126#endif
1127 }
1128
1129 if (ptr && type->function.dsize) {
1130 size += type->function.dsize(ptr);
1131 }
1132 }
1133
1134 return size;
1135}
1136
1137const char *
1138rb_objspace_data_type_name(VALUE obj)
1139{
1140 if (RTYPEDDATA_P(obj)) {
1141 return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1142 }
1143 else {
1144 return 0;
1145 }
1146}
1147
1148static enum rb_id_table_iterator_result
1149cvar_table_free_i(VALUE value, void *ctx)
1150{
1151 xfree((void *)value);
1152 return ID_TABLE_CONTINUE;
1153}
1154
1155static void
1156io_fptr_finalize(void *fptr)
1157{
1158 rb_io_fptr_finalize((struct rb_io *)fptr);
1159}
1160
1161static inline void
1162make_io_zombie(void *objspace, VALUE obj)
1163{
1164 rb_io_t *fptr = RFILE(obj)->fptr;
1165 rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
1166}
1167
1168static bool
1169rb_data_free(void *objspace, VALUE obj)
1170{
1171 void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1172 if (data) {
1173 int free_immediately = false;
1174 void (*dfree)(void *);
1175
1176 if (RTYPEDDATA_P(obj)) {
1177 free_immediately = (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1178 dfree = RTYPEDDATA_TYPE(obj)->function.dfree;
1179 }
1180 else {
1181 dfree = RDATA(obj)->dfree;
1182 }
1183
1184 if (dfree) {
1185 if (dfree == RUBY_DEFAULT_FREE) {
1186 if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1187 xfree(data);
1188 RB_DEBUG_COUNTER_INC(obj_data_xfree);
1189 }
1190 }
1191 else if (free_immediately) {
1192 (*dfree)(data);
1193 if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1194 xfree(data);
1195 }
1196
1197 RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1198 }
1199 else {
1200 rb_gc_impl_make_zombie(objspace, obj, dfree, data);
1201 RB_DEBUG_COUNTER_INC(obj_data_zombie);
1202 return FALSE;
1203 }
1204 }
1205 else {
1206 RB_DEBUG_COUNTER_INC(obj_data_empty);
1207 }
1208 }
1209
1210 return true;
1211}
1212
1214 VALUE klass;
1215 bool obj_too_complex;
1216 rb_objspace_t *objspace; // used for update_*
1217};
1218
1219static void
1220classext_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
1221{
1222 struct rb_id_table *tbl;
1223 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1224
1225 rb_id_table_free(RCLASSEXT_M_TBL(ext));
1226 rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass);
1227 if (args->obj_too_complex) {
1228 st_free_table((st_table *)RCLASSEXT_FIELDS(ext));
1229 }
1230 else {
1231 xfree(RCLASSEXT_FIELDS(ext));
1232 }
1233 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && (tbl = RCLASSEXT_CONST_TBL(ext)) != NULL) {
1234 rb_free_const_table(tbl);
1235 }
1236 if ((tbl = RCLASSEXT_CVC_TBL(ext)) != NULL) {
1237 rb_id_table_foreach_values(tbl, cvar_table_free_i, NULL);
1238 rb_id_table_free(tbl);
1239 }
1240 rb_class_classext_free_subclasses(ext, args->klass);
1241 if (RCLASSEXT_SUPERCLASSES_OWNER(ext)) {
1242 xfree(RCLASSEXT_SUPERCLASSES(ext));
1243 }
1244 if (!is_prime) { // the prime classext will be freed with RClass
1245 xfree(ext);
1246 }
1247}
1248
1249static void
1250classext_iclass_free(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
1251{
1252 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
1253
1254 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
1255 /* Method table is not shared for origin iclasses of classes */
1256 rb_id_table_free(RCLASSEXT_M_TBL(ext));
1257 }
1258 if (RCLASSEXT_CALLABLE_M_TBL(ext) != NULL) {
1259 rb_id_table_free(RCLASSEXT_CALLABLE_M_TBL(ext));
1260 }
1261 rb_cc_tbl_free(RCLASSEXT_CC_TBL(ext), args->klass);
1262
1263 rb_class_classext_free_subclasses(ext, args->klass);
1264
1265 if (!is_prime) { // the prime classext will be freed with RClass
1266 xfree(ext);
1267 }
1268}
1269
1270bool
1271rb_gc_obj_free(void *objspace, VALUE obj)
1272{
1273 struct classext_foreach_args args;
1274
1275 RB_DEBUG_COUNTER_INC(obj_free);
1276
1277 switch (BUILTIN_TYPE(obj)) {
1278 case T_NIL:
1279 case T_FIXNUM:
1280 case T_TRUE:
1281 case T_FALSE:
1282 rb_bug("obj_free() called for broken object");
1283 break;
1284 default:
1285 break;
1286 }
1287
1288 switch (BUILTIN_TYPE(obj)) {
1289 case T_OBJECT:
1290 if (rb_shape_obj_too_complex_p(obj)) {
1291 RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
1292 st_free_table(ROBJECT_FIELDS_HASH(obj));
1293 }
1294 else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
1295 RB_DEBUG_COUNTER_INC(obj_obj_embed);
1296 }
1297 else {
1298 xfree(ROBJECT(obj)->as.heap.fields);
1299 RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1300 }
1301 break;
1302 case T_MODULE:
1303 case T_CLASS:
1304 args.klass = obj;
1305 args.obj_too_complex = rb_shape_obj_too_complex_p(obj) ? true : false;
1306
1307 rb_class_classext_foreach(obj, classext_free, (void *)&args);
1308 if (RCLASS(obj)->ns_classext_tbl) {
1309 st_free_table(RCLASS(obj)->ns_classext_tbl);
1310 }
1311 (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1312 (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1313 break;
1314 case T_STRING:
1315 rb_str_free(obj);
1316 break;
1317 case T_ARRAY:
1318 rb_ary_free(obj);
1319 break;
1320 case T_HASH:
1321#if USE_DEBUG_COUNTER
1322 switch (RHASH_SIZE(obj)) {
1323 case 0:
1324 RB_DEBUG_COUNTER_INC(obj_hash_empty);
1325 break;
1326 case 1:
1327 RB_DEBUG_COUNTER_INC(obj_hash_1);
1328 break;
1329 case 2:
1330 RB_DEBUG_COUNTER_INC(obj_hash_2);
1331 break;
1332 case 3:
1333 RB_DEBUG_COUNTER_INC(obj_hash_3);
1334 break;
1335 case 4:
1336 RB_DEBUG_COUNTER_INC(obj_hash_4);
1337 break;
1338 case 5:
1339 case 6:
1340 case 7:
1341 case 8:
1342 RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1343 break;
1344 default:
1345 GC_ASSERT(RHASH_SIZE(obj) > 8);
1346 RB_DEBUG_COUNTER_INC(obj_hash_g8);
1347 }
1348
1349 if (RHASH_AR_TABLE_P(obj)) {
1350 if (RHASH_AR_TABLE(obj) == NULL) {
1351 RB_DEBUG_COUNTER_INC(obj_hash_null);
1352 }
1353 else {
1354 RB_DEBUG_COUNTER_INC(obj_hash_ar);
1355 }
1356 }
1357 else {
1358 RB_DEBUG_COUNTER_INC(obj_hash_st);
1359 }
1360#endif
1361
1362 rb_hash_free(obj);
1363 break;
1364 case T_REGEXP:
1365 if (RREGEXP(obj)->ptr) {
1366 onig_free(RREGEXP(obj)->ptr);
1367 RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1368 }
1369 break;
1370 case T_DATA:
1371 if (!rb_data_free(objspace, obj)) return false;
1372 break;
1373 case T_MATCH:
1374 {
1375 rb_matchext_t *rm = RMATCH_EXT(obj);
1376#if USE_DEBUG_COUNTER
1377 if (rm->regs.num_regs >= 8) {
1378 RB_DEBUG_COUNTER_INC(obj_match_ge8);
1379 }
1380 else if (rm->regs.num_regs >= 4) {
1381 RB_DEBUG_COUNTER_INC(obj_match_ge4);
1382 }
1383 else if (rm->regs.num_regs >= 1) {
1384 RB_DEBUG_COUNTER_INC(obj_match_under4);
1385 }
1386#endif
1387 onig_region_free(&rm->regs, 0);
1388 xfree(rm->char_offset);
1389
1390 RB_DEBUG_COUNTER_INC(obj_match_ptr);
1391 }
1392 break;
1393 case T_FILE:
1394 if (RFILE(obj)->fptr) {
1395 make_io_zombie(objspace, obj);
1396 RB_DEBUG_COUNTER_INC(obj_file_ptr);
1397 return FALSE;
1398 }
1399 break;
1400 case T_RATIONAL:
1401 RB_DEBUG_COUNTER_INC(obj_rational);
1402 break;
1403 case T_COMPLEX:
1404 RB_DEBUG_COUNTER_INC(obj_complex);
1405 break;
1406 case T_MOVED:
1407 break;
1408 case T_ICLASS:
1409 args.klass = obj;
1410
1411 rb_class_classext_foreach(obj, classext_iclass_free, (void *)&args);
1412 if (RCLASS(obj)->ns_classext_tbl) {
1413 st_free_table(RCLASS(obj)->ns_classext_tbl);
1414 }
1415
1416 RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1417 break;
1418
1419 case T_FLOAT:
1420 RB_DEBUG_COUNTER_INC(obj_float);
1421 break;
1422
1423 case T_BIGNUM:
1424 if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1425 xfree(BIGNUM_DIGITS(obj));
1426 RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1427 }
1428 else {
1429 RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1430 }
1431 break;
1432
1433 case T_NODE:
1434 UNEXPECTED_NODE(obj_free);
1435 break;
1436
1437 case T_STRUCT:
1438 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1439 RSTRUCT(obj)->as.heap.ptr == NULL) {
1440 RB_DEBUG_COUNTER_INC(obj_struct_embed);
1441 }
1442 else {
1443 xfree((void *)RSTRUCT(obj)->as.heap.ptr);
1444 RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1445 }
1446 break;
1447
1448 case T_SYMBOL:
1449 RB_DEBUG_COUNTER_INC(obj_symbol);
1450 break;
1451
1452 case T_IMEMO:
1453 rb_imemo_free((VALUE)obj);
1454 break;
1455
1456 default:
1457 rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1458 BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1459 }
1460
1461 if (FL_TEST_RAW(obj, FL_FINALIZE)) {
1462 rb_gc_impl_make_zombie(objspace, obj, 0, 0);
1463 return FALSE;
1464 }
1465 else {
1466 return TRUE;
1467 }
1468}
1469
1470void
1471rb_objspace_set_event_hook(const rb_event_flag_t event)
1472{
1473 rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1474}
1475
1476static int
1477internal_object_p(VALUE obj)
1478{
1479 void *ptr = asan_unpoison_object_temporary(obj);
1480
1481 if (RBASIC(obj)->flags) {
1482 switch (BUILTIN_TYPE(obj)) {
1483 case T_NODE:
1484 UNEXPECTED_NODE(internal_object_p);
1485 break;
1486 case T_NONE:
1487 case T_MOVED:
1488 case T_IMEMO:
1489 case T_ICLASS:
1490 case T_ZOMBIE:
1491 break;
1492 case T_CLASS:
1493 if (!RBASIC(obj)->klass) break;
1494 if (RCLASS_SINGLETON_P(obj)) {
1495 return rb_singleton_class_internal_p(obj);
1496 }
1497 return 0;
1498 default:
1499 if (!RBASIC(obj)->klass) break;
1500 return 0;
1501 }
1502 }
1503 if (ptr || !RBASIC(obj)->flags) {
1504 rb_asan_poison_object(obj);
1505 }
1506 return 1;
1507}
1508
1509int
1510rb_objspace_internal_object_p(VALUE obj)
1511{
1512 return internal_object_p(obj);
1513}
1514
1516 size_t num;
1517 VALUE of;
1518};
1519
1520static int
1521os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1522{
1523 struct os_each_struct *oes = (struct os_each_struct *)data;
1524
1525 VALUE v = (VALUE)vstart;
1526 for (; v != (VALUE)vend; v += stride) {
1527 if (!internal_object_p(v)) {
1528 if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1529 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1530 rb_yield(v);
1531 oes->num++;
1532 }
1533 }
1534 }
1535 }
1536
1537 return 0;
1538}
1539
1540static VALUE
1541os_obj_of(VALUE of)
1542{
1543 struct os_each_struct oes;
1544
1545 oes.num = 0;
1546 oes.of = of;
1547 rb_objspace_each_objects(os_obj_of_i, &oes);
1548 return SIZET2NUM(oes.num);
1549}
1550
1551/*
1552 * call-seq:
1553 * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1554 * ObjectSpace.each_object([module]) -> an_enumerator
1555 *
1556 * Calls the block once for each living, nonimmediate object in this
1557 * Ruby process. If <i>module</i> is specified, calls the block
1558 * for only those classes or modules that match (or are a subclass of)
1559 * <i>module</i>. Returns the number of objects found. Immediate
1560 * objects (<code>Fixnum</code>s, <code>Symbol</code>s
1561 * <code>true</code>, <code>false</code>, and <code>nil</code>) are
1562 * never returned. In the example below, #each_object returns both
1563 * the numbers we defined and several constants defined in the Math
1564 * module.
1565 *
1566 * If no block is given, an enumerator is returned instead.
1567 *
1568 * a = 102.7
1569 * b = 95 # Won't be returned
1570 * c = 12345678987654321
1571 * count = ObjectSpace.each_object(Numeric) {|x| p x }
1572 * puts "Total count: #{count}"
1573 *
1574 * <em>produces:</em>
1575 *
1576 * 12345678987654321
1577 * 102.7
1578 * 2.71828182845905
1579 * 3.14159265358979
1580 * 2.22044604925031e-16
1581 * 1.7976931348623157e+308
1582 * 2.2250738585072e-308
1583 * Total count: 7
1584 *
1585 * Due to a current known Ractor implementation issue, this method will not yield
1586 * Ractor-unshareable objects in multi-Ractor mode (when
1587 * <code>Ractor.new</code> has been called within the process at least once).
1588 * See https://bugs.ruby-lang.org/issues/19387 for more information.
1589 *
1590 * a = 12345678987654321 # shareable
1591 * b = [].freeze # shareable
1592 * c = {} # not shareable
1593 * ObjectSpace.each_object {|x| x } # yields a, b, and c
1594 * Ractor.new {} # enter multi-Ractor mode
1595 * ObjectSpace.each_object {|x| x } # does not yield c
1596 *
1597 */
1598
1599static VALUE
1600os_each_obj(int argc, VALUE *argv, VALUE os)
1601{
1602 VALUE of;
1603
1604 of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1605 RETURN_ENUMERATOR(os, 1, &of);
1606 return os_obj_of(of);
1607}
1608
1609/*
1610 * call-seq:
1611 * ObjectSpace.undefine_finalizer(obj)
1612 *
1613 * Removes all finalizers for <i>obj</i>.
1614 *
1615 */
1616
1617static VALUE
1618undefine_final(VALUE os, VALUE obj)
1619{
1620 return rb_undefine_finalizer(obj);
1621}
1622
1623VALUE
1624rb_undefine_finalizer(VALUE obj)
1625{
1626 rb_check_frozen(obj);
1627
1628 rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1629
1630 return obj;
1631}
1632
1633static void
1634should_be_callable(VALUE block)
1635{
1636 if (!rb_obj_respond_to(block, idCall, TRUE)) {
1637 rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1638 rb_obj_class(block));
1639 }
1640}
1641
1642static void
1643should_be_finalizable(VALUE obj)
1644{
1645 if (!FL_ABLE(obj)) {
1646 rb_raise(rb_eArgError, "cannot define finalizer for %s",
1647 rb_obj_classname(obj));
1648 }
1649 rb_check_frozen(obj);
1650}
1651
1652void
1653rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1654{
1655 rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1656}
1657
1658/*
1659 * call-seq:
1660 * ObjectSpace.define_finalizer(obj, aProc=proc())
1661 *
1662 * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1663 * was destroyed. The object ID of the <i>obj</i> will be passed
1664 * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
1665 * method, make sure it can be called with a single argument.
1666 *
1667 * The return value is an array <code>[0, aProc]</code>.
1668 *
1669 * The two recommended patterns are to either create the finaliser proc
1670 * in a non-instance method where it can safely capture the needed state,
1671 * or to use a custom callable object that stores the needed state
1672 * explicitly as instance variables.
1673 *
1674 * class Foo
1675 * def initialize(data_needed_for_finalization)
1676 * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
1677 * end
1678 *
1679 * def self.create_finalizer(data_needed_for_finalization)
1680 * proc {
1681 * puts "finalizing #{data_needed_for_finalization}"
1682 * }
1683 * end
1684 * end
1685 *
1686 * class Bar
1687 * class Remover
1688 * def initialize(data_needed_for_finalization)
1689 * @data_needed_for_finalization = data_needed_for_finalization
1690 * end
1691 *
1692 * def call(id)
1693 * puts "finalizing #{@data_needed_for_finalization}"
1694 * end
1695 * end
1696 *
1697 * def initialize(data_needed_for_finalization)
1698 * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
1699 * end
1700 * end
1701 *
1702 * Note that if your finalizer references the object to be
1703 * finalized it will never be run on GC, although it will still be
1704 * run at exit. You will get a warning if you capture the object
1705 * to be finalized as the receiver of the finalizer.
1706 *
1707 * class CapturesSelf
1708 * def initialize(name)
1709 * ObjectSpace.define_finalizer(self, proc {
1710 * # this finalizer will only be run on exit
1711 * puts "finalizing #{name}"
1712 * })
1713 * end
1714 * end
1715 *
1716 * Also note that finalization can be unpredictable and is never guaranteed
1717 * to be run except on exit.
1718 */
1719
1720static VALUE
1721define_final(int argc, VALUE *argv, VALUE os)
1722{
1723 VALUE obj, block;
1724
1725 rb_scan_args(argc, argv, "11", &obj, &block);
1726 if (argc == 1) {
1727 block = rb_block_proc();
1728 }
1729
1730 if (rb_callable_receiver(block) == obj) {
1731 rb_warn("finalizer references object to be finalized");
1732 }
1733
1734 return rb_define_finalizer(obj, block);
1735}
1736
1737VALUE
1738rb_define_finalizer(VALUE obj, VALUE block)
1739{
1740 should_be_finalizable(obj);
1741 should_be_callable(block);
1742
1743 block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
1744
1745 block = rb_ary_new3(2, INT2FIX(0), block);
1746 OBJ_FREEZE(block);
1747 return block;
1748}
1749
1750void
1751rb_objspace_call_finalizer(void)
1752{
1753 rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
1754}
1755
1756void
1757rb_objspace_free_objects(void *objspace)
1758{
1759 rb_gc_impl_shutdown_free_objects(objspace);
1760}
1761
1762int
1763rb_objspace_garbage_object_p(VALUE obj)
1764{
1765 return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
1766}
1767
1768bool
1769rb_gc_pointer_to_heap_p(VALUE obj)
1770{
1771 return rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj);
1772}
1773
1774#define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
1775#define LAST_OBJECT_ID() (object_id_counter * OBJ_ID_INCREMENT)
1776static VALUE id2ref_value = 0;
1777static st_table *id2ref_tbl = NULL;
1778static bool id2ref_tbl_built = false;
1779
1780#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
1781static size_t object_id_counter = 1;
1782#else
1783static unsigned long long object_id_counter = 1;
1784#endif
1785
1786static inline VALUE
1787generate_next_object_id(void)
1788{
1789#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
1790 // 64bit atomics are available
1791 return SIZET2NUM(RUBY_ATOMIC_SIZE_FETCH_ADD(object_id_counter, 1) * OBJ_ID_INCREMENT);
1792#else
1793 unsigned int lock_lev = rb_gc_vm_lock();
1794 VALUE id = ULL2NUM(++object_id_counter * OBJ_ID_INCREMENT);
1795 rb_gc_vm_unlock(lock_lev);
1796 return id;
1797#endif
1798}
1799
1800void
1801rb_gc_obj_id_moved(VALUE obj)
1802{
1803 if (UNLIKELY(id2ref_tbl)) {
1804 st_insert(id2ref_tbl, (st_data_t)rb_obj_id(obj), (st_data_t)obj);
1805 }
1806}
1807
1808static int
1809object_id_cmp(st_data_t x, st_data_t y)
1810{
1811 if (RB_TYPE_P(x, T_BIGNUM)) {
1812 return !rb_big_eql(x, y);
1813 }
1814 else {
1815 return x != y;
1816 }
1817}
1818
1819static st_index_t
1820object_id_hash(st_data_t n)
1821{
1822 return FIX2LONG(rb_hash((VALUE)n));
1823}
1824
1825static const struct st_hash_type object_id_hash_type = {
1826 object_id_cmp,
1827 object_id_hash,
1828};
1829
1830static void gc_mark_tbl_no_pin(st_table *table);
1831
1832static void
1833id2ref_tbl_mark(void *data)
1834{
1835 st_table *table = (st_table *)data;
1836 if (UNLIKELY(!RB_POSFIXABLE(LAST_OBJECT_ID()))) {
1837 // It's very unlikely, but if enough object ids were generated, keys may be T_BIGNUM
1838 rb_mark_set(table);
1839 }
1840 // We purposedly don't mark values, as they are weak references.
1841 // rb_gc_obj_free_vm_weak_references takes care of cleaning them up.
1842}
1843
1844static size_t
1845id2ref_tbl_memsize(const void *data)
1846{
1847 return rb_st_memsize(data);
1848}
1849
1850static void
1851id2ref_tbl_compact(void *data)
1852{
1853 st_table *table = (st_table *)data;
1854 if (LIKELY(RB_POSFIXABLE(LAST_OBJECT_ID()))) {
1855 // We know keys are all FIXNUM, so no need to update them.
1856 gc_ref_update_table_values_only(table);
1857 }
1858 else {
1859 gc_update_table_refs(table);
1860 }
1861}
1862
1863static void
1864id2ref_tbl_free(void *data)
1865{
1866 id2ref_tbl = NULL; // clear global ref
1867 st_table *table = (st_table *)data;
1868 st_free_table(table);
1869}
1870
1871static const rb_data_type_t id2ref_tbl_type = {
1872 .wrap_struct_name = "VM/_id2ref_table",
1873 .function = {
1874 .dmark = id2ref_tbl_mark,
1875 .dfree = id2ref_tbl_free,
1876 .dsize = id2ref_tbl_memsize,
1877 .dcompact = id2ref_tbl_compact,
1878 },
1879 .flags = RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_FREE_IMMEDIATELY
1880};
1881
1882#define RUBY_ATOMIC_VALUE_LOAD(x) (VALUE)(RUBY_ATOMIC_PTR_LOAD(x))
1883
1884static VALUE
1885class_object_id(VALUE klass)
1886{
1887 VALUE id = RUBY_ATOMIC_VALUE_LOAD(RCLASS(klass)->object_id);
1888 if (!id) {
1889 unsigned int lock_lev = rb_gc_vm_lock();
1890 id = generate_next_object_id();
1891 VALUE existing_id = RUBY_ATOMIC_VALUE_CAS(RCLASS(klass)->object_id, 0, id);
1892 if (existing_id) {
1893 id = existing_id;
1894 }
1895 else if (RB_UNLIKELY(id2ref_tbl)) {
1896 st_insert(id2ref_tbl, id, klass);
1897 }
1898 rb_gc_vm_unlock(lock_lev);
1899 }
1900 return id;
1901}
1902
1903static VALUE
1904object_id0(VALUE obj)
1905{
1906 VALUE id = Qfalse;
1907
1908 if (rb_shape_has_object_id(rb_obj_shape(obj))) {
1909 shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);
1910 id = rb_obj_field_get(obj, object_id_shape_id);
1911 RUBY_ASSERT(id, "object_id missing");
1912 return id;
1913 }
1914
1915 // rb_shape_object_id_shape may lock if the current shape has
1916 // multiple children.
1917 shape_id_t object_id_shape_id = rb_shape_transition_object_id(obj);
1918
1919 id = generate_next_object_id();
1920 rb_obj_field_set(obj, object_id_shape_id, id);
1921 if (RB_UNLIKELY(id2ref_tbl)) {
1922 st_insert(id2ref_tbl, (st_data_t)id, (st_data_t)obj);
1923 }
1924 return id;
1925}
1926
1927static VALUE
1928object_id(VALUE obj)
1929{
1930 switch (BUILTIN_TYPE(obj)) {
1931 case T_CLASS:
1932 case T_MODULE:
1933 // With namespaces, classes and modules have different fields
1934 // in different namespaces, so we cannot store the object id
1935 // in fields.
1936 return class_object_id(obj);
1937 case T_IMEMO:
1938 rb_bug("T_IMEMO can't have an object_id");
1939 break;
1940 default:
1941 break;
1942 }
1943
1944 if (UNLIKELY(rb_gc_multi_ractor_p() && rb_ractor_shareable_p(obj))) {
1945 unsigned int lock_lev = rb_gc_vm_lock();
1946 VALUE id = object_id0(obj);
1947 rb_gc_vm_unlock(lock_lev);
1948 return id;
1949 }
1950
1951 return object_id0(obj);
1952}
1953
1954static void
1955build_id2ref_i(VALUE obj, void *data)
1956{
1957 st_table *id2ref_tbl = (st_table *)data;
1958
1959 switch (BUILTIN_TYPE(obj)) {
1960 case T_CLASS:
1961 case T_MODULE:
1962 if (RCLASS(obj)->object_id) {
1963 st_insert(id2ref_tbl, RCLASS(obj)->object_id, obj);
1964 }
1965 break;
1966 case T_IMEMO:
1967 break;
1968 default:
1969 if (rb_shape_obj_has_id(obj)) {
1970 st_insert(id2ref_tbl, rb_obj_id(obj), obj);
1971 }
1972 break;
1973 }
1974}
1975
1976static VALUE
1977object_id_to_ref(void *objspace_ptr, VALUE object_id)
1978{
1979 rb_objspace_t *objspace = objspace_ptr;
1980
1981 unsigned int lev = rb_gc_vm_lock();
1982
1983 if (!id2ref_tbl) {
1984 rb_gc_vm_barrier(); // stop other ractors
1985
1986 // GC Must not trigger while we build the table, otherwise if we end
1987 // up freeing an object that had an ID, we might try to delete it from
1988 // the table even though it wasn't inserted yet.
1989 id2ref_tbl = st_init_table(&object_id_hash_type);
1990 id2ref_value = TypedData_Wrap_Struct(0, &id2ref_tbl_type, id2ref_tbl);
1991
1992 // build_id2ref_i will most certainly malloc, which could trigger GC and sweep
1993 // objects we just added to the table.
1994 bool gc_disabled = RTEST(rb_gc_disable_no_rest());
1995 {
1996 rb_gc_impl_each_object(objspace, build_id2ref_i, (void *)id2ref_tbl);
1997 }
1998 if (!gc_disabled) rb_gc_enable();
1999 id2ref_tbl_built = true;
2000 }
2001
2002 VALUE obj;
2003 bool found = st_lookup(id2ref_tbl, object_id, &obj) && !rb_gc_impl_garbage_object_p(objspace, obj);
2004
2005 rb_gc_vm_unlock(lev);
2006
2007 if (found) {
2008 return obj;
2009 }
2010
2011 if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(LAST_OBJECT_ID()))) {
2012 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2013 }
2014 else {
2015 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is a recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
2016 }
2017}
2018
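/* Called when `obj` is freed: if it ever had an object ID and the _id2ref
 * table exists, remove its entry so the stale ID cannot resolve to a
 * recycled slot. */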
2019static inline void
2020obj_free_object_id(VALUE obj)
2021{
2022 if (RB_BUILTIN_TYPE(obj) == T_IMEMO) {
2023 return;
2024 }
2025
2026 VALUE obj_id = 0;
2027 if (RB_UNLIKELY(id2ref_tbl)) {
2028 switch (BUILTIN_TYPE(obj)) {
2029 case T_CLASS:
2030 case T_MODULE:
2031 if (RCLASS(obj)->object_id) {
2032 obj_id = RCLASS(obj)->object_id;
2033 }
2034 break;
2035 default:
2036 if (rb_shape_obj_has_id(obj)) {
2037 obj_id = object_id(obj);
2038 }
2039 break;
2040 }
2041 }
2042
2043 if (RB_UNLIKELY(obj_id)) {
2044        RUBY_ASSERT(FIXNUM_P(obj_id) || RB_TYPE_P(obj_id, T_BIGNUM));
2045
2046 if (!st_delete(id2ref_tbl, (st_data_t *)&obj_id, NULL)) {
2047 // If we're currently building the table then it's not a bug
2048 if (id2ref_tbl_built) {
2049 rb_bug("Object ID seen, but not in _id2ref table: object_id=%llu object=%s", NUM2ULL(obj_id), rb_obj_info(obj));
2050 }
2051 }
2052 }
2053}
2054
2055void
2056rb_gc_obj_free_vm_weak_references(VALUE obj)
2057{
2058 obj_free_object_id(obj);
2059
2060 if (FL_TEST_RAW(obj, FL_EXIVAR)) {
2061        rb_free_generic_ivar(obj);
2062 FL_UNSET_RAW(obj, FL_EXIVAR);
2063 }
2064
2065 switch (BUILTIN_TYPE(obj)) {
2066 case T_STRING:
2067 if (FL_TEST_RAW(obj, RSTRING_FSTR)) {
2068 rb_gc_free_fstring(obj);
2069 }
2070 break;
2071 case T_SYMBOL:
2072 rb_gc_free_dsymbol(obj);
2073 break;
2074 case T_IMEMO:
2075 switch (imemo_type(obj)) {
2076 case imemo_callinfo:
2077 rb_vm_ci_free((const struct rb_callinfo *)obj);
2078 break;
2079 case imemo_ment:
2080 rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
2081 break;
2082 default:
2083 break;
2084 }
2085 break;
2086 default:
2087 break;
2088 }
2089}
2090
2091/*
2092 * call-seq:
2093 * ObjectSpace._id2ref(object_id) -> an_object
2094 *
2095 *  Converts an object id to a reference to the object. It must not be
2096 *  called with an object id that was passed as a parameter to a finalizer.
2097 *
2098 * s = "I am a string" #=> "I am a string"
2099 * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
2100 * r == s #=> true
2101 *
2102 *  In multi-ractor mode, it raises RangeError if the object is not
2103 *  shareable.
2104 *
2105 * This method is deprecated and should no longer be used.
2106 */
2107
2108static VALUE
2109id2ref(VALUE objid)
2110{
2111#if SIZEOF_LONG == SIZEOF_VOIDP
2112#define NUM2PTR(x) NUM2ULONG(x)
2113#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2114#define NUM2PTR(x) NUM2ULL(x)
2115#endif
2116 objid = rb_to_int(objid);
2117 if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
2118 VALUE ptr = NUM2PTR(objid);
2119 if (SPECIAL_CONST_P(ptr)) {
2120 if (ptr == Qtrue) return Qtrue;
2121 if (ptr == Qfalse) return Qfalse;
2122 if (NIL_P(ptr)) return Qnil;
2123 if (FIXNUM_P(ptr)) return ptr;
2124 if (FLONUM_P(ptr)) return ptr;
2125
2126 if (SYMBOL_P(ptr)) {
2127 // Check that the symbol is valid
2128 if (rb_static_id_valid_p(SYM2ID(ptr))) {
2129 return ptr;
2130 }
2131 else {
2132 rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
2133 }
2134 }
2135
2136 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
2137 }
2138 }
2139
2140 VALUE obj = object_id_to_ref(rb_gc_get_objspace(), objid);
2141 if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
2142 return obj;
2143 }
2144 else {
2145 rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object on multi-ractor", rb_int2str(objid, 10));
2146 }
2147}
2148
2149/* :nodoc: */
2150static VALUE
2151os_id2ref(VALUE os, VALUE objid)
2152{
2153 rb_category_warn(RB_WARN_CATEGORY_DEPRECATED, "ObjectSpace._id2ref is deprecated");
2154 return id2ref(objid);
2155}
2156
2157static VALUE
2158rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(VALUE))
2159{
2160 if (SPECIAL_CONST_P(obj)) {
2161#if SIZEOF_LONG == SIZEOF_VOIDP
2162 return LONG2NUM((SIGNED_VALUE)obj);
2163#else
2164 return LL2NUM((SIGNED_VALUE)obj);
2165#endif
2166 }
2167
2168 return get_heap_object_id(obj);
2169}
2170
2171static VALUE
2172nonspecial_obj_id(VALUE obj)
2173{
2174#if SIZEOF_LONG == SIZEOF_VOIDP
2175 return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
2176#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
2177 return LL2NUM((SIGNED_VALUE)(obj) / 2);
2178#else
2179# error not supported
2180#endif
2181}
2182
2183VALUE
2184rb_memory_id(VALUE obj)
2185{
2186 return rb_find_object_id(NULL, obj, nonspecial_obj_id);
2187}
2188
2189/*
2190 * Document-method: __id__
2191 * Document-method: object_id
2192 *
2193 * call-seq:
2194 * obj.__id__ -> integer
2195 * obj.object_id -> integer
2196 *
2197 * Returns an integer identifier for +obj+.
2198 *
2199 * The same number will be returned on all calls to +object_id+ for a given
2200 * object, and no two active objects will share an id.
2201 *
2202 *  Note that some objects of builtin classes are reused for optimization.
2203 *  This is the case for immediate values and frozen string literals.
2204 *
2205 * BasicObject implements +__id__+, Kernel implements +object_id+.
2206 *
2207 * Immediate values are not passed by reference but are passed by value:
2208 * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
2209 *
2210 * Object.new.object_id == Object.new.object_id # => false
2211 * (21 * 2).object_id == (21 * 2).object_id # => true
2212 * "hello".object_id == "hello".object_id # => false
2213 * "hi".freeze.object_id == "hi".freeze.object_id # => true
2214 */
2215
2216VALUE
2217rb_obj_id(VALUE obj)
2218{
2219 /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
2220 * Otherwise, the object ID is a Numeric that is a non-zero multiple of
2221 * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
2222 * any immediates. */
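    /* For example, on a typical 64-bit build: 1.object_id == 3 (Fixnum 1 is
     * tagged as (1 << 1) | 1), and nil.object_id == 8 (the value of Qnil). */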
2223 return rb_find_object_id(rb_gc_get_objspace(), obj, object_id);
2224}
2225
2226bool
2227rb_obj_id_p(VALUE obj)
2228{
2229 return !RB_TYPE_P(obj, T_IMEMO) && rb_shape_obj_has_id(obj);
2230}
2231
2232static enum rb_id_table_iterator_result
2233cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
2234{
2235 size_t *total_size = data_ptr;
2236 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2237 *total_size += sizeof(*ccs);
2238 *total_size += sizeof(ccs->entries[0]) * ccs->capa;
2239 return ID_TABLE_CONTINUE;
2240}
2241
2242static size_t
2243cc_table_memsize(struct rb_id_table *cc_table)
2244{
2245 size_t total = rb_id_table_memsize(cc_table);
2246 rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
2247 return total;
2248}
2249
2250static void
2251classext_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2252{
2253 size_t *size = (size_t *)arg;
2254 size_t s = 0;
2255
2256 if (RCLASSEXT_M_TBL(ext)) {
2257 s += rb_id_table_memsize(RCLASSEXT_M_TBL(ext));
2258 }
2259 if (RCLASSEXT_CVC_TBL(ext)) {
2260 s += rb_id_table_memsize(RCLASSEXT_CVC_TBL(ext));
2261 }
2262 if (RCLASSEXT_CONST_TBL(ext)) {
2263 s += rb_id_table_memsize(RCLASSEXT_CONST_TBL(ext));
2264 }
2265 if (RCLASSEXT_CC_TBL(ext)) {
2266 s += cc_table_memsize(RCLASSEXT_CC_TBL(ext));
2267 }
2268 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext)) {
2269 s += (RCLASSEXT_SUPERCLASS_DEPTH(ext) + 1) * sizeof(VALUE);
2270 }
2271 if (!prime) {
2272 s += sizeof(rb_classext_t);
2273 }
2274 *size += s;
2275}
2276
2277static void
2278classext_fields_hash_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2279{
2280 size_t *size = (size_t *)arg;
2281 size_t count;
2282 RB_VM_LOCK_ENTER();
2283 {
2284 count = rb_st_table_size((st_table *)RCLASSEXT_FIELDS(ext));
2285 }
2286 RB_VM_LOCK_LEAVE();
2287 // class IV sizes are allocated as powers of two
2288 *size += SIZEOF_VALUE << bit_length(count);
2289}
2290
2291static void
2292classext_superclasses_memsize(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
2293{
2294 size_t *size = (size_t *)arg;
2295 size_t array_size;
2296 if (RCLASSEXT_SUPERCLASSES_OWNER(ext)) {
2297 array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext);
2298 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext))
2299 array_size += 1;
2300 *size += array_size * sizeof(VALUE);
2301 }
2302}
2303
2304size_t
2305rb_obj_memsize_of(VALUE obj)
2306{
2307 size_t size = 0;
2308
2309 if (SPECIAL_CONST_P(obj)) {
2310 return 0;
2311 }
2312
2313 if (FL_TEST(obj, FL_EXIVAR)) {
2314 size += rb_generic_ivar_memsize(obj);
2315 }
2316
2317 switch (BUILTIN_TYPE(obj)) {
2318 case T_OBJECT:
2319 if (rb_shape_obj_too_complex_p(obj)) {
2320 size += rb_st_memsize(ROBJECT_FIELDS_HASH(obj));
2321 }
2322 else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
2323 size += ROBJECT_FIELDS_CAPACITY(obj) * sizeof(VALUE);
2324 }
2325 break;
2326 case T_MODULE:
2327 case T_CLASS:
2328 rb_class_classext_foreach(obj, classext_memsize, (void *)&size);
2329
2330 if (rb_shape_obj_too_complex_p(obj)) {
2331 rb_class_classext_foreach(obj, classext_fields_hash_memsize, (void *)&size);
2332 }
2333 else {
2334 // class IV sizes are allocated as powers of two
2335 size += SIZEOF_VALUE << bit_length(RCLASS_FIELDS_COUNT(obj));
2336 }
2337
2338 rb_class_classext_foreach(obj, classext_superclasses_memsize, (void *)&size);
2339 break;
2340 case T_ICLASS:
2341 if (RICLASS_OWNS_M_TBL_P(obj)) {
2342 if (RCLASS_M_TBL(obj)) {
2343 size += rb_id_table_memsize(RCLASS_M_TBL(obj));
2344 }
2345 }
2346 if (RCLASS_WRITABLE_CC_TBL(obj)) {
2347 size += cc_table_memsize(RCLASS_WRITABLE_CC_TBL(obj));
2348 }
2349 break;
2350 case T_STRING:
2351 size += rb_str_memsize(obj);
2352 break;
2353 case T_ARRAY:
2354 size += rb_ary_memsize(obj);
2355 break;
2356 case T_HASH:
2357 if (RHASH_ST_TABLE_P(obj)) {
2358 VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
2359 /* st_table is in the slot */
2360 size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
2361 }
2362 break;
2363 case T_REGEXP:
2364 if (RREGEXP_PTR(obj)) {
2365 size += onig_memsize(RREGEXP_PTR(obj));
2366 }
2367 break;
2368 case T_DATA:
2369 size += rb_objspace_data_type_memsize(obj);
2370 break;
2371 case T_MATCH:
2372 {
2373 rb_matchext_t *rm = RMATCH_EXT(obj);
2374 size += onig_region_memsize(&rm->regs);
2375 size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
2376 }
2377 break;
2378 case T_FILE:
2379 if (RFILE(obj)->fptr) {
2380 size += rb_io_memsize(RFILE(obj)->fptr);
2381 }
2382 break;
2383 case T_RATIONAL:
2384 case T_COMPLEX:
2385 break;
2386 case T_IMEMO:
2387 size += rb_imemo_memsize(obj);
2388 break;
2389
2390 case T_FLOAT:
2391 case T_SYMBOL:
2392 break;
2393
2394 case T_BIGNUM:
2395 if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
2396 size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
2397 }
2398 break;
2399
2400 case T_NODE:
2401 UNEXPECTED_NODE(obj_memsize_of);
2402 break;
2403
2404 case T_STRUCT:
2405 if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
2406 RSTRUCT(obj)->as.heap.ptr) {
2407 size += sizeof(VALUE) * RSTRUCT_LEN(obj);
2408 }
2409 break;
2410
2411 case T_ZOMBIE:
2412 case T_MOVED:
2413 break;
2414
2415 default:
2416 rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
2417 BUILTIN_TYPE(obj), (void*)obj);
2418 }
2419
2420 return size + rb_gc_obj_slot_size(obj);
2421}
2422
2423static int
2424set_zero(st_data_t key, st_data_t val, st_data_t arg)
2425{
2426 VALUE k = (VALUE)key;
2427 VALUE hash = (VALUE)arg;
2428 rb_hash_aset(hash, k, INT2FIX(0));
2429 return ST_CONTINUE;
2430}
2431
2432 struct count_objects_data {
2433 size_t counts[T_MASK+1];
2434 size_t freed;
2435 size_t total;
2436};
2437
2438static void
2439count_objects_i(VALUE obj, void *d)
2440{
2441 struct count_objects_data *data = (struct count_objects_data *)d;
2442
2443 if (RBASIC(obj)->flags) {
2444 data->counts[BUILTIN_TYPE(obj)]++;
2445 }
2446 else {
2447 data->freed++;
2448 }
2449
2450 data->total++;
2451}
2452
2453/*
2454 * call-seq:
2455 * ObjectSpace.count_objects([result_hash]) -> hash
2456 *
2457 * Counts all objects grouped by type.
2458 *
2459 * It returns a hash, such as:
2460 * {
2461 * :TOTAL=>10000,
2462 * :FREE=>3011,
2463 * :T_OBJECT=>6,
2464 * :T_CLASS=>404,
2465 * # ...
2466 * }
2467 *
2468 *  The contents of the returned hash are implementation specific
2469 *  and may change in the future.
2470 *
2471 *  Keys starting with +:T_+ count live objects.
2472 *  For example, +:T_ARRAY+ is the number of arrays.
2473 *  +:FREE+ counts object slots which are not currently in use,
2474 *  and +:TOTAL+ is the sum of all of the above.
2475 *
2476 *  If the optional argument +result_hash+ is given,
2477 *  it is overwritten and returned. This is intended to avoid the probe effect.
2478 *
2479 * h = {}
2480 * ObjectSpace.count_objects(h)
2481 * puts h
2482 *	  # => { :TOTAL=>10000, :T_CLASS=>158, :T_MODULE=>20, :T_STRING=>5272 }
2483 *
2484 * This method is only expected to work on C Ruby.
2485 *
2486 */
2487
2488static VALUE
2489count_objects(int argc, VALUE *argv, VALUE os)
2490{
2491 struct count_objects_data data = { 0 };
2492 VALUE hash = Qnil;
2493
2494 if (rb_check_arity(argc, 0, 1) == 1) {
2495 hash = argv[0];
2496 if (!RB_TYPE_P(hash, T_HASH))
2497 rb_raise(rb_eTypeError, "non-hash given");
2498 }
2499
2500 rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2501
2502 if (NIL_P(hash)) {
2503 hash = rb_hash_new();
2504 }
2505 else if (!RHASH_EMPTY_P(hash)) {
2506 rb_hash_stlike_foreach(hash, set_zero, hash);
2507 }
2508 rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2509 rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2510
2511 for (size_t i = 0; i <= T_MASK; i++) {
2512 VALUE type = type_sym(i);
2513 if (data.counts[i])
2514 rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
2515 }
2516
2517 return hash;
2518}
2519
2520#define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2521
2522#define STACK_START (ec->machine.stack_start)
2523#define STACK_END (ec->machine.stack_end)
2524#define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2525
2526#if STACK_GROW_DIRECTION < 0
2527# define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2528#elif STACK_GROW_DIRECTION > 0
2529# define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2530#else
2531# define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2532 : (size_t)(STACK_END - STACK_START + 1))
2533#endif
2534#if !STACK_GROW_DIRECTION
2535int ruby_stack_grow_direction;
2536int
2537ruby_get_stack_grow_direction(volatile VALUE *addr)
2538{
2539 VALUE *end;
2540 SET_MACHINE_STACK_END(&end);
2541
2542 if (end > addr) return ruby_stack_grow_direction = 1;
2543 return ruby_stack_grow_direction = -1;
2544}
2545#endif
2546
2547size_t
2548 ruby_stack_length(VALUE **p)
2549{
2550 rb_execution_context_t *ec = GET_EC();
2551 SET_STACK_END;
2552 if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2553 return STACK_LENGTH;
2554}
2555
2556#define PREVENT_STACK_OVERFLOW 1
2557#ifndef PREVENT_STACK_OVERFLOW
2558#if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2559# define PREVENT_STACK_OVERFLOW 1
2560#else
2561# define PREVENT_STACK_OVERFLOW 0
2562#endif
2563#endif
2564#if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2565static int
2566stack_check(rb_execution_context_t *ec, int water_mark)
2567{
2568 SET_STACK_END;
2569
2570 size_t length = STACK_LENGTH;
2571 size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2572
2573 return length > maximum_length;
2574}
2575#else
2576#define stack_check(ec, water_mark) FALSE
2577#endif
2578
2579#define STACKFRAME_FOR_CALL_CFUNC 2048
2580
2581int
2582rb_ec_stack_check(rb_execution_context_t *ec)
2583{
2584 return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2585}
2586
2587int
2588 ruby_stack_check(void)
2589{
2590 return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2591}
2592
2593/* ==================== Marking ==================== */
2594
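/* Dispatcher for the marking entry points below: during GC (no
 * mark_func_data installed), call the GC implementation's `func` directly;
 * when a traversal callback is installed (as ObjectSpace reachability
 * walkers do), report the object to that callback instead of marking it. */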
2595#define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2596 if (!RB_SPECIAL_CONST_P(obj)) { \
2597 rb_vm_t *vm = GET_VM(); \
2598 void *objspace = vm->gc.objspace; \
2599 if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2600 GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2601 (func)(objspace, (obj_or_ptr)); \
2602 } \
2603 else if (check_obj ? \
2604 rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2605 !rb_gc_impl_garbage_object_p(objspace, obj) : \
2606 true) { \
2607 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2608 struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2609 vm->gc.mark_func_data = NULL; \
2610 mark_func_data->mark_func((obj), mark_func_data->data); \
2611 vm->gc.mark_func_data = mark_func_data; \
2612 } \
2613 } \
2614} while (0)
2615
2616static inline void
2617gc_mark_internal(VALUE obj)
2618{
2619 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2620}
2621
2622void
2623rb_gc_mark_movable(VALUE obj)
2624{
2625 gc_mark_internal(obj);
2626}
2627
2628void
2629rb_gc_mark_and_move(VALUE *ptr)
2630{
2631 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2632}
2633
2634static inline void
2635gc_mark_and_pin_internal(VALUE obj)
2636{
2637 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2638}
2639
2640void
2641rb_gc_mark(VALUE obj)
2642{
2643 gc_mark_and_pin_internal(obj);
2644}
2645
2646static inline void
2647gc_mark_maybe_internal(VALUE obj)
2648{
2649 RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2650}
2651
2652void
2653rb_gc_mark_maybe(VALUE obj)
2654{
2655 gc_mark_maybe_internal(obj);
2656}
2657
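/* Weak marking: the slot is registered with the GC implementation so that,
 * if *ptr is not otherwise marked by the end of the mark phase, the
 * reference is cleared instead of keeping the referent alive. */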
2658void
2659rb_gc_mark_weak(VALUE *ptr)
2660{
2661 if (RB_SPECIAL_CONST_P(*ptr)) return;
2662
2663 rb_vm_t *vm = GET_VM();
2664 void *objspace = vm->gc.objspace;
2665 if (LIKELY(vm->gc.mark_func_data == NULL)) {
2666 GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2667
2668 rb_gc_impl_mark_weak(objspace, ptr);
2669 }
2670 else {
2671 GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2672 }
2673}
2674
2675void
2676rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2677{
2678 rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2679}
2680
2681ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2682static void
2683each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2684{
2685 VALUE v;
2686 while (n--) {
2687 v = *x;
2688 cb(v, data);
2689 x++;
2690 }
2691}
2692
2693static void
2694each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2695{
2696 if (end <= start) return;
2697 each_location(start, end - start, cb, data);
2698}
2699
2700static void
2701gc_mark_maybe_each_location(VALUE obj, void *data)
2702{
2703 gc_mark_maybe_internal(obj);
2704}
2705
2706void
2707rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2708{
2709 each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2710}
2711
2712void
2713rb_gc_mark_values(long n, const VALUE *values)
2714{
2715 for (long i = 0; i < n; i++) {
2716 gc_mark_internal(values[i]);
2717 }
2718}
2719
2720void
2721rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2722{
2723 for (long i = 0; i < n; i++) {
2724 gc_mark_and_pin_internal(values[i]);
2725 }
2726}
2727
2728static int
2729mark_key(st_data_t key, st_data_t value, st_data_t data)
2730{
2731 gc_mark_and_pin_internal((VALUE)key);
2732
2733 return ST_CONTINUE;
2734}
2735
2736void
2737rb_mark_set(st_table *tbl)
2738{
2739 if (!tbl) return;
2740
2741 st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2742}
2743
2744static int
2745mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2746{
2747 gc_mark_internal((VALUE)key);
2748 gc_mark_internal((VALUE)value);
2749
2750 return ST_CONTINUE;
2751}
2752
2753static int
2754pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2755{
2756 gc_mark_and_pin_internal((VALUE)key);
2757 gc_mark_and_pin_internal((VALUE)value);
2758
2759 return ST_CONTINUE;
2760}
2761
2762static int
2763pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2764{
2765 gc_mark_and_pin_internal((VALUE)key);
2766 gc_mark_internal((VALUE)value);
2767
2768 return ST_CONTINUE;
2769}
2770
2771static void
2772mark_hash(VALUE hash)
2773{
2774 if (rb_hash_compare_by_id_p(hash)) {
2775 rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2776 }
2777 else {
2778 rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2779 }
2780
2781 gc_mark_internal(RHASH(hash)->ifnone);
2782}
2783
2784void
2785rb_mark_hash(st_table *tbl)
2786{
2787 if (!tbl) return;
2788
2789 st_foreach(tbl, pin_key_pin_value, 0);
2790}
2791
2792static enum rb_id_table_iterator_result
2793mark_method_entry_i(VALUE me, void *objspace)
2794{
2795 gc_mark_internal(me);
2796
2797 return ID_TABLE_CONTINUE;
2798}
2799
2800static void
2801mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2802{
2803 if (tbl) {
2804 rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2805 }
2806}
2807
2808static enum rb_id_table_iterator_result
2809mark_const_entry_i(VALUE value, void *objspace)
2810{
2811 const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2812
2813 gc_mark_internal(ce->value);
2814 gc_mark_internal(ce->file);
2815 return ID_TABLE_CONTINUE;
2816}
2817
2818static void
2819mark_const_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2820{
2821 if (!tbl) return;
2822 rb_id_table_foreach_values(tbl, mark_const_entry_i, objspace);
2823}
2824
2825 struct mark_cc_entry_args {
2826     rb_objspace_t *objspace;
2827 VALUE klass;
2828};
2829
2830static enum rb_id_table_iterator_result
2831mark_cc_entry_i(ID id, VALUE ccs_ptr, void *data)
2832{
2833 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
2834
2835 VM_ASSERT(vm_ccs_p(ccs));
2836 VM_ASSERT(id == ccs->cme->called_id);
2837
2838 if (METHOD_ENTRY_INVALIDATED(ccs->cme)) {
2839 rb_vm_ccs_free(ccs);
2840 return ID_TABLE_DELETE;
2841 }
2842 else {
2843 gc_mark_internal((VALUE)ccs->cme);
2844
2845 for (int i=0; i<ccs->len; i++) {
2846 VM_ASSERT(((struct mark_cc_entry_args *)data)->klass == ccs->entries[i].cc->klass);
2847 VM_ASSERT(vm_cc_check_cme(ccs->entries[i].cc, ccs->cme));
2848
2849 gc_mark_internal((VALUE)ccs->entries[i].cc);
2850 }
2851 return ID_TABLE_CONTINUE;
2852 }
2853}
2854
2855static void
2856mark_cc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl, VALUE klass)
2857{
2858 struct mark_cc_entry_args args;
2859
2860 if (!tbl) return;
2861
2862 args.objspace = objspace;
2863 args.klass = klass;
2864 rb_id_table_foreach(tbl, mark_cc_entry_i, (void *)&args);
2865}
2866
2867static enum rb_id_table_iterator_result
2868mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2869{
2870 struct rb_cvar_class_tbl_entry *entry;
2871
2872 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2873
2874 RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2875 gc_mark_internal((VALUE)entry->cref);
2876
2877 return ID_TABLE_CONTINUE;
2878}
2879
2880static void
2881mark_cvc_tbl(rb_objspace_t *objspace, struct rb_id_table *tbl)
2882{
2883 if (!tbl) return;
2884 rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2885}
2886
2887#if STACK_GROW_DIRECTION < 0
2888#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2889#elif STACK_GROW_DIRECTION > 0
2890#define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2891#else
2892#define GET_STACK_BOUNDS(start, end, appendix) \
2893 ((STACK_END < STACK_START) ? \
2894 ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2895#endif
2896
2897static void
2898gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2899{
2900 gc_mark_maybe_internal(obj);
2901
2902#ifdef RUBY_ASAN_ENABLED
2903 const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2904 void *fake_frame_start;
2905 void *fake_frame_end;
2906 bool is_fake_frame = asan_get_fake_stack_extents(
2907 ec->machine.asan_fake_stack_handle, obj,
2908 ec->machine.stack_start, ec->machine.stack_end,
2909 &fake_frame_start, &fake_frame_end
2910 );
2911 if (is_fake_frame) {
2912 each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2913 }
2914#endif
2915}
2916
2917static VALUE
2918gc_location_internal(void *objspace, VALUE value)
2919{
2920 if (SPECIAL_CONST_P(value)) {
2921 return value;
2922 }
2923
2924 GC_ASSERT(rb_gc_impl_pointer_to_heap_p(objspace, (void *)value));
2925
2926 return rb_gc_impl_location(objspace, value);
2927}
2928
2929VALUE
2930rb_gc_location(VALUE value)
2931{
2932 return gc_location_internal(rb_gc_get_objspace(), value);
2933}
2934
2935#if defined(__wasm__)
2936
2937
2938static VALUE *rb_stack_range_tmp[2];
2939
2940static void
2941rb_mark_locations(void *begin, void *end)
2942{
2943 rb_stack_range_tmp[0] = begin;
2944 rb_stack_range_tmp[1] = end;
2945}
2946
2947void
2948rb_gc_save_machine_context(void)
2949{
2950 // no-op
2951}
2952
2953# if defined(__EMSCRIPTEN__)
2954
2955static void
2956mark_current_machine_context(const rb_execution_context_t *ec)
2957{
2958 emscripten_scan_stack(rb_mark_locations);
2959 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2960
2961 emscripten_scan_registers(rb_mark_locations);
2962 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2963}
2964# else // use Asyncify version
2965
2966static void
2967mark_current_machine_context(rb_execution_context_t *ec)
2968{
2969 VALUE *stack_start, *stack_end;
2970 SET_STACK_END;
2971 GET_STACK_BOUNDS(stack_start, stack_end, 1);
2972 each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2973
2974 rb_wasm_scan_locals(rb_mark_locations);
2975 each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2976}
2977
2978# endif
2979
2980#else // !defined(__wasm__)
2981
2982void
2983rb_gc_save_machine_context(void)
2984{
2985 rb_thread_t *thread = GET_THREAD();
2986
2987 RB_VM_SAVE_MACHINE_CONTEXT(thread);
2988}
2989
2990
2991static void
2992mark_current_machine_context(const rb_execution_context_t *ec)
2993{
2994 rb_gc_mark_machine_context(ec);
2995}
2996#endif
2997
2998void
2999rb_gc_mark_machine_context(const rb_execution_context_t *ec)
3000{
3001 VALUE *stack_start, *stack_end;
3002
3003 GET_STACK_BOUNDS(stack_start, stack_end, 0);
3004 RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
3005
3006 void *data =
3007#ifdef RUBY_ASAN_ENABLED
3008 /* gc_mark_machine_stack_location_maybe() uses data as const */
3009        (rb_execution_context_t *)ec;
3010#else
3011 NULL;
3012#endif
3013
3014 each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
3015 int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
3016 each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
3017}
3018
3019static int
3020rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
3021{
3022 gc_mark_and_pin_internal((VALUE)value);
3023
3024 return ST_CONTINUE;
3025}
3026
3027void
3028rb_mark_tbl(st_table *tbl)
3029{
3030 if (!tbl || tbl->num_entries == 0) return;
3031
3032 st_foreach(tbl, rb_mark_tbl_i, 0);
3033}
3034
3035static void
3036gc_mark_tbl_no_pin(st_table *tbl)
3037{
3038 if (!tbl || tbl->num_entries == 0) return;
3039
3040 st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
3041}
3042
3043void
3044rb_mark_tbl_no_pin(st_table *tbl)
3045{
3046 gc_mark_tbl_no_pin(tbl);
3047}
3048
3049static bool
3050gc_declarative_marking_p(const rb_data_type_t *type)
3051{
3052 return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
3053}
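/* A minimal sketch of a declaratively-marked type as an extension might
 * define one (illustrative only; `struct my_data`, `my_refs`, and `my_type`
 * are hypothetical). With RUBY_TYPED_DECL_MARKING set, the dmark slot is
 * reinterpreted as a RUBY_REF_END-terminated list of VALUE offsets (see
 * TYPED_DATA_REFS_OFFSET_LIST below) rather than a mark function:
 *
 *     struct my_data { VALUE a; VALUE b; };
 *
 *     static const size_t my_refs[] = {
 *         offsetof(struct my_data, a),
 *         offsetof(struct my_data, b),
 *         RUBY_REF_END,
 *     };
 *
 *     static const rb_data_type_t my_type = {
 *         .wrap_struct_name = "my_data",
 *         .function = { .dmark = (RUBY_DATA_FUNC)(uintptr_t)my_refs },
 *         .flags = RUBY_TYPED_DECL_MARKING | RUBY_TYPED_FREE_IMMEDIATELY,
 *     };
 */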
3054
3055void
3056rb_gc_mark_roots(void *objspace, const char **categoryp)
3057{
3058 rb_execution_context_t *ec = GET_EC();
3059 rb_vm_t *vm = rb_ec_vm_ptr(ec);
3060
3061#define MARK_CHECKPOINT(category) do { \
3062 if (categoryp) *categoryp = category; \
3063} while (0)
3064
3065 MARK_CHECKPOINT("vm");
3066 rb_vm_mark(vm);
3067 if (vm->self) gc_mark_internal(vm->self);
3068
3069 MARK_CHECKPOINT("end_proc");
3070 rb_mark_end_proc();
3071
3072 MARK_CHECKPOINT("global_tbl");
3073 rb_gc_mark_global_tbl();
3074
3075#if USE_YJIT
3076 void rb_yjit_root_mark(void); // in Rust
3077
3078 if (rb_yjit_enabled_p) {
3079 MARK_CHECKPOINT("YJIT");
3080 rb_yjit_root_mark();
3081 }
3082#endif
3083
3084 MARK_CHECKPOINT("machine_context");
3085 mark_current_machine_context(ec);
3086
3087 MARK_CHECKPOINT("global_symbols");
3088 rb_sym_global_symbols_mark();
3089
3090 MARK_CHECKPOINT("finish");
3091
3092#undef MARK_CHECKPOINT
3093}
3094
3095 struct gc_mark_classext_foreach_arg {
3096     rb_objspace_t *objspace;
3097     VALUE obj;
3098 };
3099
3100static void
3101gc_mark_classext_module(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3102{
3103    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3104 rb_objspace_t *objspace = foreach_arg->objspace;
3105 VALUE obj = foreach_arg->obj;
3106
3107 if (RCLASSEXT_SUPER(ext)) {
3108 gc_mark_internal(RCLASSEXT_SUPER(ext));
3109 }
3110 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3111 if (rb_shape_obj_too_complex_p(obj)) {
3112 gc_mark_tbl_no_pin((st_table *)RCLASSEXT_FIELDS(ext));
3113        // the ELSE case is handled in rb_gc_mark_children() because it is per-RClass, not per-classext
3114 }
3115 if (!RCLASSEXT_SHARED_CONST_TBL(ext) && RCLASSEXT_CONST_TBL(ext)) {
3116 mark_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3117 }
3118 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3119 mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), obj);
3120 mark_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3121 gc_mark_internal(RCLASSEXT_CLASSPATH(ext));
3122}
3123
3124static void
3125gc_mark_classext_iclass(rb_classext_t *ext, bool prime, VALUE namespace, void *arg)
3126{
3127    struct gc_mark_classext_foreach_arg *foreach_arg = (struct gc_mark_classext_foreach_arg *)arg;
3128 rb_objspace_t *objspace = foreach_arg->objspace;
3129 VALUE iclass = foreach_arg->obj;
3130
3131 if (RCLASSEXT_SUPER(ext)) {
3132 gc_mark_internal(RCLASSEXT_SUPER(ext));
3133 }
3134 if (RCLASSEXT_ICLASS_IS_ORIGIN(ext) && !RCLASSEXT_ICLASS_ORIGIN_SHARED_MTBL(ext)) {
3135 mark_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3136 }
3137 if (RCLASSEXT_INCLUDER(ext)) {
3138 gc_mark_internal(RCLASSEXT_INCLUDER(ext));
3139 }
3140 mark_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3141 mark_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext), iclass);
3142}
3143
3144#define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA_TYPE(d)->function.dmark
3145
3146void
3147rb_gc_mark_children(void *objspace, VALUE obj)
3148{
3149 struct gc_mark_classext_foreach_arg foreach_args;
3150
3151 if (FL_TEST_RAW(obj, FL_EXIVAR)) {
3152 rb_mark_generic_ivar(obj);
3153 }
3154
3155 switch (BUILTIN_TYPE(obj)) {
3156 case T_FLOAT:
3157 case T_BIGNUM:
3158 case T_SYMBOL:
3159        /* Not immediates, but they have no references and no singleton class.
3160 *
3161 * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
3162 * ("symbol.c: remove rb_gc_mark_symbols()") */
3163 return;
3164
3165 case T_NIL:
3166 case T_FIXNUM:
3167 rb_bug("rb_gc_mark() called for broken object");
3168 break;
3169
3170 case T_NODE:
3171 UNEXPECTED_NODE(rb_gc_mark);
3172 break;
3173
3174 case T_IMEMO:
3175 rb_imemo_mark_and_move(obj, false);
3176 return;
3177
3178 default:
3179 break;
3180 }
3181
3182 gc_mark_internal(RBASIC(obj)->klass);
3183
3184 switch (BUILTIN_TYPE(obj)) {
3185 case T_CLASS:
3186 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
3187 gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
3188 }
3189 // Continue to the shared T_CLASS/T_MODULE
3190 case T_MODULE:
3191 foreach_args.objspace = objspace;
3192 foreach_args.obj = obj;
3193 rb_class_classext_foreach(obj, gc_mark_classext_module, (void *)&foreach_args);
3194
3195 if (!rb_shape_obj_too_complex_p(obj)) {
3196 for (attr_index_t i = 0; i < RCLASS_FIELDS_COUNT(obj); i++) {
3197 gc_mark_internal(RCLASS_PRIME_FIELDS(obj)[i]);
3198 }
3199 }
3200 break;
3201
3202 case T_ICLASS:
3203 foreach_args.objspace = objspace;
3204 foreach_args.obj = obj;
3205 rb_class_classext_foreach(obj, gc_mark_classext_iclass, (void *)&foreach_args);
3206 break;
3207
3208 case T_ARRAY:
3209 if (ARY_SHARED_P(obj)) {
3210 VALUE root = ARY_SHARED_ROOT(obj);
3211 gc_mark_internal(root);
3212 }
3213 else {
3214 long len = RARRAY_LEN(obj);
3215 const VALUE *ptr = RARRAY_CONST_PTR(obj);
3216 for (long i = 0; i < len; i++) {
3217 gc_mark_internal(ptr[i]);
3218 }
3219 }
3220 break;
3221
3222 case T_HASH:
3223 mark_hash(obj);
3224 break;
3225
3226 case T_STRING:
3227 if (STR_SHARED_P(obj)) {
3228 if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
3229 /* Embedded shared strings cannot be moved because this string
3230 * points into the slot of the shared string. There may be code
3231 * using the RSTRING_PTR on the stack, which would pin this
3232 * string but not pin the shared string, causing it to move. */
3233 gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
3234 }
3235 else {
3236 gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
3237 }
3238 }
3239 break;
3240
3241 case T_DATA: {
3242 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3243
3244 if (ptr) {
3245 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
3246 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3247
3248                for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *++offset_list) {
3249 gc_mark_internal(*(VALUE *)((char *)ptr + offset));
3250 }
3251 }
3252 else {
3253 RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
3254                    RTYPEDDATA_TYPE(obj)->function.dmark :
3255 RDATA(obj)->dmark;
3256 if (mark_func) (*mark_func)(ptr);
3257 }
3258 }
3259
3260 break;
3261 }
3262
3263 case T_OBJECT: {
3264 if (rb_shape_obj_too_complex_p(obj)) {
3265 gc_mark_tbl_no_pin(ROBJECT_FIELDS_HASH(obj));
3266 }
3267 else {
3268 const VALUE * const ptr = ROBJECT_FIELDS(obj);
3269
3270 uint32_t len = ROBJECT_FIELDS_COUNT(obj);
3271 for (uint32_t i = 0; i < len; i++) {
3272 gc_mark_internal(ptr[i]);
3273 }
3274 }
3275
3276 attr_index_t fields_count = ROBJECT_FIELDS_COUNT(obj);
3277 if (fields_count) {
3278 VALUE klass = RBASIC_CLASS(obj);
3279
3280 // Skip updating max_iv_count if the prime classext is not writable
3281 // because GC context doesn't provide information about namespaces.
3282 if (RCLASS_PRIME_CLASSEXT_WRITABLE_P(klass)) {
3283 VM_ASSERT(rb_shape_obj_too_complex_p(klass));
3284 // Increment max_iv_count if applicable, used to determine size pool allocation
3285 if (RCLASS_MAX_IV_COUNT(klass) < fields_count) {
3286 RCLASS_SET_MAX_IV_COUNT(klass, fields_count);
3287 }
3288 }
3289 }
3290
3291 break;
3292 }
3293
3294 case T_FILE:
3295 if (RFILE(obj)->fptr) {
3296 gc_mark_internal(RFILE(obj)->fptr->self);
3297 gc_mark_internal(RFILE(obj)->fptr->pathv);
3298 gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
3299 gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
3300 gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
3301 gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
3302 gc_mark_internal(RFILE(obj)->fptr->write_lock);
3303 gc_mark_internal(RFILE(obj)->fptr->timeout);
3304 gc_mark_internal(RFILE(obj)->fptr->wakeup_mutex);
3305 }
3306 break;
3307
3308 case T_REGEXP:
3309 gc_mark_internal(RREGEXP(obj)->src);
3310 break;
3311
3312 case T_MATCH:
3313 gc_mark_internal(RMATCH(obj)->regexp);
3314 if (RMATCH(obj)->str) {
3315 gc_mark_internal(RMATCH(obj)->str);
3316 }
3317 break;
3318
3319 case T_RATIONAL:
3320 gc_mark_internal(RRATIONAL(obj)->num);
3321 gc_mark_internal(RRATIONAL(obj)->den);
3322 break;
3323
3324 case T_COMPLEX:
3325 gc_mark_internal(RCOMPLEX(obj)->real);
3326 gc_mark_internal(RCOMPLEX(obj)->imag);
3327 break;
3328
3329 case T_STRUCT: {
3330 const long len = RSTRUCT_LEN(obj);
3331 const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
3332
3333 for (long i = 0; i < len; i++) {
3334 gc_mark_internal(ptr[i]);
3335 }
3336
3337 break;
3338 }
3339
3340 default:
3341 if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
3342 if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
3343 if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
3344 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
3345 BUILTIN_TYPE(obj), (void *)obj,
3346 rb_gc_impl_pointer_to_heap_p(objspace, (void *)obj) ? "corrupted object" : "non object");
3347 }
3348}
3349
3350size_t
3351rb_gc_obj_optimal_size(VALUE obj)
3352{
3353 switch (BUILTIN_TYPE(obj)) {
3354 case T_ARRAY:
3355 return rb_ary_size_as_embedded(obj);
3356
3357 case T_OBJECT:
3358 if (rb_shape_obj_too_complex_p(obj)) {
3359 return sizeof(struct RObject);
3360 }
3361 else {
3362 return rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(obj));
3363 }
3364
3365 case T_STRING:
3366 return rb_str_size_as_embedded(obj);
3367
3368 case T_HASH:
3369 return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
3370
3371 default:
3372 return 0;
3373 }
3374}
3375
3376void
3377rb_gc_writebarrier(VALUE a, VALUE b)
3378{
3379 rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
3380}
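/* Extension code normally reaches this through RB_OBJ_WRITE(): e.g.
 * RB_OBJ_WRITE(parent, &ptr->child, child) both stores `child` into the
 * slot and runs the barrier so an old `parent` remembers a young `child`.
 * (Illustrative; `ptr->child` stands for any VALUE slot inside `parent`.) */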
3381
3382void
3383rb_gc_writebarrier_unprotect(VALUE obj)
3384{
3385 rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
3386}
3387
3388/*
3389 * remember `obj' if needed.
3390 */
3391void
3392rb_gc_writebarrier_remember(VALUE obj)
3393{
3394 rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
3395}
3396
3397void
3398rb_gc_copy_attributes(VALUE dest, VALUE obj)
3399{
3400 rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
3401}
3402
3403int
3404rb_gc_modular_gc_loaded_p(void)
3405{
3406#if USE_MODULAR_GC
3407 return rb_gc_functions.modular_gc_loaded_p;
3408#else
3409 return false;
3410#endif
3411}
3412
3413const char *
3414rb_gc_active_gc_name(void)
3415{
3416 const char *gc_name = rb_gc_impl_active_gc_name();
3417
3418 const size_t len = strlen(gc_name);
3419 if (len > RB_GC_MAX_NAME_LEN) {
3420 rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
3421 RB_GC_MAX_NAME_LEN, len, gc_name);
3422 }
3423
3424 return gc_name;
3425}
3426
3427 struct rb_gc_object_metadata_entry *
3428rb_gc_object_metadata(VALUE obj)
3429{
3430 return rb_gc_impl_object_metadata(rb_gc_get_objspace(), obj);
3431}
3432
3433/* GC */
3434
3435void *
3436rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
3437{
3438 return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
3439}
3440
3441void
3442rb_gc_ractor_cache_free(void *cache)
3443{
3444 rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
3445}
3446
3447void
3448rb_gc_register_mark_object(VALUE obj)
3449{
3450 if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
3451 return;
3452
3453 rb_vm_register_global_object(obj);
3454}
3455
3456void
3457rb_gc_register_address(VALUE *addr)
3458{
3459 rb_vm_t *vm = GET_VM();
3460
3461 VALUE obj = *addr;
3462
3463 struct global_object_list *tmp = ALLOC(struct global_object_list);
3464 tmp->next = vm->global_object_list;
3465 tmp->varptr = addr;
3466 vm->global_object_list = tmp;
3467
3468 /*
3469 * Because some C extensions have assignment-then-register bugs,
3470     * we defensively guard `obj` here so that it will not get swept.
3471 */
3472 RB_GC_GUARD(obj);
3473 if (0 && !SPECIAL_CONST_P(obj)) {
3474 rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
3475 rb_obj_class(obj));
3476 rb_print_backtrace(stderr);
3477 }
3478}
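/* A minimal usage sketch (hypothetical extension code, not part of this
 * file): register the address before assigning to it, so the object can
 * never be collected between assignment and registration:
 *
 *     static VALUE my_cache; // hypothetical global
 *
 *     void
 *     Init_myext(void)
 *     {
 *         rb_gc_register_address(&my_cache); // register first
 *         my_cache = rb_ary_new();           // then assign
 *     }
 */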
3479
3480void
3481rb_gc_unregister_address(VALUE *addr)
3482{
3483 rb_vm_t *vm = GET_VM();
3484 struct global_object_list *tmp = vm->global_object_list;
3485
3486 if (tmp->varptr == addr) {
3487 vm->global_object_list = tmp->next;
3488 xfree(tmp);
3489 return;
3490 }
3491 while (tmp->next) {
3492 if (tmp->next->varptr == addr) {
3493 struct global_object_list *t = tmp->next;
3494
3495 tmp->next = tmp->next->next;
3496 xfree(t);
3497 break;
3498 }
3499 tmp = tmp->next;
3500 }
3501}
3502
3503void
3504 rb_global_variable(VALUE *var)
3505{
3506 rb_gc_register_address(var);
3507}
3508
3509static VALUE
3510gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
3511{
3512 rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
3513
3514 return Qnil;
3515}
3516
3517 /*
3518  * rb_objspace_each_objects() is a special C API to walk through the
3519  * Ruby object space. This C API is difficult to use correctly.
3520  * Frankly, you should not use it, unless you have read the source
3521  * code of this function and understand what it does.
3522  *
3523  * 'callback' will be called several times (once per heap page, in the
3524  * current implementation) with:
3525  *   vstart: a pointer to the first living object of the heap_page.
3526  *   vend: a pointer to just past the valid heap_page area.
3527  *   stride: the distance to the next VALUE.
3528  *
3529  * If callback() returns non-zero, the iteration is stopped.
3530  *
3531  * This is a sample callback that iterates over live objects:
3532  *
3533  *   static int
3534  *   sample_callback(void *vstart, void *vend, int stride, void *data)
3535  *   {
3536  *       VALUE v = (VALUE)vstart;
3537  *       for (; v != (VALUE)vend; v += stride) {
3538  *           if (!rb_objspace_internal_object_p(v)) { // liveness check
3539  *               // do something with live object 'v'
3540  *           }
3541  *       }
3542  *       return 0; // continue the iteration
3543  *   }
3544  *
3545  * Note: 'vstart' is not the top of the heap_page. It points at the first
3546  * living object, to guarantee at least one object and avoid GC issues.
3547  * This means that you cannot walk through every Ruby object page,
3548  * including freed object pages.
3549  *
3550  * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3551  * However, variable values may be passed via 'stride' for various
3552  * reasons, so you must use 'stride' instead of a constant value in
3553  * the iteration.
3554  */
3555void
3556rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3557{
3558 rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3559}
3560
3561static void
3562gc_ref_update_array(void *objspace, VALUE v)
3563{
3564 if (ARY_SHARED_P(v)) {
3565 VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3566
3567 UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3568
3569 VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3570 // If the root is embedded and its location has changed
3571 if (ARY_EMBED_P(new_root) && new_root != old_root) {
3572 size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3573 GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3574 RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3575 }
3576 }
3577 else {
3578 long len = RARRAY_LEN(v);
3579
3580 if (len > 0) {
3581 VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3582 for (long i = 0; i < len; i++) {
3583 UPDATE_IF_MOVED(objspace, ptr[i]);
3584 }
3585 }
3586
3587 if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3588 if (rb_ary_embeddable_p(v)) {
3589 rb_ary_make_embedded(v);
3590 }
3591 }
3592 }
3593}
3594
3595static void
3596gc_ref_update_object(void *objspace, VALUE v)
3597{
3598 VALUE *ptr = ROBJECT_FIELDS(v);
3599
3600 if (rb_shape_obj_too_complex_p(v)) {
3601 gc_ref_update_table_values_only(ROBJECT_FIELDS_HASH(v));
3602 return;
3603 }
3604
3605 size_t slot_size = rb_gc_obj_slot_size(v);
3606 size_t embed_size = rb_obj_embedded_size(ROBJECT_FIELDS_CAPACITY(v));
3607 if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3608 // Object can be re-embedded
3609 memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_FIELDS_COUNT(v));
3610 RB_FL_SET_RAW(v, ROBJECT_EMBED);
3611 xfree(ptr);
3612 ptr = ROBJECT(v)->as.ary;
3613 }
3614
3615 for (uint32_t i = 0; i < ROBJECT_FIELDS_COUNT(v); i++) {
3616 UPDATE_IF_MOVED(objspace, ptr[i]);
3617 }
3618}
3619
3620void
3621rb_gc_ref_update_table_values_only(st_table *tbl)
3622{
3623 gc_ref_update_table_values_only(tbl);
3624}
3625
3626/* Update MOVED references in a VALUE=>VALUE st_table */
3627void
3628rb_gc_update_tbl_refs(st_table *ptr)
3629{
3630 gc_update_table_refs(ptr);
3631}
3632
3633static void
3634gc_ref_update_hash(void *objspace, VALUE v)
3635{
3636 rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3637}
3638
3639static void
3640gc_update_values(void *objspace, long n, VALUE *values)
3641{
3642 for (long i = 0; i < n; i++) {
3643 UPDATE_IF_MOVED(objspace, values[i]);
3644 }
3645}
3646
3647void
3648rb_gc_update_values(long n, VALUE *values)
3649{
3650 gc_update_values(rb_gc_get_objspace(), n, values);
3651}
3652
3653static enum rb_id_table_iterator_result
3654check_id_table_move(VALUE value, void *data)
3655{
3656 void *objspace = (void *)data;
3657
3658 if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3659 return ID_TABLE_REPLACE;
3660 }
3661
3662 return ID_TABLE_CONTINUE;
3663}
3664
3665void
3666rb_gc_prepare_heap_process_object(VALUE obj)
3667{
3668 switch (BUILTIN_TYPE(obj)) {
3669      case T_STRING:
3670        // Precompute the string coderange. This both saves time for when it
3671        // is eventually needed, and avoids mutating heap pages after a potential fork.
3672        rb_enc_str_coderange(obj);
3673        break;
3674 default:
3675 break;
3676 }
3677}
3678
3679void
3680rb_gc_prepare_heap(void)
3681{
3682 rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3683}
3684
3685size_t
3686rb_gc_heap_id_for_size(size_t size)
3687{
3688 return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3689}
3690
3691bool
3692rb_gc_size_allocatable_p(size_t size)
3693{
3694 return rb_gc_impl_size_allocatable_p(size);
3695}
3696
3697static enum rb_id_table_iterator_result
3698update_id_table(VALUE *value, void *data, int existing)
3699{
3700 void *objspace = (void *)data;
3701
3702 if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3703 *value = gc_location_internal(objspace, (VALUE)*value);
3704 }
3705
3706 return ID_TABLE_CONTINUE;
3707}
3708
3709static void
3710update_m_tbl(void *objspace, struct rb_id_table *tbl)
3711{
3712 if (tbl) {
3713 rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3714 }
3715}
3716
3717static enum rb_id_table_iterator_result
3718update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3719{
3720 struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3721 VM_ASSERT(vm_ccs_p(ccs));
3722
3723 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3724 ccs->cme = (const rb_callable_method_entry_t *)gc_location_internal(objspace, (VALUE)ccs->cme);
3725 }
3726
3727 for (int i=0; i<ccs->len; i++) {
3728 if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3729 ccs->entries[i].cc = (struct rb_callcache *)gc_location_internal(objspace, (VALUE)ccs->entries[i].cc);
3730 }
3731 }
3732
3733 // do not replace
3734 return ID_TABLE_CONTINUE;
3735}
3736
3737static void
3738update_cc_tbl(void *objspace, struct rb_id_table *tbl)
3739{
3740 if (!tbl) return;
3741 rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3742}
3743
3744static enum rb_id_table_iterator_result
3745update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3746{
3747 struct rb_cvar_class_tbl_entry *entry;
3748
3749 entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3750
3751 if (entry->cref) {
3752 TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3753 }
3754
3755 entry->class_value = gc_location_internal(objspace, entry->class_value);
3756
3757 return ID_TABLE_CONTINUE;
3758}
3759
3760static void
3761update_cvc_tbl(void *objspace, struct rb_id_table *tbl)
3762{
3763 if (!tbl) return;
3764 rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3765}
3766
3767static enum rb_id_table_iterator_result
3768update_const_tbl_i(VALUE value, void *objspace)
3769{
3770 rb_const_entry_t *ce = (rb_const_entry_t *)value;
3771
3772 if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3773 ce->value = gc_location_internal(objspace, ce->value);
3774 }
3775
3776 if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3777 ce->file = gc_location_internal(objspace, ce->file);
3778 }
3779
3780 return ID_TABLE_CONTINUE;
3781}
3782
3783static void
3784update_const_tbl(void *objspace, struct rb_id_table *tbl)
3785{
3786 if (!tbl) return;
3787 rb_id_table_foreach_values(tbl, update_const_tbl_i, objspace);
3788}
3789
3790static void
3791update_subclasses(void *objspace, rb_classext_t *ext)
3792{
3793 rb_subclass_entry_t *entry;
3794 rb_subclass_anchor_t *anchor = RCLASSEXT_SUBCLASSES(ext);
3795 if (!anchor) return;
3796 entry = anchor->head;
3797 while (entry) {
3798 if (entry->klass)
3799 UPDATE_IF_MOVED(objspace, entry->klass);
3800 entry = entry->next;
3801 }
3802}
3803
3804static void
3805update_superclasses(rb_objspace_t *objspace, rb_classext_t *ext)
3806{
3807 size_t array_size = RCLASSEXT_SUPERCLASS_DEPTH(ext);
3808 if (RCLASSEXT_SUPERCLASSES_OWNER(ext)) {
3809 if (RCLASSEXT_SUPERCLASSES_WITH_SELF(ext))
3810 array_size += 1;
3811 for (size_t i = 0; i < array_size; i++) {
3812 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPERCLASSES(ext)[i]);
3813 }
3814 }
3815}
3816
3817static void
3818update_classext_values(rb_objspace_t *objspace, rb_classext_t *ext, bool is_iclass)
3819{
3820 UPDATE_IF_MOVED(objspace, RCLASSEXT_ORIGIN(ext));
3821 UPDATE_IF_MOVED(objspace, RCLASSEXT_REFINED_CLASS(ext));
3822 UPDATE_IF_MOVED(objspace, RCLASSEXT_CLASSPATH(ext));
3823 if (is_iclass) {
3824 UPDATE_IF_MOVED(objspace, RCLASSEXT_INCLUDER(ext));
3825 }
3826}
3827
3828static void
3829update_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3830{
3831 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3832 VALUE klass = args->klass;
3833 rb_objspace_t *objspace = args->objspace;
3834
3835 if (RCLASSEXT_SUPER(ext)) {
3836 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3837 }
3838
3839 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3840
3841 if (args->obj_too_complex) {
3842 gc_ref_update_table_values_only((st_table *)RCLASSEXT_FIELDS(ext));
3843 }
3844 else {
3845 // Classext is not copied in this case
3846 for (attr_index_t i = 0; i < RCLASS_FIELDS_COUNT(klass); i++) {
3847 UPDATE_IF_MOVED(objspace, RCLASSEXT_FIELDS(RCLASS_EXT_PRIME(klass))[i]);
3848 }
3849 }
3850
3851 if (!RCLASSEXT_SHARED_CONST_TBL(ext)) {
3852 update_const_tbl(objspace, RCLASSEXT_CONST_TBL(ext));
3853 }
3854 update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext));
3855 update_cvc_tbl(objspace, RCLASSEXT_CVC_TBL(ext));
3856 update_superclasses(objspace, ext);
3857 update_subclasses(objspace, ext);
3858
3859 update_classext_values(objspace, ext, false);
3860}
3861
3862static void
3863update_iclass_classext(rb_classext_t *ext, bool is_prime, VALUE namespace, void *arg)
3864{
3865 struct classext_foreach_args *args = (struct classext_foreach_args *)arg;
3866 rb_objspace_t *objspace = args->objspace;
3867
3868 if (RCLASSEXT_SUPER(ext)) {
3869 UPDATE_IF_MOVED(objspace, RCLASSEXT_SUPER(ext));
3870 }
3871 update_m_tbl(objspace, RCLASSEXT_M_TBL(ext));
3872 update_m_tbl(objspace, RCLASSEXT_CALLABLE_M_TBL(ext));
3873 update_cc_tbl(objspace, RCLASSEXT_CC_TBL(ext));
3874 update_subclasses(objspace, ext);
3875
3876 update_classext_values(objspace, ext, true);
3877}
3878
3879extern rb_symbols_t ruby_global_symbols;
3880#define global_symbols ruby_global_symbols
3881
3882 struct global_vm_table_foreach_data {
3883 vm_table_foreach_callback_func callback;
3884 vm_table_update_callback_func update_callback;
3885 void *data;
3886 bool weak_only;
3887};
3888
3889static int
3890vm_weak_table_foreach_weak_key(st_data_t key, st_data_t value, st_data_t data, int error)
3891{
3892 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3893
3894 int ret = iter_data->callback((VALUE)key, iter_data->data);
3895
3896 if (!iter_data->weak_only) {
3897 if (ret != ST_CONTINUE) return ret;
3898
3899 ret = iter_data->callback((VALUE)value, iter_data->data);
3900 }
3901
3902 return ret;
3903}
3904
3905static int
3906vm_weak_table_foreach_update_weak_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3907{
3908 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3909
3910 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3911
3912 if (!iter_data->weak_only) {
3913 if (ret != ST_CONTINUE) return ret;
3914
3915 ret = iter_data->update_callback((VALUE *)value, iter_data->data);
3916 }
3917
3918 return ret;
3919}
3920
3921static int
3922vm_weak_table_str_sym_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3923{
3924 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3925
3926 if (!iter_data->weak_only) {
3927 int ret = iter_data->callback((VALUE)key, iter_data->data);
3928 if (ret != ST_CONTINUE) return ret;
3929 }
3930
3931 if (STATIC_SYM_P(value)) {
3932 return ST_CONTINUE;
3933 }
3934 else {
3935 return iter_data->callback((VALUE)value, iter_data->data);
3936 }
3937}
3938
3939static int
3940vm_weak_table_foreach_update_weak_value(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3941{
3942 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3943
3944 if (!iter_data->weak_only) {
3945 int ret = iter_data->update_callback((VALUE *)key, iter_data->data);
3946 if (ret != ST_CONTINUE) return ret;
3947 }
3948
3949 return iter_data->update_callback((VALUE *)value, iter_data->data);
3950}
3951
3952static void
3953free_gen_fields_tbl(VALUE obj, struct gen_fields_tbl *fields_tbl)
3954{
3955 if (UNLIKELY(rb_shape_obj_too_complex_p(obj))) {
3956 st_free_table(fields_tbl->as.complex.table);
3957 }
3958
3959 xfree(fields_tbl);
3960}
3961
3962static int
3963vm_weak_table_gen_fields_foreach_too_complex_i(st_data_t _key, st_data_t value, st_data_t data, int error)
3964{
3965 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3966
3967 GC_ASSERT(!iter_data->weak_only);
3968
3969 if (SPECIAL_CONST_P((VALUE)value)) return ST_CONTINUE;
3970
3971 return iter_data->callback((VALUE)value, iter_data->data);
3972}
3973
3974static int
3975vm_weak_table_gen_fields_foreach_too_complex_replace_i(st_data_t *_key, st_data_t *value, st_data_t data, int existing)
3976{
3977 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3978
3979 GC_ASSERT(!iter_data->weak_only);
3980
3981 return iter_data->update_callback((VALUE *)value, iter_data->data);
3982}
3983
3984struct st_table *rb_generic_fields_tbl_get(void);
3985
3986static int
3987vm_weak_table_id2ref_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3988{
3989 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3990
3991 if (!iter_data->weak_only && !FIXNUM_P((VALUE)key)) {
3992 int ret = iter_data->callback((VALUE)key, iter_data->data);
3993 if (ret != ST_CONTINUE) return ret;
3994 }
3995
3996 return iter_data->callback((VALUE)value, iter_data->data);
3997}
3998
3999static int
4000vm_weak_table_id2ref_foreach_update(st_data_t *key, st_data_t *value, st_data_t data, int existing)
4001{
4002 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4003
4004 iter_data->update_callback((VALUE *)value, iter_data->data);
4005
4006 if (!iter_data->weak_only && !FIXNUM_P((VALUE)*key)) {
4007 iter_data->update_callback((VALUE *)key, iter_data->data);
4008 }
4009
4010 return ST_CONTINUE;
4011}
4012
4013static int
4014vm_weak_table_gen_fields_foreach(st_data_t key, st_data_t value, st_data_t data)
4015{
4016 struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
4017
4018 int ret = iter_data->callback((VALUE)key, iter_data->data);
4019
4020 switch (ret) {
4021 case ST_CONTINUE:
4022 break;
4023
4024 case ST_DELETE:
4025 free_gen_fields_tbl((VALUE)key, (struct gen_fields_tbl *)value);
4026
4027 FL_UNSET((VALUE)key, FL_EXIVAR);
4028 return ST_DELETE;
4029
4030 case ST_REPLACE: {
4031 VALUE new_key = (VALUE)key;
4032 ret = iter_data->update_callback(&new_key, iter_data->data);
4033 if (key != new_key) ret = ST_DELETE;
4034 DURING_GC_COULD_MALLOC_REGION_START();
4035 {
4036 st_insert(rb_generic_fields_tbl_get(), (st_data_t)new_key, value);
4037 }
4038 DURING_GC_COULD_MALLOC_REGION_END();
4039 key = (st_data_t)new_key;
4040 break;
4041 }
4042
4043 default:
4044 return ret;
4045 }
4046
4047 if (!iter_data->weak_only) {
4048 struct gen_fields_tbl *fields_tbl = (struct gen_fields_tbl *)value;
4049
4050 if (rb_shape_obj_too_complex_p((VALUE)key)) {
4051 st_foreach_with_replace(
4052 fields_tbl->as.complex.table,
4053 vm_weak_table_gen_fields_foreach_too_complex_i,
4054 vm_weak_table_gen_fields_foreach_too_complex_replace_i,
4055 data
4056 );
4057 }
4058 else {
4059 for (uint32_t i = 0; i < fields_tbl->as.shape.fields_count; i++) {
4060 if (SPECIAL_CONST_P(fields_tbl->as.shape.fields[i])) continue;
4061
4062 int ivar_ret = iter_data->callback(fields_tbl->as.shape.fields[i], iter_data->data);
4063 switch (ivar_ret) {
4064 case ST_CONTINUE:
4065 break;
4066 case ST_REPLACE:
4067 iter_data->update_callback(&fields_tbl->as.shape.fields[i], iter_data->data);
4068 break;
4069 default:
4070 rb_bug("vm_weak_table_gen_fields_foreach: return value %d not supported", ivar_ret);
4071 }
4072 }
4073 }
4074 }
4075
4076 return ret;
4077}
4078
4079static int
4080vm_weak_table_frozen_strings_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
4081{
4082 int retval = vm_weak_table_foreach_weak_key(key, value, data, error);
4083 if (retval == ST_DELETE) {
4084 FL_UNSET((VALUE)key, RSTRING_FSTR);
4085 }
4086 return retval;
4087}
4088
4089void rb_fstring_foreach_with_replace(st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg);
4090void
4091rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
4092 vm_table_update_callback_func update_callback,
4093 void *data,
4094 bool weak_only,
4095 enum rb_gc_vm_weak_tables table)
4096{
4097 rb_vm_t *vm = GET_VM();
4098
4099 struct global_vm_table_foreach_data foreach_data = {
4100 .callback = callback,
4101 .update_callback = update_callback,
4102 .data = data,
4103 .weak_only = weak_only,
4104 };
4105
4106 switch (table) {
4107 case RB_GC_VM_CI_TABLE: {
4108 if (vm->ci_table) {
4109 st_foreach_with_replace(
4110 vm->ci_table,
4111 vm_weak_table_foreach_weak_key,
4112 vm_weak_table_foreach_update_weak_key,
4113 (st_data_t)&foreach_data
4114 );
4115 }
4116 break;
4117 }
4118 case RB_GC_VM_OVERLOADED_CME_TABLE: {
4119 if (vm->overloaded_cme_table) {
4120 st_foreach_with_replace(
4121 vm->overloaded_cme_table,
4122 vm_weak_table_foreach_weak_key,
4123 vm_weak_table_foreach_update_weak_key,
4124 (st_data_t)&foreach_data
4125 );
4126 }
4127 break;
4128 }
4129 case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
4130 if (global_symbols.str_sym) {
4131 st_foreach_with_replace(
4132 global_symbols.str_sym,
4133 vm_weak_table_str_sym_foreach,
4134 vm_weak_table_foreach_update_weak_value,
4135 (st_data_t)&foreach_data
4136 );
4137 }
4138 break;
4139 }
4140 case RB_GC_VM_ID2REF_TABLE: {
4141 if (id2ref_tbl) {
4142 st_foreach_with_replace(
4143 id2ref_tbl,
4144 vm_weak_table_id2ref_foreach,
4145 vm_weak_table_id2ref_foreach_update,
4146 (st_data_t)&foreach_data
4147 );
4148 }
4149 break;
4150 }
4151 case RB_GC_VM_GENERIC_FIELDS_TABLE: {
4152 st_table *generic_fields_tbl = rb_generic_fields_tbl_get();
4153 if (generic_fields_tbl) {
4154 st_foreach(
4155 generic_fields_tbl,
4156 vm_weak_table_gen_fields_foreach,
4157 (st_data_t)&foreach_data
4158 );
4159 }
4160 break;
4161 }
4162 case RB_GC_VM_FROZEN_STRINGS_TABLE: {
4163 rb_fstring_foreach_with_replace(
4164 vm_weak_table_frozen_strings_foreach,
4165 vm_weak_table_foreach_update_weak_key,
4166 (st_data_t)&foreach_data
4167 );
4168 break;
4169 }
4170 case RB_GC_VM_WEAK_TABLE_COUNT:
4171        rb_bug("Unreachable");
4172 }
4173}
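/* Illustrative sketch: how a GC implementation might drive
 * rb_gc_vm_weak_table_foreach() to sweep and compact the VM's weak tables.
 * my_gc_marked_p() and my_gc_new_location() are hypothetical stand-ins for
 * implementation-specific liveness and relocation queries.
 */
#if 0
static int
clear_dead_entry(VALUE obj, void *data)
{
    /* ST_DELETE drops the entry once its referent is no longer live. */
    return my_gc_marked_p(data, obj) ? ST_CONTINUE : ST_DELETE;
}

static int
update_moved_entry(VALUE *obj, void *data)
{
    /* During compaction, rewrite the slot with the object's new address. */
    *obj = my_gc_new_location(data, *obj);
    return ST_CONTINUE;
}

static void
sweep_weak_tables(void *objspace)
{
    for (int i = 0; i < RB_GC_VM_WEAK_TABLE_COUNT; i++) {
        rb_gc_vm_weak_table_foreach(clear_dead_entry, update_moved_entry,
                                    objspace, true, (enum rb_gc_vm_weak_tables)i);
    }
}
#endif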
4174
4175void
4176rb_gc_update_vm_references(void *objspace)
4177{
4178 rb_execution_context_t *ec = GET_EC();
4179 rb_vm_t *vm = rb_ec_vm_ptr(ec);
4180
4181 rb_vm_update_references(vm);
4182 rb_gc_update_global_tbl();
4183 rb_sym_global_symbols_update_references();
4184
4185#if USE_YJIT
4186 void rb_yjit_root_update_references(void); // in Rust
4187
4188 if (rb_yjit_enabled_p) {
4189 rb_yjit_root_update_references();
4190 }
4191#endif
4192}
4193
4194void
4195rb_gc_update_object_references(void *objspace, VALUE obj)
4196{
4197 struct classext_foreach_args args;
4198
4199 switch (BUILTIN_TYPE(obj)) {
4200 case T_CLASS:
4201 if (FL_TEST_RAW(obj, FL_SINGLETON)) {
4202 UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
4203 }
4204        // Fall through to the shared T_CLASS/T_MODULE handling.
4205 case T_MODULE:
4206 args.klass = obj;
4207 args.obj_too_complex = rb_shape_obj_too_complex_p(obj);
4208 args.objspace = objspace;
4209 rb_class_classext_foreach(obj, update_classext, (void *)&args);
4210 break;
4211
4212 case T_ICLASS:
4213 args.objspace = objspace;
4214 rb_class_classext_foreach(obj, update_iclass_classext, (void *)&args);
4215 break;
4216
4217 case T_IMEMO:
4218 rb_imemo_mark_and_move(obj, true);
4219 return;
4220
4221 case T_NIL:
4222 case T_FIXNUM:
4223 case T_NODE:
4224 case T_MOVED:
4225 case T_NONE:
4226 /* These can't move */
4227 return;
4228
4229 case T_ARRAY:
4230 gc_ref_update_array(objspace, obj);
4231 break;
4232
4233 case T_HASH:
4234 gc_ref_update_hash(objspace, obj);
4235 UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
4236 break;
4237
4238 case T_STRING:
4239 {
4240 if (STR_SHARED_P(obj)) {
4241 UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
4242 }
4243
4244            /* If, after the move, the string is not embedded and can fit in the
4245             * slot it's been placed in, then re-embed it. */
4246 if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
4247 if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
4248 rb_str_make_embedded(obj);
4249 }
4250 }
4251
4252 break;
4253 }
4254 case T_DATA:
4255 /* Call the compaction callback, if it exists */
4256 {
4257 void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
4258 if (ptr) {
4259 if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA_TYPE(obj))) {
4260 size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
4261
4262 for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
4263 VALUE *ref = (VALUE *)((char *)ptr + offset);
4264 *ref = gc_location_internal(objspace, *ref);
4265 }
4266 }
4267 else if (RTYPEDDATA_P(obj)) {
4268 RUBY_DATA_FUNC compact_func = RTYPEDDATA_TYPE(obj)->function.dcompact;
4269 if (compact_func) (*compact_func)(ptr);
4270 }
4271 }
4272 }
4273 break;
4274
4275 case T_OBJECT:
4276 gc_ref_update_object(objspace, obj);
4277 break;
4278
4279 case T_FILE:
4280 if (RFILE(obj)->fptr) {
4281 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
4282 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
4283 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
4284 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
4285 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
4286 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
4287 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
4288 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->timeout);
4289 UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->wakeup_mutex);
4290 }
4291 break;
4292 case T_REGEXP:
4293 UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
4294 break;
4295
4296 case T_SYMBOL:
4297 UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
4298 break;
4299
4300 case T_FLOAT:
4301 case T_BIGNUM:
4302 break;
4303
4304 case T_MATCH:
4305 UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
4306
4307 if (RMATCH(obj)->str) {
4308 UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
4309 }
4310 break;
4311
4312 case T_RATIONAL:
4313 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
4314 UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
4315 break;
4316
4317 case T_COMPLEX:
4318 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
4319 UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
4320
4321 break;
4322
4323 case T_STRUCT:
4324 {
4325 long i, len = RSTRUCT_LEN(obj);
4326 VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
4327
4328 for (i = 0; i < len; i++) {
4329 UPDATE_IF_MOVED(objspace, ptr[i]);
4330 }
4331 }
4332 break;
4333 default:
4334 rb_bug("unreachable");
4335 break;
4336 }
4337
4338 UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
4339}
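/* Illustrative sketch: a minimal dcompact callback of the kind invoked in
 * the T_DATA branch above. struct my_wrapper and my_wrapper_dcompact are
 * hypothetical; rb_gc_location() is the public API for querying an object's
 * post-compaction address.
 */
#if 0
struct my_wrapper {
    VALUE cache;    /* marked with rb_gc_mark_movable() in the dmark hook */
};

static void
my_wrapper_dcompact(void *ptr)
{
    struct my_wrapper *w = ptr;
    w->cache = rb_gc_location(w->cache);    /* rewrite possibly-moved ref */
}
#endif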
4340
4341VALUE
4342rb_gc_start(void)
4343{
4344 rb_gc();
4345 return Qnil;
4346}
4347
4348void
4349rb_gc(void)
4350{
4351 unless_objspace(objspace) { return; }
4352
4353 rb_gc_impl_start(objspace, true, true, true, false);
4354}
4355
4356int
4357rb_during_gc(void)
4358{
4359 unless_objspace(objspace) { return FALSE; }
4360
4361 return rb_gc_impl_during_gc_p(objspace);
4362}
4363
4364size_t
4365rb_gc_count(void)
4366{
4367 return rb_gc_impl_gc_count(rb_gc_get_objspace());
4368}
4369
4370static VALUE
4371gc_count(rb_execution_context_t *ec, VALUE self)
4372{
4373 return SIZET2NUM(rb_gc_count());
4374}
4375
4376VALUE
4377rb_gc_latest_gc_info(VALUE key)
4378{
4379 if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
4380 rb_raise(rb_eTypeError, "non-hash or symbol given");
4381 }
4382
4383 VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
4384
4385 if (val == Qundef) {
4386 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
4387 }
4388
4389 return val;
4390}
4391
4392static VALUE
4393gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
4394{
4395 if (NIL_P(arg)) {
4396 arg = rb_hash_new();
4397 }
4398 else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4399 rb_raise(rb_eTypeError, "non-hash or symbol given");
4400 }
4401
4402 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4403
4404 if (ret == Qundef) {
4405 GC_ASSERT(SYMBOL_P(arg));
4406
4407 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4408 }
4409
4410 return ret;
4411}
4412
4413size_t
4414rb_gc_stat(VALUE arg)
4415{
4416 if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
4417 rb_raise(rb_eTypeError, "non-hash or symbol given");
4418 }
4419
4420 VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
4421
4422 if (ret == Qundef) {
4423 GC_ASSERT(SYMBOL_P(arg));
4424
4425 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4426 }
4427
4428 if (SYMBOL_P(arg)) {
4429 return NUM2SIZET(ret);
4430 }
4431 else {
4432 return 0;
4433 }
4434}
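/* Illustrative sketch: the two calling conventions of rb_gc_stat(). With a
 * Symbol the named statistic is returned directly; with a Hash the table is
 * filled in and the function returns 0.
 */
#if 0
size_t runs = rb_gc_stat(ID2SYM(rb_intern("count")));    /* single key */

VALUE stats = rb_hash_new();
rb_gc_stat(stats);    /* populates `stats` with every statistic, returns 0 */
#endif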
4435
4436static VALUE
4437gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
4438{
4439 if (NIL_P(arg)) {
4440 arg = rb_hash_new();
4441 }
4442
4443 if (NIL_P(heap_name)) {
4444 if (!RB_TYPE_P(arg, T_HASH)) {
4445 rb_raise(rb_eTypeError, "non-hash given");
4446 }
4447 }
4448 else if (FIXNUM_P(heap_name)) {
4449 if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
4450 rb_raise(rb_eTypeError, "non-hash or symbol given");
4451 }
4452 }
4453 else {
4454 rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
4455 }
4456
4457 VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
4458
4459 if (ret == Qundef) {
4460 GC_ASSERT(SYMBOL_P(arg));
4461
4462 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
4463 }
4464
4465 return ret;
4466}
4467
4468static VALUE
4469gc_config_get(rb_execution_context_t *ec, VALUE self)
4470{
4471 VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
4472 rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
4473
4474 return cfg_hash;
4475}
4476
4477static VALUE
4478gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
4479{
4480 void *objspace = rb_gc_get_objspace();
4481
4482 rb_gc_impl_config_set(objspace, hash);
4483
4484 return rb_gc_impl_config_get(objspace);
4485}
4486
4487static VALUE
4488gc_stress_get(rb_execution_context_t *ec, VALUE self)
4489{
4490 return rb_gc_impl_stress_get(rb_gc_get_objspace());
4491}
4492
4493static VALUE
4494gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
4495{
4496 rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
4497
4498 return flag;
4499}
4500
4501void
4502rb_gc_initial_stress_set(VALUE flag)
4503{
4504 initial_stress = flag;
4505}
4506
4507size_t *
4508rb_gc_heap_sizes(void)
4509{
4510 return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
4511}
4512
4513VALUE
4514rb_gc_enable(void)
4515{
4516 return rb_objspace_gc_enable(rb_gc_get_objspace());
4517}
4518
4519VALUE
4520rb_objspace_gc_enable(void *objspace)
4521{
4522 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4523 rb_gc_impl_gc_enable(objspace);
4524 return RBOOL(disabled);
4525}
4526
4527static VALUE
4528gc_enable(rb_execution_context_t *ec, VALUE _)
4529{
4530 return rb_gc_enable();
4531}
4532
4533static VALUE
4534gc_disable_no_rest(void *objspace)
4535{
4536 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4537 rb_gc_impl_gc_disable(objspace, false);
4538 return RBOOL(disabled);
4539}
4540
4541VALUE
4542rb_gc_disable_no_rest(void)
4543{
4544 return gc_disable_no_rest(rb_gc_get_objspace());
4545}
4546
4547VALUE
4548rb_gc_disable(void)
4549{
4550 return rb_objspace_gc_disable(rb_gc_get_objspace());
4551}
4552
4553VALUE
4554rb_objspace_gc_disable(void *objspace)
4555{
4556 bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
4557 rb_gc_impl_gc_disable(objspace, true);
4558 return RBOOL(disabled);
4559}
4560
4561static VALUE
4562gc_disable(rb_execution_context_t *ec, VALUE _)
4563{
4564 return rb_gc_disable();
4565}
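/* Illustrative sketch: because rb_gc_disable()/rb_gc_enable() report the
 * *previous* state, a caller can save and restore it around a critical
 * region instead of unconditionally re-enabling GC.
 */
#if 0
VALUE was_disabled = rb_gc_disable();
/* ... allocation-sensitive critical section ... */
if (was_disabled == Qfalse) rb_gc_enable();    /* only if GC was on before */
#endif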
4566
4567// TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
4568void
4569ruby_gc_set_params(void)
4570{
4571 rb_gc_impl_set_params(rb_gc_get_objspace());
4572}
4573
4574void
4575rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
4576{
4577 RB_VM_LOCK_ENTER();
4578 {
4579 if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported while during GC");
4580
4581 if (!RB_SPECIAL_CONST_P(obj)) {
4582 rb_vm_t *vm = GET_VM();
4583 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4584 struct gc_mark_func_data_struct mfd = {
4585 .mark_func = func,
4586 .data = data,
4587 };
4588
4589 vm->gc.mark_func_data = &mfd;
4590 rb_gc_mark_children(rb_gc_get_objspace(), obj);
4591 vm->gc.mark_func_data = prev_mfd;
4592 }
4593 }
4594 RB_VM_LOCK_LEAVE();
4595}
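/* Illustrative sketch: counting the objects directly reachable from `obj`.
 * count_child() matches the visitor signature expected by
 * rb_objspace_reachable_objects_from().
 */
#if 0
static void
count_child(VALUE child, void *data)
{
    (*(size_t *)data)++;
}

static size_t
reachable_count(VALUE obj)
{
    size_t n = 0;
    rb_objspace_reachable_objects_from(obj, count_child, &n);
    return n;
}
#endif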
4596
4597struct root_objects_data {
4598    const char *category;
4599 void (*func)(const char *category, VALUE, void *);
4600 void *data;
4601};
4602
4603static void
4604root_objects_from(VALUE obj, void *ptr)
4605{
4606 const struct root_objects_data *data = (struct root_objects_data *)ptr;
4607 (*data->func)(data->category, obj, data->data);
4608}
4609
4610void
4611rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
4612{
4613    if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
4614
4615 rb_vm_t *vm = GET_VM();
4616
4617 struct root_objects_data data = {
4618 .func = func,
4619 .data = passing_data,
4620 };
4621
4622 struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
4623 struct gc_mark_func_data_struct mfd = {
4624 .mark_func = root_objects_from,
4625 .data = &data,
4626 };
4627
4628 vm->gc.mark_func_data = &mfd;
4629 rb_gc_save_machine_context();
4630 rb_gc_mark_roots(vm->gc.objspace, &data.category);
4631 vm->gc.mark_func_data = prev_mfd;
4632}
4633
4634/*
4635 ------------------------------ DEBUG ------------------------------
4636*/
4637
4638static const char *
4639type_name(int type, VALUE obj)
4640{
4641 switch (type) {
4642#define TYPE_NAME(t) case (t): return #t;
4643 TYPE_NAME(T_NONE);
4644 TYPE_NAME(T_OBJECT);
4645 TYPE_NAME(T_CLASS);
4646 TYPE_NAME(T_MODULE);
4647 TYPE_NAME(T_FLOAT);
4648 TYPE_NAME(T_STRING);
4649 TYPE_NAME(T_REGEXP);
4650 TYPE_NAME(T_ARRAY);
4651 TYPE_NAME(T_HASH);
4652 TYPE_NAME(T_STRUCT);
4653 TYPE_NAME(T_BIGNUM);
4654 TYPE_NAME(T_FILE);
4655 TYPE_NAME(T_MATCH);
4656 TYPE_NAME(T_COMPLEX);
4657 TYPE_NAME(T_RATIONAL);
4658 TYPE_NAME(T_NIL);
4659 TYPE_NAME(T_TRUE);
4660 TYPE_NAME(T_FALSE);
4661 TYPE_NAME(T_SYMBOL);
4662 TYPE_NAME(T_FIXNUM);
4663 TYPE_NAME(T_UNDEF);
4664 TYPE_NAME(T_IMEMO);
4665 TYPE_NAME(T_ICLASS);
4666 TYPE_NAME(T_MOVED);
4667 TYPE_NAME(T_ZOMBIE);
4668 case T_DATA:
4669 if (obj && rb_objspace_data_type_name(obj)) {
4670 return rb_objspace_data_type_name(obj);
4671 }
4672 return "T_DATA";
4673#undef TYPE_NAME
4674 }
4675 return "unknown";
4676}
4677
4678static const char *
4679obj_type_name(VALUE obj)
4680{
4681 return type_name(TYPE(obj), obj);
4682}
4683
4684const char *
4685rb_method_type_name(rb_method_type_t type)
4686{
4687 switch (type) {
4688 case VM_METHOD_TYPE_ISEQ: return "iseq";
4689    case VM_METHOD_TYPE_ATTRSET: return "attrset";
4690 case VM_METHOD_TYPE_IVAR: return "ivar";
4691 case VM_METHOD_TYPE_BMETHOD: return "bmethod";
4692 case VM_METHOD_TYPE_ALIAS: return "alias";
4693 case VM_METHOD_TYPE_REFINED: return "refined";
4694 case VM_METHOD_TYPE_CFUNC: return "cfunc";
4695 case VM_METHOD_TYPE_ZSUPER: return "zsuper";
4696 case VM_METHOD_TYPE_MISSING: return "missing";
4697 case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
4698 case VM_METHOD_TYPE_UNDEF: return "undef";
4699 case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
4700 }
4701 rb_bug("rb_method_type_name: unreachable (type: %d)", type);
4702}
4703
4704static void
4705rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
4706{
4707 if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
4708 VALUE path = rb_iseq_path(iseq);
4709 int n = ISEQ_BODY(iseq)->location.first_lineno;
4710 snprintf(buff, buff_size, " %s@%s:%d",
4711 RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4712 RSTRING_PTR(path), n);
4713 }
4714}
4715
4716static int
4717str_len_no_raise(VALUE str)
4718{
4719 long len = RSTRING_LEN(str);
4720 if (len < 0) return 0;
4721 if (len > INT_MAX) return INT_MAX;
4722 return (int)len;
4723}
4724
4725#define BUFF_ARGS buff + pos, buff_size - pos
4726#define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4727#define APPEND_S(s) do { \
4728 if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4729 goto end; \
4730 } \
4731 else { \
4732            memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); pos += rb_strlen_lit(s); \
4733 } \
4734 } while (0)
4735#define C(c, s) ((c) != 0 ? (s) : " ")
4736
4737static size_t
4738rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4739{
4740 size_t pos = 0;
4741
4742 if (SPECIAL_CONST_P(obj)) {
4743 APPEND_F("%s", obj_type_name(obj));
4744
4745 if (FIXNUM_P(obj)) {
4746 APPEND_F(" %ld", FIX2LONG(obj));
4747 }
4748 else if (SYMBOL_P(obj)) {
4749 APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4750 }
4751 }
4752 else {
4753 // const int age = RVALUE_AGE_GET(obj);
4754
4755 if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4756 // TODO: fixme
4757 // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4758 // (void *)obj, age,
4759 // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4760 // C(RVALUE_MARK_BITMAP(obj), "M"),
4761 // C(RVALUE_PIN_BITMAP(obj), "P"),
4762 // C(RVALUE_MARKING_BITMAP(obj), "R"),
4763 // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4764 // C(rb_objspace_garbage_object_p(obj), "G"),
4765 // obj_type_name(obj));
4766 }
4767 else {
4768 /* fake */
4769 // APPEND_F("%p [%dXXXX] %s",
4770 // (void *)obj, age,
4771 // obj_type_name(obj));
4772 }
4773
4774 if (internal_object_p(obj)) {
4775 /* ignore */
4776 }
4777 else if (RBASIC(obj)->klass == 0) {
4778 APPEND_S("(temporary internal)");
4779 }
4780 else if (RTEST(RBASIC(obj)->klass)) {
4781 VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4782 if (!NIL_P(class_path)) {
4783 APPEND_F("(%s)", RSTRING_PTR(class_path));
4784 }
4785 }
4786 }
4787 end:
4788
4789 return pos;
4790}
4791
4792const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4793
4794static size_t
4795rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4796{
4797 if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4798 const enum ruby_value_type type = BUILTIN_TYPE(obj);
4799
4800 switch (type) {
4801 case T_NODE:
4802 UNEXPECTED_NODE(rb_raw_obj_info);
4803 break;
4804 case T_ARRAY:
4805 if (ARY_SHARED_P(obj)) {
4806 APPEND_S("shared -> ");
4807 rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4808 }
4809 else if (ARY_EMBED_P(obj)) {
4810 APPEND_F("[%s%s] len: %ld (embed)",
4811 C(ARY_EMBED_P(obj), "E"),
4812 C(ARY_SHARED_P(obj), "S"),
4813 RARRAY_LEN(obj));
4814 }
4815 else {
4816 APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
4817 C(ARY_EMBED_P(obj), "E"),
4818 C(ARY_SHARED_P(obj), "S"),
4819 RARRAY_LEN(obj),
4820 ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
4821 (void *)RARRAY_CONST_PTR(obj));
4822 }
4823 break;
4824 case T_STRING: {
4825 if (STR_SHARED_P(obj)) {
4826 APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4827 }
4828 else {
4829 if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4830
4831 APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4832 }
4833 APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4834 break;
4835 }
4836 case T_SYMBOL: {
4837 VALUE fstr = RSYMBOL(obj)->fstr;
4838 ID id = RSYMBOL(obj)->id;
4839 if (RB_TYPE_P(fstr, T_STRING)) {
4840 APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4841 }
4842 else {
4843 APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4844 }
4845 break;
4846 }
4847 case T_MOVED: {
4848 APPEND_F("-> %p", (void*)gc_location_internal(rb_gc_get_objspace(), obj));
4849 break;
4850 }
4851 case T_HASH: {
4852 APPEND_F("[%c] %"PRIdSIZE,
4853 RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4854 RHASH_SIZE(obj));
4855 break;
4856 }
4857 case T_CLASS:
4858 case T_MODULE:
4859 {
4860 VALUE class_path = rb_class_path_cached(obj);
4861 if (!NIL_P(class_path)) {
4862 APPEND_F("%s", RSTRING_PTR(class_path));
4863 }
4864 else {
4865 APPEND_S("(anon)");
4866 }
4867 break;
4868 }
4869 case T_ICLASS:
4870 {
4871 VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4872 if (!NIL_P(class_path)) {
4873 APPEND_F("src:%s", RSTRING_PTR(class_path));
4874 }
4875 break;
4876 }
4877 case T_OBJECT:
4878 {
4879 if (rb_shape_obj_too_complex_p(obj)) {
4880 size_t hash_len = rb_st_table_size(ROBJECT_FIELDS_HASH(obj));
4881 APPEND_F("(too_complex) len:%zu", hash_len);
4882 }
4883 else {
4884 uint32_t len = ROBJECT_FIELDS_CAPACITY(obj);
4885
4886 if (RBASIC(obj)->flags & ROBJECT_EMBED) {
4887 APPEND_F("(embed) len:%d", len);
4888 }
4889 else {
4890 VALUE *ptr = ROBJECT_FIELDS(obj);
4891 APPEND_F("len:%d ptr:%p", len, (void *)ptr);
4892 }
4893 }
4894 }
4895 break;
4896 case T_DATA: {
4897 const struct rb_block *block;
4898 const rb_iseq_t *iseq;
4899 if (rb_obj_is_proc(obj) &&
4900 (block = vm_proc_block(obj)) != NULL &&
4901 (vm_block_type(block) == block_type_iseq) &&
4902 (iseq = vm_block_iseq(block)) != NULL) {
4903 rb_raw_iseq_info(BUFF_ARGS, iseq);
4904 }
4905 else if (rb_ractor_p(obj)) {
4906 rb_ractor_t *r = (void *)DATA_PTR(obj);
4907 if (r) {
4908 APPEND_F("r:%d", r->pub.id);
4909 }
4910 }
4911 else {
4912 const char * const type_name = rb_objspace_data_type_name(obj);
4913 if (type_name) {
4914 APPEND_F("%s", type_name);
4915 }
4916 }
4917 break;
4918 }
4919 case T_IMEMO: {
4920 APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4921
4922 switch (imemo_type(obj)) {
4923 case imemo_ment:
4924 {
4925 const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4926
4927 APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4928 rb_id2name(me->called_id),
4929 METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4930 METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4931 METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4932 METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4933 METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4934 me->def ? rb_method_type_name(me->def->type) : "NULL",
4935 me->def ? me->def->aliased : -1,
4936 (void *)me->owner, // obj_info(me->owner),
4937 (void *)me->defined_class); //obj_info(me->defined_class)));
4938
4939 if (me->def) {
4940 switch (me->def->type) {
4941 case VM_METHOD_TYPE_ISEQ:
4942 APPEND_S(" (iseq:");
4943 rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4944 APPEND_S(")");
4945 break;
4946 default:
4947 break;
4948 }
4949 }
4950
4951 break;
4952 }
4953 case imemo_iseq: {
4954 const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4955 rb_raw_iseq_info(BUFF_ARGS, iseq);
4956 break;
4957 }
4958 case imemo_callinfo:
4959 {
4960 const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4961 APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4962 rb_id2name(vm_ci_mid(ci)),
4963 vm_ci_flag(ci),
4964 vm_ci_argc(ci),
4965 vm_ci_kwarg(ci) ? "available" : "NULL");
4966 break;
4967 }
4968 case imemo_callcache:
4969 {
4970 const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4971 VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
4972 const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4973
4974 APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4975 NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4976 cme ? rb_id2name(cme->called_id) : "<NULL>",
4977 cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4978 (void *)cme,
4979 (void *)(uintptr_t)vm_cc_call(cc));
4980 break;
4981 }
4982 default:
4983 break;
4984 }
4985 }
4986 default:
4987 break;
4988 }
4989 }
4990 end:
4991
4992 return pos;
4993}
4994
4995#undef C
4996
4997void
4998rb_asan_poison_object(VALUE obj)
4999{
5000 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5001 asan_poison_memory_region(ptr, rb_gc_obj_slot_size(obj));
5002}
5003
5004void
5005rb_asan_unpoison_object(VALUE obj, bool newobj_p)
5006{
5007 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5008 asan_unpoison_memory_region(ptr, rb_gc_obj_slot_size(obj), newobj_p);
5009}
5010
5011void *
5012rb_asan_poisoned_object_p(VALUE obj)
5013{
5014 MAYBE_UNUSED(struct RVALUE *) ptr = (void *)obj;
5015 return __asan_region_is_poisoned(ptr, rb_gc_obj_slot_size(obj));
5016}
5017
5018const char *
5019rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
5020{
5021 asan_unpoisoning_object(obj) {
5022 size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
5023        pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
5024 if (pos >= buff_size) {} // truncated
5025 }
5026
5027 return buff;
5028}
5029
5030#undef APPEND_S
5031#undef APPEND_F
5032#undef BUFF_ARGS
5033
5034#if RGENGC_OBJ_INFO
5035#define OBJ_INFO_BUFFERS_NUM 10
5036#define OBJ_INFO_BUFFERS_SIZE 0x100
5037static rb_atomic_t obj_info_buffers_index = 0;
5038static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
5039
5040/* Atomically increments *var and wraps it back to 0 once maxval is
5041 * reached. Returns the old *var value, reduced to the range (0...maxval). */
5042static rb_atomic_t
5043atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
5044{
5045 rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
5046 if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
5047 const rb_atomic_t newval = oldval + 1;
5048 RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
5049 oldval %= maxval;
5050 }
5051 return oldval;
5052}
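/* Worked example: with maxval == 3, successive calls return 0, 1, 2, 0, 1,
 * 2, ... When the fetch-add observes oldval == 2 (>= maxval - 1), the CAS
 * tries to fold *var from 3 back to 3 % 3 == 0; if another thread advanced
 * *var in the meantime the CAS simply fails and a later caller folds it.
 * The returned index is reduced modulo maxval, so it stays in range either
 * way. */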
5053
5054static const char *
5055obj_info(VALUE obj)
5056{
5057 rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
5058 char *const buff = obj_info_buffers[index];
5059 return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
5060}
5061#else
5062static const char *
5063obj_info(VALUE obj)
5064{
5065 return obj_type_name(obj);
5066}
5067#endif
5068
5069/*
5070 ------------------------ Extended allocator ------------------------
5071*/
5072
5074 VALUE exc;
5075 const char *fmt;
5076 va_list *ap;
5077};
5078
5079static void *
5080gc_vraise(void *ptr)
5081{
5082 struct gc_raise_tag *argv = ptr;
5083 rb_vraise(argv->exc, argv->fmt, *argv->ap);
5084 UNREACHABLE_RETURN(NULL);
5085}
5086
5087static void
5088gc_raise(VALUE exc, const char *fmt, ...)
5089{
5090 va_list ap;
5091 va_start(ap, fmt);
5092 struct gc_raise_tag argv = {
5093 exc, fmt, &ap,
5094 };
5095
5096 if (ruby_thread_has_gvl_p()) {
5097        gc_vraise(&argv);
5098        UNREACHABLE;
5099    }
5100 else if (ruby_native_thread_p()) {
5101        rb_thread_call_with_gvl(gc_vraise, &argv);
5102        UNREACHABLE;
5103    }
5104 else {
5105 /* Not in a ruby thread */
5106 fprintf(stderr, "%s", "[FATAL] ");
5107 vfprintf(stderr, fmt, ap);
5108 }
5109
5110 va_end(ap);
5111 abort();
5112}
5113
5114NORETURN(static void negative_size_allocation_error(const char *));
5115static void
5116negative_size_allocation_error(const char *msg)
5117{
5118 gc_raise(rb_eNoMemError, "%s", msg);
5119}
5120
5121static void *
5122ruby_memerror_body(void *dummy)
5123{
5124 rb_memerror();
5125 return 0;
5126}
5127
5128NORETURN(static void ruby_memerror(void));
5130static void
5131ruby_memerror(void)
5132{
5133 if (ruby_thread_has_gvl_p()) {
5134 rb_memerror();
5135 }
5136 else {
5137 if (ruby_native_thread_p()) {
5138 rb_thread_call_with_gvl(ruby_memerror_body, 0);
5139 }
5140 else {
5141 /* no ruby thread */
5142 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5143 }
5144 }
5145
5146    /* There have been discussions about whether we should die here; */
5147    /* we might rethink it later. */
5148 exit(EXIT_FAILURE);
5149}
5150
5151void
5152rb_memerror(void)
5153{
5154 /* the `GET_VM()->special_exceptions` below assumes that
5155 * the VM is reachable from the current thread. We should
5156 * definitely make sure of that. */
5157 RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
5158
5159 rb_execution_context_t *ec = GET_EC();
5160 VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
5161
5162 if (!exc ||
5163 rb_ec_raised_p(ec, RAISED_NOMEMORY) ||
5164 rb_ec_vm_lock_rec(ec) != ec->tag->lock_rec) {
5165 fprintf(stderr, "[FATAL] failed to allocate memory\n");
5166 exit(EXIT_FAILURE);
5167 }
5168 if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
5169 rb_ec_raised_clear(ec);
5170 }
5171 else {
5172 rb_ec_raised_set(ec, RAISED_NOMEMORY);
5173 exc = ruby_vm_special_exception_copy(exc);
5174 }
5175 ec->errinfo = exc;
5176 EC_JUMP_TAG(ec, TAG_RAISE);
5177}
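/* Illustrative sketch: how extension code typically reaches rb_memerror()
 * -- after a failed raw allocation while holding the GVL. */
#if 0
void *buf = malloc(len);
if (!buf) rb_memerror();    /* raises NoMemoryError; does not return */
#endif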
5178
5179bool
5180rb_memerror_reentered(void)
5181{
5182 rb_execution_context_t *ec = GET_EC();
5183 return (ec && rb_ec_raised_p(ec, RAISED_NOMEMORY));
5184}
5185
5186void
5187rb_malloc_info_show_results(void)
5188{
5189}
5190
5191static void *
5192handle_malloc_failure(void *ptr)
5193{
5194 if (LIKELY(ptr)) {
5195 return ptr;
5196 }
5197 else {
5198 ruby_memerror();
5199 UNREACHABLE_RETURN(ptr);
5200 }
5201}
5202
5203static void *ruby_xmalloc_body(size_t size);
5204
5205void *
5206ruby_xmalloc(size_t size)
5207{
5208 return handle_malloc_failure(ruby_xmalloc_body(size));
5209}
5210
5211static void *
5212ruby_xmalloc_body(size_t size)
5213{
5214 if ((ssize_t)size < 0) {
5215 negative_size_allocation_error("too large allocation size");
5216 }
5217
5218 return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
5219}
5220
5221void
5222ruby_malloc_size_overflow(size_t count, size_t elsize)
5223{
5224 rb_raise(rb_eArgError,
5225 "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
5226 count, elsize);
5227}
5228
5229void
5230ruby_malloc_add_size_overflow(size_t x, size_t y)
5231{
5232 rb_raise(rb_eArgError,
5233 "malloc: possible integer overflow (%"PRIuSIZE"+%"PRIuSIZE")",
5234 x, y);
5235}
5236
5237static void *ruby_xmalloc2_body(size_t n, size_t size);
5238
5239void *
5240ruby_xmalloc2(size_t n, size_t size)
5241{
5242 return handle_malloc_failure(ruby_xmalloc2_body(n, size));
5243}
5244
5245static void *
5246ruby_xmalloc2_body(size_t n, size_t size)
5247{
5248 return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
5249}
5250
5251static void *ruby_xcalloc_body(size_t n, size_t size);
5252
5253void *
5254ruby_xcalloc(size_t n, size_t size)
5255{
5256 return handle_malloc_failure(ruby_xcalloc_body(n, size));
5257}
5258
5259static void *
5260ruby_xcalloc_body(size_t n, size_t size)
5261{
5262 return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
5263}
5264
5265static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
5266
5267#ifdef ruby_sized_xrealloc
5268#undef ruby_sized_xrealloc
5269#endif
5270void *
5271ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
5272{
5273 return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
5274}
5275
5276static void *
5277ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
5278{
5279 if ((ssize_t)new_size < 0) {
5280 negative_size_allocation_error("too large allocation size");
5281 }
5282
5283 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
5284}
5285
5286void *
5287ruby_xrealloc(void *ptr, size_t new_size)
5288{
5289 return ruby_sized_xrealloc(ptr, new_size, 0);
5290}
5291
5292static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
5293
5294#ifdef ruby_sized_xrealloc2
5295#undef ruby_sized_xrealloc2
5296#endif
5297void *
5298ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
5299{
5300 return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
5301}
5302
5303static void *
5304ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
5305{
5306 size_t len = xmalloc2_size(n, size);
5307 return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
5308}
5309
5310void *
5311ruby_xrealloc2(void *ptr, size_t n, size_t size)
5312{
5313 return ruby_sized_xrealloc2(ptr, n, size, 0);
5314}
5315
5316#ifdef ruby_sized_xfree
5317#undef ruby_sized_xfree
5318#endif
5319void
5320ruby_sized_xfree(void *x, size_t size)
5321{
5322 if (LIKELY(x)) {
5323 /* It's possible for a C extension's pthread destructor function set by pthread_key_create
5324 * to be called after ruby_vm_destruct and attempt to free memory. Fall back to mimfree in
5325 * that case. */
5326 if (LIKELY(GET_VM())) {
5327 rb_gc_impl_free(rb_gc_get_objspace(), x, size);
5328 }
5329 else {
5330 ruby_mimfree(x);
5331 }
5332 }
5333}
5334
5335void
5336ruby_xfree(void *x)
5337{
5338 ruby_sized_xfree(x, 0);
5339}
5340
5341void *
5342rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5343{
5344 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5345 return ruby_xmalloc(w);
5346}
5347
5348void *
5349rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
5350{
5351 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5352 return ruby_xcalloc(w, 1);
5353}
5354
5355void *
5356rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
5357{
5358 size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
5359 return ruby_xrealloc((void *)p, w);
5360}
5361
5362void *
5363rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5364{
5365 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5366 return ruby_xmalloc(u);
5367}
5368
5369void *
5370rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
5371{
5372 size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
5373 return ruby_xcalloc(u, 1);
5374}
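/* Illustrative sketch: the x * y + z shape of rb_xmalloc_mul_add() matches a
 * header-plus-flexible-array allocation. struct my_vec and my_vec_new() are
 * hypothetical.
 */
#if 0
struct my_vec {
    size_t len;
    VALUE items[];    /* flexible array member */
};

static struct my_vec *
my_vec_new(size_t n)
{
    /* n * sizeof(VALUE) + sizeof(struct my_vec), overflow-checked */
    struct my_vec *v = rb_xmalloc_mul_add(n, sizeof(VALUE), sizeof(struct my_vec));
    v->len = n;
    return v;
}
#endif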
5375
5376/* Mimics ruby_xmalloc, but does not require rb_objspace.
5377 * Returns a pointer suitable for ruby_xfree.
5378 */
5379void *
5380ruby_mimmalloc(size_t size)
5381{
5382 void *mem;
5383#if CALC_EXACT_MALLOC_SIZE
5384 size += sizeof(struct malloc_obj_info);
5385#endif
5386 mem = malloc(size);
5387#if CALC_EXACT_MALLOC_SIZE
5388 if (!mem) {
5389 return NULL;
5390 }
5391 else
5392 /* set 0 for consistency of allocated_size/allocations */
5393 {
5394 struct malloc_obj_info *info = mem;
5395 info->size = 0;
5396 mem = info + 1;
5397 }
5398#endif
5399 return mem;
5400}
5401
5402void *
5403ruby_mimcalloc(size_t num, size_t size)
5404{
5405 void *mem;
5406#if CALC_EXACT_MALLOC_SIZE
5407 struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
5408 if (UNLIKELY(t.left)) {
5409 return NULL;
5410 }
5411 size = t.right + sizeof(struct malloc_obj_info);
5412 mem = calloc1(size);
5413 if (!mem) {
5414 return NULL;
5415 }
5416 else
5417 /* set 0 for consistency of allocated_size/allocations */
5418 {
5419 struct malloc_obj_info *info = mem;
5420 info->size = 0;
5421 mem = info + 1;
5422 }
5423#else
5424 mem = calloc(num, size);
5425#endif
5426 return mem;
5427}
5428
5429void
5430ruby_mimfree(void *ptr)
5431{
5432#if CALC_EXACT_MALLOC_SIZE
5433 struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
5434 ptr = info;
5435#endif
5436 free(ptr);
5437}
5438
5439void
5440rb_gc_adjust_memory_usage(ssize_t diff)
5441{
5442 unless_objspace(objspace) { return; }
5443
5444 rb_gc_impl_adjust_memory_usage(objspace, diff);
5445}
5446
5447const char *
5448rb_obj_info(VALUE obj)
5449{
5450 return obj_info(obj);
5451}
5452
5453void
5454rb_obj_info_dump(VALUE obj)
5455{
5456 char buff[0x100];
5457 fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
5458}
5459
5460void
5461rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
5462{
5463 char buff[0x100];
5464 fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
5465}
5466
5467void
5468rb_gc_before_fork(void)
5469{
5470 rb_gc_impl_before_fork(rb_gc_get_objspace());
5471}
5472
5473void
5474rb_gc_after_fork(rb_pid_t pid)
5475{
5476 rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
5477}
5478
5479/*
5480 * Document-module: ObjectSpace
5481 *
5482 * The ObjectSpace module contains a number of routines
5483 * that interact with the garbage collection facility and allow you to
5484 * traverse all living objects with an iterator.
5485 *
 * ObjectSpace also provides support for object finalizers, procs that are
 * called once a specific object has been destroyed by garbage collection. See
5488 * the documentation for +ObjectSpace.define_finalizer+ for important
5489 * information on how to use this method correctly.
5490 *
5491 * a = "A"
5492 * b = "B"
5493 *
5494 * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
5495 * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
5496 *
5497 * a = nil
5498 * b = nil
5499 *
5500 * _produces:_
5501 *
5502 * Finalizer two on 537763470
5503 * Finalizer one on 537763480
5504 */
5505
5506/* Document-class: GC::Profiler
5507 *
5508 * The GC profiler provides access to information on GC runs including time,
5509 * length and object space size.
5510 *
5511 * Example:
5512 *
5513 * GC::Profiler.enable
5514 *
5515 * require 'rdoc/rdoc'
5516 *
5517 * GC::Profiler.report
5518 *
5519 * GC::Profiler.disable
5520 *
5521 * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
5522 */
5523
5524#include "gc.rbinc"
5525
5526void
5527Init_GC(void)
5528{
5529#undef rb_intern
5530 rb_gc_register_address(&id2ref_value);
5531
5532 malloc_offset = gc_compute_malloc_offset();
5533
5534 rb_mGC = rb_define_module("GC");
5535
5536 VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
5537
5538 rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
5539
5540 rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
5541 rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
5542
5543 rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
5544
5545 rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
5546
5547 rb_define_method(rb_cBasicObject, "__id__", rb_obj_id, 0);
5548 rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
5549
5550 rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
5551
5552 rb_gc_impl_init();
5553}
5554
5555// Set a name for the anonymous virtual memory area. `addr` is the starting
5556// address of the area and `size` is its length in bytes. `name` is a
5557// NUL-terminated human-readable string.
5558//
5559// This function is usually called after calling `mmap()`. The human-readable
5560// annotation helps developers identify the call site of `mmap()` that created
5561// the memory mapping.
5562//
5563// This function currently only works on Linux 5.17 or higher. After calling
5564// this function, we can see annotations in the form of "[anon:...]" in
5565// `/proc/self/maps`, where `...` is the content of `name`. This function has
5566// no effect when called on other platforms.
5567void
5568ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
5569{
5570#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
5571 // The name length cannot exceed 80 (including the '\0').
5572 RUBY_ASSERT(strlen(name) < 80);
5573 prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
5574 // We ignore errors in prctl. prctl may set errno to EINVAL for several
5575 // reasons.
5576 // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
5577 // 2. addr is an invalid address.
5578 // 3. The string pointed by name is too long.
5579 // The first error indicates PR_SET_VMA_ANON_NAME is not available, and may
5580 // happen if we run the compiled binary on an old kernel. In theory, all
5581 // other errors should result in a failure. But since EINVAL cannot tell
5582 // the first error from others, and this function is mainly used for
5583 // debugging, we silently ignore the error.
5584 errno = 0;
5585#endif
5586}