#include "ruby/internal/config.h"

# ifdef HAVE_SYS_PRCTL_H

#if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)

#ifdef BUILDING_MODULAR_GC
# define nlz_int64(x) (x == 0 ? 64 : (unsigned int)__builtin_clzll((unsigned long long)x))
# include "internal/bits.h"

#include "ccan/list/list.h"

#include "gc/gc_impl.h"

#ifndef BUILDING_MODULAR_GC

#ifdef BUILDING_MODULAR_GC
# define RB_DEBUG_COUNTER_INC(_name) ((void)0)
# define RB_DEBUG_COUNTER_INC_IF(_name, cond) (!!(cond))
# include "debug_counter.h"

#ifdef BUILDING_MODULAR_GC
# define rb_asan_poison_object(obj) ((void)(obj))
# define rb_asan_unpoison_object(obj, newobj_p) ((void)(obj), (void)(newobj_p))
# define asan_unpoisoning_object(obj) if ((obj) || true)
# define asan_poison_memory_region(ptr, size) ((void)(ptr), (void)(size))
# define asan_unpoison_memory_region(ptr, size, malloc_p) ((void)(ptr), (void)(size), (void)(malloc_p))
# define asan_unpoisoning_memory_region(ptr, size) if ((ptr) || (size) || true)

# define VALGRIND_MAKE_MEM_DEFINED(ptr, size) ((void)(ptr), (void)(size))
# define VALGRIND_MAKE_MEM_UNDEFINED(ptr, size) ((void)(ptr), (void)(size))

# include "internal/sanitizers.h"
#ifndef HAVE_MALLOC_USABLE_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) _msize(a)
# elif defined HAVE_MALLOC_SIZE
# define HAVE_MALLOC_USABLE_SIZE
# define malloc_usable_size(a) malloc_size(a)

#ifdef HAVE_MALLOC_USABLE_SIZE
# ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
# elif defined(HAVE_MALLOC_H)
# elif defined(HAVE_MALLOC_NP_H)
#  include <malloc_np.h>
# elif defined(HAVE_MALLOC_MALLOC_H)
#  include <malloc/malloc.h>

#ifdef HAVE_MALLOC_TRIM
# include <emscripten/emmalloc.h>

#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
# include <mach/task.h>
# include <mach/mach_init.h>
# include <mach/mach_port.h>
# define VM_CHECK_MODE RUBY_DEBUG

#ifndef RACTOR_CHECK_MODE
# define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)

#ifndef RUBY_DEBUG_LOG
# define RUBY_DEBUG_LOG(...)
#ifndef GC_HEAP_INIT_BYTES
#define GC_HEAP_INIT_BYTES (2560 * 1024)
#ifndef GC_HEAP_FREE_SLOTS
#define GC_HEAP_FREE_SLOTS 4096
#ifndef GC_HEAP_GROWTH_FACTOR
#define GC_HEAP_GROWTH_FACTOR 1.8
#ifndef GC_HEAP_GROWTH_MAX_BYTES
#define GC_HEAP_GROWTH_MAX_BYTES 0
#ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
# define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
#ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
#define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0

#ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
#define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
#ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
#define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
#ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
#define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65

#ifndef GC_MALLOC_LIMIT_MIN
#define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#ifndef GC_MALLOC_LIMIT_MAX
#define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
#ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
#define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4

#ifndef GC_OLDMALLOC_LIMIT_MIN
#define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
#ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
#define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
#ifndef GC_OLDMALLOC_LIMIT_MAX
#define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)

#ifndef GC_MALLOC_INCREASE_LOCAL_THRESHOLD
#define GC_MALLOC_INCREASE_LOCAL_THRESHOLD (8 * 1024 /* 8KB */)
#ifdef RB_THREAD_LOCAL_SPECIFIER
#define USE_MALLOC_INCREASE_LOCAL 1
static RB_THREAD_LOCAL_SPECIFIER int malloc_increase_local;
#define USE_MALLOC_INCREASE_LOCAL 0
#ifndef GC_CAN_COMPILE_COMPACTION
# define GC_CAN_COMPILE_COMPACTION 0
# define GC_CAN_COMPILE_COMPACTION 1

#ifndef PRINT_ENTER_EXIT_TICK
# define PRINT_ENTER_EXIT_TICK 0
#ifndef PRINT_ROOT_TICKS
#define PRINT_ROOT_TICKS 0

#define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_ROOT_TICKS)
# if SIZEOF_VALUE >= 8
#  define HEAP_COUNT 12

#  define EACH_POOL_SLOT_SIZE(SLOT) \
    SLOT(32) SLOT(40) SLOT(64) SLOT(80) SLOT(96) SLOT(128) \
    SLOT(160) SLOT(256) SLOT(512) SLOT(640) SLOT(768) SLOT(1024)
#  define EACH_POOL_SLOT_SIZE(SLOT) \
    SLOT(32) SLOT(64) SLOT(128) SLOT(256) SLOT(512)
#define SLOT_RECIPROCAL_SHIFT 48
#define SLOT_RECIPROCAL(size) (((1ULL << SLOT_RECIPROCAL_SHIFT) + (size) - 1) / (size))
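/*
 * Division-free slot indexing: SLOT_RECIPROCAL(size) is ceil(2^48 / size),
 * so (offset * reciprocal) >> 48 computes offset / size with one multiply
 * and one shift instead of an integer division (see slot_index_for_offset
 * below). With the ceiling reciprocal this is exact for every offset that
 * can occur here, since offsets are bounded by HEAP_PAGE_SIZE (64 KiB),
 * far below the 2^48 fixed-point scale.
 */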
static const uint64_t heap_slot_reciprocal_table[HEAP_COUNT] = {
#define SLOT(size) SLOT_RECIPROCAL(size),
    EACH_POOL_SLOT_SIZE(SLOT)
    size_t allocated_objects_count;

    size_t incremental_mark_step_allocated_slots;

    size_t heap_init_bytes;
    size_t heap_free_slots;
    double growth_factor;
    size_t growth_max_bytes;

    double heap_free_slots_min_ratio;
    double heap_free_slots_goal_ratio;
    double heap_free_slots_max_ratio;
    double uncollectible_wb_unprotected_objects_limit_ratio;
    double oldobject_limit_factor;

    size_t malloc_limit_min;
    size_t malloc_limit_max;
    double malloc_limit_growth_factor;

    size_t oldmalloc_limit_min;
    size_t oldmalloc_limit_max;
    double oldmalloc_limit_growth_factor;
    GC_HEAP_GROWTH_FACTOR,
    GC_HEAP_GROWTH_MAX_BYTES,

    GC_HEAP_FREE_SLOTS_MIN_RATIO,
    GC_HEAP_FREE_SLOTS_GOAL_RATIO,
    GC_HEAP_FREE_SLOTS_MAX_RATIO,
    GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
    GC_HEAP_OLDOBJECT_LIMIT_FACTOR,

    GC_MALLOC_LIMIT_GROWTH_FACTOR,

    GC_OLDMALLOC_LIMIT_MIN,
    GC_OLDMALLOC_LIMIT_MAX,
    GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
#define RGENGC_DEBUG -1
#define RGENGC_DEBUG 0

#if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
# define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
#elif defined(HAVE_VA_ARGS_MACRO)
# define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
# define RGENGC_DEBUG_ENABLED(level) 0

int ruby_rgengc_debug;
#ifndef RGENGC_PROFILE
# define RGENGC_PROFILE 0

#ifndef RGENGC_ESTIMATE_OLDMALLOC
# define RGENGC_ESTIMATE_OLDMALLOC 1

#ifndef GC_PROFILE_MORE_DETAIL
# define GC_PROFILE_MORE_DETAIL 0
#ifndef GC_PROFILE_DETAIL_MEMORY
# define GC_PROFILE_DETAIL_MEMORY 0
#ifndef GC_ENABLE_LAZY_SWEEP
# define GC_ENABLE_LAZY_SWEEP 1

#ifndef VERIFY_FREE_SIZE
#define VERIFY_FREE_SIZE 1
#define VERIFY_FREE_SIZE 0

#undef CALC_EXACT_MALLOC_SIZE
#define CALC_EXACT_MALLOC_SIZE 1
#ifndef CALC_EXACT_MALLOC_SIZE
# define CALC_EXACT_MALLOC_SIZE 0

#if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
# ifndef MALLOC_ALLOCATED_SIZE
#  define MALLOC_ALLOCATED_SIZE 0
# define MALLOC_ALLOCATED_SIZE 0
#ifndef MALLOC_ALLOCATED_SIZE_CHECK
# define MALLOC_ALLOCATED_SIZE_CHECK 0

#ifndef GC_DEBUG_STRESS_TO_CLASS
# define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
    GPR_FLAG_NONE = 0x000,

    GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
    GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
    GPR_FLAG_MAJOR_BY_SHADY = 0x004,
    GPR_FLAG_MAJOR_BY_FORCE = 0x008,
#if RGENGC_ESTIMATE_OLDMALLOC
    GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
    GPR_FLAG_MAJOR_MASK = 0x0ff,

    GPR_FLAG_NEWOBJ = 0x100,
    GPR_FLAG_MALLOC = 0x200,
    GPR_FLAG_METHOD = 0x400,
    GPR_FLAG_CAPI = 0x800,
    GPR_FLAG_STRESS = 0x1000,

    GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
    GPR_FLAG_HAVE_FINALIZE = 0x4000,
    GPR_FLAG_IMMEDIATE_MARK = 0x8000,
    GPR_FLAG_FULL_MARK = 0x10000,
    GPR_FLAG_COMPACT = 0x20000,

        (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
         GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
} gc_profile_record_flag;
    double gc_invoke_time;

    size_t heap_total_objects;
    size_t heap_use_size;
    size_t heap_total_size;
    size_t moved_objects;

#if GC_PROFILE_MORE_DETAIL
    double gc_sweep_time;

    size_t heap_use_pages;
    size_t heap_live_objects;
    size_t heap_free_objects;

    size_t allocate_increase;
    size_t allocate_limit;

    size_t removing_objects;
    size_t empty_objects;
#if GC_PROFILE_DETAIL_MEMORY

#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;

#if RGENGC_PROFILE > 0
    size_t remembered_normal_objects;
    size_t remembered_shady_objects;

    uint32_t original_shape_id;
#define RMOVED(obj) ((struct RMoved *)(obj))

typedef uintptr_t bits_t;
enum {
    BITS_SIZE = sizeof(bits_t),
    BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
};

#define STACK_CHUNK_SIZE 500
    VALUE data[STACK_CHUNK_SIZE];
    size_t unused_cache_size;
typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
    size_t total_allocated_pages;
    size_t force_major_gc_count;
    size_t force_incremental_marking_finish_count;
    size_t total_allocated_objects;
    size_t total_freed_objects;
    size_t final_slots_count;

    struct ccan_list_head pages;

    uintptr_t compact_cursor_index;

    gc_stress_no_immediate_sweep,
    gc_stress_full_mark_after_malloc,

#if RGENGC_ESTIMATE_OLDMALLOC
    size_t oldmalloc_increase;

#if MALLOC_ALLOCATED_SIZE
    size_t allocated_size;
    unsigned int mode : 2;
    unsigned int immediate_sweep : 1;
    unsigned int dont_gc : 1;
    unsigned int dont_incremental : 1;
    unsigned int during_gc : 1;
    unsigned int during_compacting : 1;
    unsigned int during_reference_updating : 1;
    unsigned int gc_stressful : 1;
    unsigned int during_minor_gc : 1;
    unsigned int during_incremental_marking : 1;
    unsigned int measure_gc : 1;

    size_t empty_pages_count;

    size_t allocated_pages;
    size_t freeable_pages;
    size_t allocatable_bytes;

    VALUE deferred_final;

    unsigned int latest_gc_info;

#if GC_PROFILE_MORE_DETAIL

    size_t minor_gc_count;
    size_t major_gc_count;
    size_t compact_count;
    size_t read_barrier_faults;
#if RGENGC_PROFILE > 0
    size_t total_generated_normal_object_count;
    size_t total_generated_shady_object_count;
    size_t total_shade_operation_count;
    size_t total_promoted_count;
    size_t total_remembered_normal_object_count;
    size_t total_remembered_shady_object_count;

#if RGENGC_PROFILE >= 2
    size_t generated_normal_object_count_types[RUBY_T_MASK];
    size_t generated_shady_object_count_types[RUBY_T_MASK];
    size_t remembered_normal_object_count_types[RUBY_T_MASK];
    size_t remembered_shady_object_count_types[RUBY_T_MASK];

    double gc_sweep_start_time;
    size_t total_allocated_objects_at_gc_start;
    size_t heap_used_at_gc_start;
    size_t heap_total_slots_at_gc_start;

    unsigned long long marking_time_ns;
    unsigned long long sweeping_time_ns;
    struct timespec sweeping_start_time;

    size_t weak_references_count;

    VALUE gc_stress_mode;

    bool parent_object_old_p;

    size_t last_major_gc;
    size_t uncollectible_wb_unprotected_objects;
    size_t uncollectible_wb_unprotected_objects_limit;
    size_t old_objects_limit;

#if RGENGC_ESTIMATE_OLDMALLOC
    size_t oldmalloc_increase_limit;

#if RGENGC_CHECK_MODE >= 2

    size_t considered_count_table[T_MASK];
    size_t moved_count_table[T_MASK];
    size_t moved_up_count_table[T_MASK];
    size_t moved_down_count_table[T_MASK];

    gc_compact_compare_func compare_func;

#if GC_DEBUG_STRESS_TO_CLASS
    VALUE stress_to_class;

    rb_darray(VALUE) weak_references;

    unsigned long live_ractor_cache_count;

    int sweeping_heap_count;

    int fork_vm_lock_lev;
#ifndef HEAP_PAGE_ALIGN_LOG
#define HEAP_PAGE_ALIGN_LOG 16

#if RACTOR_CHECK_MODE || GC_DEBUG
struct rvalue_overhead {
# if RACTOR_CHECK_MODE
    uint32_t _ractor_belonging_id;

# define RVALUE_OVERHEAD (sizeof(struct { \
    struct rvalue_overhead overhead; \

size_t rb_gc_impl_obj_slot_size(VALUE obj);
# define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_impl_obj_slot_size(obj)))

# ifndef RVALUE_OVERHEAD
# define RVALUE_OVERHEAD 0

#define RVALUE_SLOT_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]) + RVALUE_OVERHEAD)
static const size_t pool_slot_sizes[HEAP_COUNT] = {
#define SLOT(size) size,
    EACH_POOL_SLOT_SIZE(SLOT)

static uint8_t size_to_heap_idx[1024 / 8 + 1];
static uint8_t size_to_heap_idx[512 / 8 + 1];
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define roomof(x, y) (((x) + (y) - 1) / (y))
#define CEILDIV(i, mod) roomof(i, mod)
#define MIN_POOL_SLOT_SIZE 32

    HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
    HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
    HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
    HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, MIN_POOL_SLOT_SIZE), BITS_BITLENGTH),
    HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),

#define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
#define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
#if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
# define INCREMENTAL_MARK_STEP_ALLOCATIONS 500

#undef INIT_HEAP_PAGE_ALLOC_USE_MMAP

static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif defined(__wasm__)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
#elif HAVE_CONST_PAGE_SIZE
static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
#elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
#elif defined(PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
#elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
# define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;

#ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
# define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
static bool heap_page_alloc_use_mmap;
#define RVALUE_AGE_BIT_COUNT 2
#define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
#define RVALUE_OLD_AGE 3
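/*
 * Each object carries a 2-bit age (0..3) stored in its page's age_bits
 * bitmap. Surviving a marking phase increments the age (RVALUE_AGE_INC);
 * on reaching RVALUE_OLD_AGE the object is promoted to the old generation
 * and is only revisited by major GCs.
 */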
    uint64_t slot_size_reciprocal;
    unsigned short slot_size;
    unsigned short total_slots;
    unsigned short free_slots;
    unsigned short final_slots;
    unsigned short pinned_slots;

    unsigned int before_sweep : 1;
    unsigned int has_remembered_objects : 1;
    unsigned int has_uncollectible_wb_unprotected_objects : 1;

    struct ccan_list_node page_node;

    bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
    bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
static void
asan_lock_freelist(struct heap_page *page)
{
    asan_poison_memory_region(&page->freelist, sizeof(struct free_list *));
}

static void
asan_unlock_freelist(struct heap_page *page)
{
    asan_unpoison_memory_region(&page->freelist, sizeof(struct free_list *), false);
}
    if (page->total_slots == 0) {
        GC_ASSERT(page->start == 0);
        GC_ASSERT(page->slot_size == 0);
        GC_ASSERT(page->heap == NULL);
        GC_ASSERT(page->free_slots == 0);
        asan_unpoisoning_memory_region(&page->freelist, sizeof(&page->freelist)) {
            GC_ASSERT(page->freelist == NULL);

        GC_ASSERT(page->start != 0);
        GC_ASSERT(page->slot_size != 0);
        GC_ASSERT(page->heap != NULL);
#define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
#define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
#define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)

static inline uint32_t
slot_index_for_offset(size_t offset, uint64_t reciprocal)
{
    return (uint32_t)(((uint64_t)offset * reciprocal) >> SLOT_RECIPROCAL_SHIFT);
}

#define SLOT_INDEX(page, p) slot_index_for_offset((uintptr_t)(p) - (page)->start, (page)->slot_size_reciprocal)
#define SLOT_BITMAP_INDEX(page, p) (SLOT_INDEX(page, p) / BITS_BITLENGTH)
#define SLOT_BITMAP_OFFSET(page, p) (SLOT_INDEX(page, p) & (BITS_BITLENGTH - 1))
#define SLOT_BITMAP_BIT(page, p) ((bits_t)1 << SLOT_BITMAP_OFFSET(page, p))
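/*
 * Per-page bitmaps are addressed by slot index: SLOT_INDEX maps a pointer
 * to its slot number within the page (multiply-shift, no division),
 * SLOT_BITMAP_INDEX picks the bits_t word, and SLOT_BITMAP_OFFSET /
 * SLOT_BITMAP_BIT select the bit within that word.
 */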
#define _MARKED_IN_BITMAP(bits, page, p) ((bits)[SLOT_BITMAP_INDEX(page, p)] & SLOT_BITMAP_BIT(page, p))
#define _MARK_IN_BITMAP(bits, page, p) ((bits)[SLOT_BITMAP_INDEX(page, p)] |= SLOT_BITMAP_BIT(page, p))
#define _CLEAR_IN_BITMAP(bits, page, p) ((bits)[SLOT_BITMAP_INDEX(page, p)] &= ~SLOT_BITMAP_BIT(page, p))

#define MARKED_IN_BITMAP(bits, p) _MARKED_IN_BITMAP(bits, GET_HEAP_PAGE(p), p)
#define MARK_IN_BITMAP(bits, p) _MARK_IN_BITMAP(bits, GET_HEAP_PAGE(p), p)
#define CLEAR_IN_BITMAP(bits, p) _CLEAR_IN_BITMAP(bits, GET_HEAP_PAGE(p), p)

#define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
#define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
#define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
#define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
#define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
static int
RVALUE_AGE_GET(VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *age_bits = page->age_bits;
    size_t slot_idx = SLOT_INDEX(page, obj);
    size_t idx = (slot_idx / BITS_BITLENGTH) * 2;
    int shift = (int)(slot_idx & (BITS_BITLENGTH - 1));
    int lo = (age_bits[idx] >> shift) & 1;
    int hi = (age_bits[idx + 1] >> shift) & 1;
    return lo | (hi << 1);
}
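/*
 * Ages are kept as two parallel bit planes: for each group of
 * BITS_BITLENGTH slots, age_bits[idx] holds the low bit of every slot's
 * age and age_bits[idx + 1] holds the high bit, so reading or writing an
 * age touches the same shift position in two adjacent words.
 */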
static void
RVALUE_AGE_SET_BITMAP(VALUE obj, int age)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *age_bits = page->age_bits;
    size_t slot_idx = SLOT_INDEX(page, obj);
    size_t idx = (slot_idx / BITS_BITLENGTH) * 2;
    int shift = (int)(slot_idx & (BITS_BITLENGTH - 1));
    bits_t mask = (bits_t)1 << shift;

    age_bits[idx] = (age_bits[idx] & ~mask) | ((bits_t)(age & 1) << shift);
    age_bits[idx + 1] = (age_bits[idx + 1] & ~mask) | ((bits_t)((age >> 1) & 1) << shift);
}

RVALUE_AGE_SET(VALUE obj, int age)
    RVALUE_AGE_SET_BITMAP(obj, age);
    if (age == RVALUE_OLD_AGE) {
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_counters.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_lomem objspace->heap_pages.range[0]
#define heap_pages_himem objspace->heap_pages.range[1]
#define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
#define heap_pages_deferred_final objspace->heap_pages.deferred_final
#define heaps objspace->heaps
#define during_gc objspace->flags.during_gc
#define finalizing objspace->atomic_flags.finalizing
#define finalizer_table objspace->finalizer_table
#define ruby_gc_stressful objspace->flags.gc_stressful
#define ruby_gc_stress_mode objspace->gc_stress_mode
#if GC_DEBUG_STRESS_TO_CLASS
#define stress_to_class objspace->stress_to_class
#define set_stress_to_class(c) (stress_to_class = (c))
#define stress_to_class ((void)objspace, 0)
#define set_stress_to_class(c) ((void)objspace, (c))
#define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
#define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (int)(b))
#define dont_gc_val() (objspace->flags.dont_gc)
#define dont_gc_on() (objspace->flags.dont_gc = 1)
#define dont_gc_off() (objspace->flags.dont_gc = 0)
#define dont_gc_set(b) (objspace->flags.dont_gc = (int)(b))
#define dont_gc_val() (objspace->flags.dont_gc)

#define gc_config_full_mark_set(b) (objspace->gc_config.full_mark = (int)(b))
#define gc_config_full_mark_val (objspace->gc_config.full_mark)
#ifndef DURING_GC_COULD_MALLOC_REGION_START
# define DURING_GC_COULD_MALLOC_REGION_START() \
    assert(rb_during_gc()); \
    bool _prev_enabled = rb_gc_impl_gc_enabled_p(objspace); \
    rb_gc_impl_gc_disable(objspace, false)

#ifndef DURING_GC_COULD_MALLOC_REGION_END
# define DURING_GC_COULD_MALLOC_REGION_END() \
    if (_prev_enabled) rb_gc_impl_gc_enable(objspace)
static inline enum gc_mode
gc_mode_verify(enum gc_mode mode)
#if RGENGC_CHECK_MODE > 0
      case gc_mode_marking:
      case gc_mode_sweeping:
      case gc_mode_compacting:
        rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
    return objspace->sweeping_heap_count != 0;

    for (int i = 0; i < HEAP_COUNT; i++) {
        count += (&heaps[i])->total_pages;

    for (int i = 0; i < HEAP_COUNT; i++) {
        count += heap->total_allocated_objects;

    for (int i = 0; i < HEAP_COUNT; i++) {
        count += heap->total_freed_objects;

    for (int i = 0; i < HEAP_COUNT; i++) {
        count += heap->final_slots_count;
#define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
#define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
#define gc_needs_major_flags objspace->rgengc.need_major_gc

#define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
#define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
#define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
#define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
#define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)

#define GC_INCREMENTAL_SWEEP_BYTES (2048 * RVALUE_SLOT_SIZE)
#define GC_INCREMENTAL_SWEEP_POOL_BYTES (1024 * RVALUE_SLOT_SIZE)
#define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))

#define needs_continue_sweeping(objspace, heap) \
    ((heap)->free_pages == NULL && is_lazy_sweeping(objspace))
#if SIZEOF_LONG == SIZEOF_VOIDP
# define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
# define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
    ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
#else
# error not supported

    void (*dfree)(void *);

#define RZOMBIE(o) ((struct RZombie *)(o))
static bool ruby_enable_autocompact = false;
#if RGENGC_CHECK_MODE
static gc_compact_compare_func ruby_autocompact_compare_func;

static int garbage_collect(rb_objspace_t *, unsigned int reason);
enum gc_enter_event {
    gc_enter_event_start,
    gc_enter_event_continue,
    gc_enter_event_rest,
    gc_enter_event_finalizer,
};

static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
NO_SANITIZE("memory", static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr));

static void gc_verify_internal_consistency(void *objspace_ptr);

static double getrusage_time(void);

static inline void gc_prof_mark_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);

#define gc_prof_record(objspace) (objspace)->profile.current_record
#define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)

#ifdef HAVE_VA_ARGS_MACRO
# define gc_report(level, objspace, ...) \
    if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
# define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body

PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);

static void gc_finalize_deferred(void *dmy);
#if defined(__GNUC__) && defined(__i386__)
typedef unsigned long long tick_t;
#define PRItick "llu"

    unsigned long long int x;
    __asm__ __volatile__ ("rdtsc" : "=A" (x));

#elif defined(__GNUC__) && defined(__x86_64__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
    unsigned long hi, lo;
    __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);

#elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
    unsigned long long val = __builtin_ppc_get_timebase();

#elif defined(__POWERPC__) && defined(__APPLE__)
typedef unsigned long long tick_t;
#define PRItick "llu"

static __inline__ tick_t
    unsigned long int upper, lower, tmp;
    # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
    # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
    } while (tmp != upper);
    return ((tick_t)upper << 32) | lower;

#elif defined(__aarch64__) && defined(__GNUC__)
typedef unsigned long tick_t;

static __inline__ tick_t
    __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));

#elif defined(_WIN32) && defined(_MSC_VER)
typedef unsigned __int64 tick_t;
#define PRItick "llu"

typedef clock_t tick_t;
#define PRItick "llu"
#define MEASURE_LINE(expr) expr

#define RVALUE_MARKED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
#define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
#define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
#define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
#define RVALUE_PINNED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))

    check_rvalue_consistency(objspace, obj);
    return RVALUE_MARKED_BITMAP(obj) != 0;

    check_rvalue_consistency(objspace, obj);
    return RVALUE_PINNED_BITMAP(obj) != 0;

    check_rvalue_consistency(objspace, obj);
    return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;

    check_rvalue_consistency(objspace, obj);
    return RVALUE_MARKING_BITMAP(obj) != 0;

    check_rvalue_consistency(objspace, obj);
    return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;

    check_rvalue_consistency(objspace, obj);
    return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;

#define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
#define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
#define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
    int lev = RB_GC_VM_LOCK_NO_BARRIER();

        fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);

    else if (!is_pointer_to_heap(objspace, (void *)obj)) {
        while (empty_page) {
            if ((uintptr_t)empty_page->body <= (uintptr_t)obj &&
                    (uintptr_t)obj < (uintptr_t)empty_page->body + HEAP_PAGE_SIZE) {
                GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, empty_page));
                fprintf(stderr, "check_rvalue_consistency: %p is in an empty page (%p).\n",
                        (void *)obj, (void *)empty_page);

        fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);

        const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
        const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
        const int mark_bit = RVALUE_MARKED_BITMAP(obj) != 0;
        const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
        const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
        const int age = RVALUE_AGE_GET((VALUE)obj);

        if (heap_page_in_global_empty_pages_pool(objspace, GET_HEAP_PAGE(obj))) {
            fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", rb_obj_info(obj));

            fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", rb_obj_info(obj));

            fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", rb_obj_info(obj));

        rb_obj_memsize_of((VALUE)obj);

        if (age > 0 && wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", rb_obj_info(obj), age);

        if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", rb_obj_info(obj));

        if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
            fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
                    rb_obj_info(obj), age);

        if (remembered_bit && age != RVALUE_OLD_AGE) {
            fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
                    rb_obj_info(obj), age);

        if (is_incremental_marking(objspace) && marking_bit) {
            if (!is_marking(objspace) && !mark_bit) {
                fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", rb_obj_info(obj));

    RB_GC_VM_UNLOCK_NO_BARRIER(lev);

    if (err > 0 && terminate) {
        rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
#if RGENGC_CHECK_MODE == 0

    check_rvalue_consistency_force(objspace, obj, TRUE);

    asan_unpoisoning_object(obj) {

    check_rvalue_consistency(objspace, obj);
    MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);

#if RGENGC_PROFILE >= 2
    objspace->profile.total_promoted_count++;

    RB_DEBUG_COUNTER_INC(obj_promote);
    RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
    int age = RVALUE_AGE_GET((VALUE)obj);

    if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
        rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", rb_obj_info(obj));

    age++;
    RVALUE_AGE_SET(obj, age);

    if (age == RVALUE_OLD_AGE) {
        RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);

    check_rvalue_consistency(objspace, obj);
    check_rvalue_consistency(objspace, obj);
    GC_ASSERT(!RVALUE_OLD_P(objspace, obj));
    RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
    check_rvalue_consistency(objspace, obj);

static void
RVALUE_AGE_RESET(VALUE obj)
{
    RVALUE_AGE_SET(obj, 0);
}

    check_rvalue_consistency(objspace, obj);
    GC_ASSERT(RVALUE_OLD_P(objspace, obj));

    if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(objspace, obj)) {
        CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);

    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
    RVALUE_AGE_RESET(obj);

    if (RVALUE_MARKED(objspace, obj)) {

    check_rvalue_consistency(objspace, obj);

    return !RVALUE_MARKED(objspace, obj);
rb_gc_impl_gc_enabled_p(void *objspace_ptr)
    return !dont_gc_val();

rb_gc_impl_gc_enable(void *objspace_ptr)

rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
    if (finish_current_gc) {

    return calloc(1, n);

rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event)

rb_gc_impl_get_total_time(void *objspace_ptr)
    unsigned long long marking_time = objspace->profile.marking_time_ns;
    unsigned long long sweeping_time = objspace->profile.sweeping_time_ns;

    return marking_time + sweeping_time;

rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)

rb_gc_impl_get_measure_total_time(void *objspace_ptr)
rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)

    asan_unpoisoning_object(ptr) {

    if (dead) return true;
    return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&

    rb_asan_unpoison_object(obj, false);

    asan_unlock_freelist(page);

    slot->next = page->freelist;
    page->freelist = slot;
    asan_lock_freelist(page);

    GC_ASSERT(RVALUE_AGE_GET(obj) == 0);

    if (RGENGC_CHECK_MODE &&
            !(page->start <= (uintptr_t)obj &&
              (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
              obj % sizeof(VALUE) == 0)) {
        rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)obj);

    rb_asan_poison_object(obj);
    gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
                              rb_heap_t *heap, size_t free_slots, size_t total_slots, size_t slot_size)
    double goal_ratio = gc_params.heap_free_slots_goal_ratio;
    size_t target_total_slots;

    if (goal_ratio == 0.0) {
        target_total_slots = (size_t)(total_slots * gc_params.growth_factor);
    else if (total_slots == 0) {
        target_total_slots = gc_params.heap_init_bytes / slot_size;
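        /*
         * Solve for the growth factor f that would make free slots a
         * goal_ratio share of the grown heap: with L live slots,
         * L / (f * total_slots) == 1 - goal_ratio, hence
         * f = L / ((1 - goal_ratio) * total_slots). f is then capped at
         * growth_factor, and bumped to 1.1 if it would shrink the heap.
         */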
        double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);

        if (f > gc_params.growth_factor) f = gc_params.growth_factor;
        if (f < 1.0) f = 1.1;

        target_total_slots = (size_t)(f * total_slots);

                  "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
                  " G(%1.2f), f(%1.2f),"
                  " total_slots(%8"PRIuSIZE") => target_total_slots(%8"PRIuSIZE")\n",
                  free_slots, total_slots, free_slots/(double)total_slots,
                  goal_ratio, f, total_slots, target_total_slots);

    if (gc_params.growth_max_bytes > 0) {
        size_t max_total_slots = total_slots + gc_params.growth_max_bytes / slot_size;
        if (target_total_slots > max_total_slots) target_total_slots = max_total_slots;

    size_t extend_slot_count = target_total_slots - total_slots;

    if (extend_slot_count == 0) extend_slot_count = 1;

    objspace->heap_pages.allocatable_bytes += extend_slot_count * slot_size;
    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->free_pages;
    heap->free_pages = page;

    RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);

    asan_lock_freelist(page);

    asan_unlock_freelist(page);
    GC_ASSERT(page->free_slots != 0);
    GC_ASSERT(page->freelist != NULL);

    page->free_next = heap->pooled_pages;
    heap->pooled_pages = page;
    objspace->rincgc.pooled_slots += page->free_slots;

    asan_lock_freelist(page);

    ccan_list_del(&page->page_node);
    heap->total_pages--;
    heap->total_slots -= page->total_slots;
static void
gc_aligned_free(void *ptr, size_t size)
{
#if defined __MINGW32__
    __mingw_aligned_free(ptr);
#elif defined _WIN32
    _aligned_free(ptr);
#elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
    free(ptr);
#else
    free(((void**)ptr)[-1]);
#endif
}
    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);

    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
        if (munmap(page_body, HEAP_PAGE_SIZE)) {
            rb_bug("heap_page_body_free: munmap failed");

        gc_aligned_free(page_body, HEAP_PAGE_SIZE);
    objspace->heap_pages.freed_pages++;
    heap_page_body_free(page->body);

    if (objspace->empty_pages != NULL && heap_pages_freeable_pages > 0) {
        GC_ASSERT(objspace->empty_pages_count > 0);

        for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {

            if (heap_page_in_global_empty_pages_pool(objspace, page) && heap_pages_freeable_pages > 0) {
                heap_pages_freeable_pages--;

            if (heap_page_in_global_empty_pages_pool(objspace, page)) {
                page->free_next = objspace->empty_pages;

                rb_darray_set(objspace->heap_pages.sorted, j, page);

        rb_darray_pop(objspace->heap_pages.sorted, i - j);
        GC_ASSERT(rb_darray_size(objspace->heap_pages.sorted) == j);

        uintptr_t himem = (uintptr_t)hipage->body + HEAP_PAGE_SIZE;
        GC_ASSERT(himem <= heap_pages_himem);
        heap_pages_himem = himem;

        uintptr_t lomem = (uintptr_t)lopage->body + sizeof(struct heap_page_header);
        GC_ASSERT(lomem >= heap_pages_lomem);
        heap_pages_lomem = lomem;
gc_aligned_malloc(size_t alignment, size_t size)
    GC_ASSERT(((alignment - 1) & alignment) == 0);
    GC_ASSERT(alignment % sizeof(void*) == 0);

#if defined __MINGW32__
    res = __mingw_aligned_malloc(size, alignment);
    void *_aligned_malloc(size_t, size_t);
    res = _aligned_malloc(size, alignment);
#elif defined(HAVE_POSIX_MEMALIGN)
    if (posix_memalign(&res, alignment, size) != 0) {
#elif defined(HAVE_MEMALIGN)
    res = memalign(alignment, size);
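    /*
     * Fallback without an aligned allocator: over-allocate by
     * alignment + sizeof(void*), round the returned pointer up to the next
     * alignment boundary, and stash the original malloc pointer in the word
     * just below the aligned block so gc_aligned_free() can recover it via
     * ((void **)ptr)[-1].
     */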
    res = malloc(alignment + size + sizeof(void*));
    aligned = (char*)res + alignment + sizeof(void*);
    aligned -= ((VALUE)aligned & (alignment - 1));
    ((void**)aligned)[-1] = res;
    res = (void*)aligned;

    GC_ASSERT((uintptr_t)res % alignment == 0);
heap_page_body_allocate(void)

    if (HEAP_PAGE_ALLOC_USE_MMAP) {
        GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);

        size_t mmap_size = HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE;
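        /*
         * mmap() returns page-aligned memory, but heap pages must be
         * aligned to HEAP_PAGE_ALIGN (64 KiB) so GET_PAGE_BODY() can mask
         * pointer bits. Map ALIGN + SIZE bytes, then munmap the misaligned
         * slack at both ends, leaving exactly one correctly aligned page.
         */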
        char *ptr = mmap(NULL, mmap_size,
                         PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {

#if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, mmap_size, "Ruby:GC:default:heap_page_body_allocate");

        char *aligned = ptr + HEAP_PAGE_ALIGN;
        aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
        GC_ASSERT(aligned > ptr);
        GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);

        size_t start_out_of_range_size = aligned - ptr;
        GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (start_out_of_range_size > 0) {
            if (munmap(ptr, start_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for start");

        size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
        GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
        if (end_out_of_range_size > 0) {
            if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
                rb_bug("heap_page_body_allocate: munmap failed for end");

        page_body = gc_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);

    GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
    if (objspace->empty_pages == NULL) {
        GC_ASSERT(objspace->empty_pages_count == 0);

    GC_ASSERT(objspace->empty_pages_count > 0);
    objspace->empty_pages = page->free_next;

    if (page_body == 0) {

        heap_page_body_free(page_body);

    uintptr_t end = (uintptr_t)page_body + HEAP_PAGE_SIZE;

    size_t hi = rb_darray_size(objspace->heap_pages.sorted);

        size_t mid = (lo + hi) / 2;
        mid_page = rb_darray_get(objspace->heap_pages.sorted, mid);
        if ((uintptr_t)mid_page->start < start) {
        else if ((uintptr_t)mid_page->start > start) {
            rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);

    rb_darray_insert_without_gc(&objspace->heap_pages.sorted, hi, page);

    if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
    if (heap_pages_himem < end) heap_pages_himem = end;

    page->body = page_body;
    page_body->header.page = page;

    objspace->heap_pages.allocated_pages++;
    GC_ASSERT(!heap->sweeping_page);
    GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, page));

    uintptr_t rem = start % heap->slot_size;
    if (rem) start += heap->slot_size - rem;

    int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/heap->slot_size);

    page->start = start;
    page->total_slots = slot_count;
    page->slot_size = heap->slot_size;
    page->slot_size_reciprocal = heap_slot_reciprocal_table[heap - heaps];

    memset(&page->wb_unprotected_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
    memset(&page->age_bits[0], 0, sizeof(page->age_bits));

    asan_unlock_freelist(page);
    page->freelist = NULL;
    asan_unpoison_memory_region(page->body, HEAP_PAGE_SIZE, false);
    for (VALUE p = (VALUE)start; p < start + (slot_count * heap->slot_size); p += heap->slot_size) {
        heap_page_add_freeobj(objspace, page, p);
    asan_lock_freelist(page);

    page->free_slots = slot_count;

    heap->total_allocated_pages++;

    ccan_list_add_tail(&heap->pages, &page->page_node);
    heap->total_pages++;
    heap->total_slots += page->total_slots;
    gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
              "allocatable_bytes: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
              rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_bytes, heap->total_pages);

    bool allocated = false;

    if (page == NULL && objspace->heap_pages.allocatable_bytes > 0) {
        page = heap_page_allocate(objspace);

        GC_ASSERT(page != NULL);

        heap_add_page(objspace, heap, page);
        heap_add_freepage(heap, page);

        size_t page_bytes = (size_t)page->total_slots * page->slot_size;
        if (objspace->heap_pages.allocatable_bytes > page_bytes) {
            objspace->heap_pages.allocatable_bytes -= page_bytes;

            objspace->heap_pages.allocatable_bytes = 0;

    return page != NULL;

    size_t prev_allocatable_bytes = objspace->heap_pages.allocatable_bytes;
    objspace->heap_pages.allocatable_bytes = HEAP_PAGE_SIZE;
    heap_page_allocate_and_initialize(objspace, heap);
    GC_ASSERT(heap->free_pages != NULL);
    objspace->heap_pages.allocatable_bytes = prev_allocatable_bytes;
    unsigned int lock_lev;

    bool needs_gc = is_incremental_marking(objspace) || needs_continue_sweeping(objspace, heap);
    if (!needs_gc) return;

    gc_enter(objspace, gc_enter_event_continue, &lock_lev);

    if (is_incremental_marking(objspace)) {
        if (gc_marks_continue(objspace, heap)) {

    if (needs_continue_sweeping(objspace, heap)) {

    gc_exit(objspace, gc_enter_event_continue, &lock_lev);

    GC_ASSERT(heap->free_pages == NULL);

    if (heap->total_slots < gc_params.heap_init_bytes / heap->slot_size &&
            heap->sweeping_page == NULL) {
        heap_page_allocate_and_initialize_force(objspace, heap);
        GC_ASSERT(heap->free_pages != NULL);

    if (heap->free_pages == NULL) {
        heap_page_allocate_and_initialize(objspace, heap);

    if (heap->free_pages == NULL) {
        GC_ASSERT(objspace->empty_pages_count == 0);
        GC_ASSERT(objspace->heap_pages.allocatable_bytes == 0);

        if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {

        if (objspace->heap_pages.allocatable_bytes == 0 && !gc_config_full_mark_val) {
            heap_allocatable_bytes_expand(objspace, heap,
                                          heap->freed_slots + heap->empty_slots,
                                          heap->total_slots, heap->slot_size);
            GC_ASSERT(objspace->heap_pages.allocatable_bytes > 0);

        if (heap->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, heap)) {
            if (gc_needs_major_flags == GPR_FLAG_NONE) {
                rb_bug("cannot create a new page after GC");

            if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {

                if (heap->free_pages == NULL &&
                        !heap_page_allocate_and_initialize(objspace, heap)) {
                    rb_bug("cannot create a new page after major GC");

    GC_ASSERT(heap->free_pages != NULL);
static inline const char*
rb_gc_impl_source_location_cstr(int *ptr)

    RBASIC(obj)->flags = flags;

#if RBASIC_SHAPE_ID_FIELD
    RBASIC(obj)->shape_id = 0;

#if RACTOR_CHECK_MODE
    void rb_ractor_setup_belonging(VALUE obj);
    rb_ractor_setup_belonging(obj);

#if RGENGC_CHECK_MODE
    int lev = RB_GC_VM_LOCK_NO_BARRIER();

        check_rvalue_consistency(objspace, obj);

        GC_ASSERT(RVALUE_MARKED(objspace, obj) == FALSE);
        GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
        GC_ASSERT(RVALUE_OLD_P(objspace, obj) == FALSE);
        GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, obj) == FALSE);

        if (RVALUE_REMEMBERED(objspace, obj)) rb_bug("newobj: %s is remembered.", rb_obj_info(obj));

    RB_GC_VM_UNLOCK_NO_BARRIER(lev);

    if (RB_UNLIKELY(wb_protected == FALSE)) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);

        objspace->profile.total_generated_normal_object_count++;
#if RGENGC_PROFILE >= 2

        objspace->profile.total_generated_shady_object_count++;
#if RGENGC_PROFILE >= 2

    GET_RVALUE_OVERHEAD(obj)->file = rb_gc_impl_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);

    gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
size_t
rb_gc_impl_obj_slot_size(VALUE obj)
{
    return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
}

static size_t
heap_slot_size(unsigned char pool_id)
{
    GC_ASSERT(pool_id < HEAP_COUNT);

    return pool_slot_sizes[pool_id] - RVALUE_OVERHEAD;
}

bool
rb_gc_impl_size_allocatable_p(size_t size)
{
    return size + RVALUE_OVERHEAD <= pool_slot_sizes[HEAP_COUNT - 1];
}
static const size_t ALLOCATED_COUNT_STEP = 1024;
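/*
 * Per-ractor caches count allocations locally and only publish them to the
 * shared heap counters once ALLOCATED_COUNT_STEP allocations have
 * accumulated, keeping the allocation fast path free of cross-ractor
 * synchronization.
 */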
    for (int heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {

        heap_cache->allocated_objects_count = 0;

    struct free_slot *p = heap_cache->freelist;

    if (RB_UNLIKELY(is_incremental_marking(objspace))) {
        if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {

        cache->incremental_mark_step_allocated_slots++;

        rb_asan_unpoison_object(obj, true);
        heap_cache->freelist = p->next;

        heap_cache->allocated_objects_count++;

        if (heap_cache->allocated_objects_count >= ALLOCATED_COUNT_STEP) {
            heap_cache->allocated_objects_count = 0;

#if RGENGC_CHECK_MODE
        GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == heap_slot_size(heap_idx));

        MEMZERO((char *)obj, char, heap_slot_size(heap_idx));
2351 page = heap->free_pages;
2352 heap->free_pages = page->free_next;
2354 GC_ASSERT(page->free_slots != 0);
2356 asan_unlock_freelist(page);
2365 gc_report(3,
objspace,
"ractor_set_cache: Using page %p\n", (
void *)page->body);
2369 GC_ASSERT(heap_cache->freelist == NULL);
2370 GC_ASSERT(page->free_slots != 0);
2371 GC_ASSERT(page->freelist != NULL);
2373 heap_cache->using_page = page;
2374 heap_cache->freelist = page->freelist;
2375 page->free_slots = 0;
2376 page->freelist = NULL;
2378 rb_asan_unpoison_object((
VALUE)heap_cache->freelist,
false);
2380 rb_asan_poison_object((
VALUE)heap_cache->freelist);
init_size_to_heap_idx(void)
    for (size_t i = 0; i < sizeof(size_to_heap_idx); i++) {
        size_t effective = i * 8 + RVALUE_OVERHEAD;
        for (idx = 0; idx < HEAP_COUNT; idx++) {
            if (effective <= pool_slot_sizes[idx]) break;
        size_to_heap_idx[i] = idx;

heap_idx_for_size(size_t size)
    size_t compressed = (size + 7) >> 3;
    if (compressed < sizeof(size_to_heap_idx)) {
        size_t heap_idx = size_to_heap_idx[compressed];
        if (RB_LIKELY(heap_idx < HEAP_COUNT)) return heap_idx;

    rb_bug("heap_idx_for_size: allocation size too large "
           "(size=%"PRIuSIZE")", size);

rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
    return heap_idx_for_size(size);
static size_t heap_sizes[HEAP_COUNT + 1] = { 0 };

rb_gc_impl_heap_sizes(void *objspace_ptr)
    if (heap_sizes[0] == 0) {
        for (unsigned char i = 0; i < HEAP_COUNT; i++) {
            heap_sizes[i] = heap_slot_size(i);
    unsigned int lev = 0;
    bool unlock_vm = false;

        lev = RB_GC_CR_LOCK();

    if (is_incremental_marking(objspace)) {

        cache->incremental_mark_step_allocated_slots = 0;

        obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);

            ractor_cache_set_page(objspace, cache, heap_idx, page);

            obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);

        RB_GC_CR_UNLOCK(lev);

    if (RB_UNLIKELY(obj == Qfalse)) {

    VALUE obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);

    if (RB_UNLIKELY(obj == Qfalse)) {
        obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);

    lev = RB_GC_CR_LOCK();

    if (RB_UNLIKELY(during_gc || ruby_gc_stressful)) {

        if (rb_memerror_reentered()) {

            rb_bug("object allocation during garbage collection phase");

        if (ruby_gc_stressful) {
            if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {

    obj = newobj_alloc(objspace, cache, heap_idx, true);
    newobj_init(klass, flags, wb_protected, objspace, obj);

    RB_GC_CR_UNLOCK(lev);
NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,

    return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx);

    return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx);

rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags,
                   bool wb_protected, size_t alloc_size)

    RB_DEBUG_COUNTER_INC(obj_newobj);
    (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);

    if (RB_UNLIKELY(stress_to_class)) {
        if (rb_hash_lookup2(stress_to_class, klass, Qundef) != Qundef) {

    size_t heap_idx = heap_idx_for_size(alloc_size);

    if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) &&

        obj = newobj_alloc(objspace, cache, heap_idx, false);
        newobj_init(klass, flags, wb_protected, objspace, obj);

        RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);

        obj = wb_protected ?
            newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) :
            newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx);
ptr_in_page_body_p(const void *ptr, const void *memb)
    uintptr_t p_body = (uintptr_t)page->body;

    if ((uintptr_t)ptr >= p_body) {
        return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;

    if (ptr < (uintptr_t)heap_pages_lomem ||
            ptr > (uintptr_t)heap_pages_himem) {

    res = bsearch((void *)ptr, rb_darray_ref(objspace->heap_pages.sorted, 0),
                  ptr_in_page_body_p);
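/*
 * Conservative pointer test: a candidate pointer counts as a heap object
 * only if it falls inside [lomem, himem), is VALUE-aligned, maps to a
 * known heap page (binary search over the sorted page array above), and
 * lands exactly on a slot boundary within that page.
 */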
    register uintptr_t p = (uintptr_t)ptr;

    RB_DEBUG_COUNTER_INC(gc_isptr_trial);

    if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
    RB_DEBUG_COUNTER_INC(gc_isptr_range);

    if (p % sizeof(VALUE) != 0) return FALSE;
    RB_DEBUG_COUNTER_INC(gc_isptr_align);

    page = heap_page_for_ptr(objspace, (uintptr_t)ptr);

        RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
        if (heap_page_in_global_empty_pages_pool(objspace, page)) {

            if (p < page->start) return FALSE;
            if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
            if ((p - page->start) % page->slot_size != 0) return FALSE;
bool
rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
{
    return is_pointer_to_heap(objspace_ptr, ptr);
}
#define ZOMBIE_OBJ_KEPT_FLAGS (FL_FINALIZE)
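/*
 * A finalizable object is not freed in place: it is flipped into a
 * T_ZOMBIE that carries its dfree callback and data, pushed onto the
 * global deferred-final list, and reclaimed later when deferred
 * finalization runs.
 */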
rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)

    struct RZombie *zombie = RZOMBIE(obj);
    zombie->flags = T_ZOMBIE | (zombie->flags & ZOMBIE_OBJ_KEPT_FLAGS);
    zombie->dfree = dfree;
    zombie->data = data;
    VALUE prev, next = heap_pages_deferred_final;
        zombie->next = prev = next;
    } while (next != prev);

    struct heap_page *page = GET_HEAP_PAGE(obj);
    page->final_slots++;
    page->heap->final_slots_count++;
typedef int each_obj_callback(void *, void *, size_t, void *);
typedef int each_page_callback(struct heap_page *, void *);

    bool reenable_incremental;

    each_obj_callback *each_obj_callback;
    each_page_callback *each_page_callback;

    size_t pages_counts[HEAP_COUNT];
objspace_each_objects_ensure(VALUE arg)

    if (data->reenable_incremental) {
        objspace->flags.dont_incremental = FALSE;

    for (int i = 0; i < HEAP_COUNT; i++) {
        struct heap_page **pages = data->pages[i];

objspace_each_objects_try(VALUE arg)

    for (int i = 0; i < HEAP_COUNT; i++) {
        size_t size = heap->total_pages * sizeof(struct heap_page *);

        struct heap_page **pages = malloc(size);
        if (!pages) rb_memerror();

        size_t pages_count = 0;
        ccan_list_for_each(&heap->pages, page, page_node) {
            pages[pages_count] = page;

        data->pages[i] = pages;
        data->pages_counts[i] = pages_count;
        GC_ASSERT(pages_count == heap->total_pages);

    for (int i = 0; i < HEAP_COUNT; i++) {
        size_t pages_count = data->pages_counts[i];
        struct heap_page **pages = data->pages[i];

        for (size_t i = 0; i < pages_count; i++) {

            if (page == NULL) break;

            if (pages[i] != page) continue;

            uintptr_t pstart = (uintptr_t)page->start;
            uintptr_t pend = pstart + (page->total_slots * heap->slot_size);

            if (data->each_obj_callback &&
                    (*data->each_obj_callback)((void *)pstart, (void *)pend, heap->slot_size, data->data)) {

            if (data->each_page_callback &&
                    (*data->each_page_callback)(page, data->data)) {

            page = ccan_list_next(&heap->pages, page, page_node);
    bool reenable_incremental = FALSE;

        reenable_incremental = !objspace->flags.dont_incremental;

        objspace->flags.dont_incremental = TRUE;

objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)

        .each_obj_callback = callback,
        .each_page_callback = NULL,

void
rb_gc_impl_each_objects(void *objspace_ptr, each_obj_callback *callback, void *data)
{
    objspace_each_objects(objspace_ptr, callback, data, TRUE);
}
#if GC_CAN_COMPILE_COMPACTION
objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)

        .each_obj_callback = NULL,
        .each_page_callback = callback,
rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)

    unsigned int lev = RB_GC_VM_LOCK();

    if (st_lookup(finalizer_table, obj, &data)) {
        table = (VALUE)data;

        RB_GC_VM_UNLOCK(lev);

        for (i = 0; i < len; i++) {

    lev = RB_GC_VM_LOCK();

    st_add_direct(finalizer_table, obj, table);

    RB_GC_VM_UNLOCK(lev);

rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)

    st_data_t data = obj;

    int lev = RB_GC_VM_LOCK();
    st_delete(finalizer_table, &data, 0);
    RB_GC_VM_UNLOCK(lev);

rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)

    int lev = RB_GC_VM_LOCK();
    if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {

        st_insert(finalizer_table, dest, table);

        rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));

    RB_GC_VM_UNLOCK(lev);
get_final(long i, void *data)

    if (RZOMBIE(zombie)->dfree) {
        RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);

    st_data_t key = (st_data_t)zombie;

        if (st_delete(finalizer_table, &key, &table)) {
            RB_GC_VM_UNLOCK(lev);

            lev = RB_GC_VM_LOCK();

            rb_bug("FL_FINALIZE flag is set, but finalizers are not found");

        GC_ASSERT(!st_lookup(finalizer_table, key, NULL));

        rb_asan_unpoison_object(zombie, false);
        next_zombie = RZOMBIE(zombie)->next;
        page = GET_HEAP_PAGE(zombie);

        unsigned int lev = RB_GC_VM_LOCK();

        lev = run_final(objspace, zombie, lev);

        GC_ASSERT(page->heap->final_slots_count > 0);
        GC_ASSERT(page->final_slots > 0);

        page->heap->final_slots_count--;
        page->final_slots--;

        RVALUE_AGE_SET_BITMAP(zombie, 0);
        heap_page_add_freeobj(objspace, page, zombie);
        page->heap->total_freed_objects++;

        RB_GC_VM_UNLOCK(lev);

        zombie = next_zombie;

    rb_gc_set_pending_interrupt();
    finalize_deferred_heap_pages(objspace);
    rb_gc_unset_pending_interrupt();
gc_finalize_deferred(void *dmy)

gc_abort(void *objspace_ptr)

    if (is_incremental_marking(objspace)) {

        while (pop_mark_stack(&objspace->mark_stack, &obj));

        objspace->flags.during_incremental_marking = FALSE;

    for (int i = 0; i < HEAP_COUNT; i++) {

        heap->sweeping_page = NULL;

        ccan_list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = false;

    for (int i = 0; i < HEAP_COUNT; i++) {

        rgengc_mark_and_rememberset_clear(objspace, heap);

    gc_mode_set(objspace, gc_mode_none);

rb_gc_impl_shutdown_free_objects(void *objspace_ptr)

    for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (; p < pend; p += stride) {

            asan_unpoisoning_object(vp) {

                    rb_gc_obj_free_vm_weak_references(vp);
                    if (rb_gc_obj_free(objspace, vp)) {
rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t _data)

rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);

    objspace->flags.dont_incremental = 1;

    while (finalizer_table->num_entries) {
        st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, 0);

    GC_ASSERT(heap_pages_deferred_final == 0);

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);

    for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (; p < pend; p += stride) {

            asan_unpoisoning_object(vp) {
                if (rb_gc_shutdown_call_finalizer_p(vp)) {
                    rb_gc_obj_free_vm_weak_references(vp);
                    if (rb_gc_obj_free(objspace, vp)) {

    gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);

    finalize_deferred_heap_pages(objspace);

    st_free_table(finalizer_table);
    finalizer_table = 0;
void
rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data)
{
    rb_objspace_t *objspace = objspace_ptr;

    for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
        struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
        short stride = page->slot_size;

        uintptr_t p = (uintptr_t)page->start;
        uintptr_t pend = p + page->total_slots * stride;
        for (; p < pend; p += stride) {
            VALUE obj = (VALUE)p;

            asan_unpoisoning_object(obj) {
                func(obj, data);
            }
        }
    }
}

static size_t
objspace_available_slots(rb_objspace_t *objspace)
{
    size_t total_slots = 0;
    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        total_slots += heap->total_slots;
    }
    return total_slots;
}
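/*
 * Compaction support. Pages behind the compact cursor are memory-protected
 * (PROT_NONE / PAGE_NOACCESS); any stray access to a moved slot faults, and
 * the fault handler installed below unprotects the page and invalidates the
 * moved objects on it. This is the "read barrier" that makes lazy
 * compaction safe.
 */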
static void
gc_setup_mark_bits(struct heap_page *page)
{
    /* Copy the uncollectible bits over the mark bits so uncollectible
     * objects stay marked across a minor GC. */
    memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
}

#if defined(_WIN32)
enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};

static BOOL
protect_page_body(struct heap_page_body *body, DWORD protect)
{
    DWORD old_protect;
    return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
}
#elif defined(__wasi__)
/* WASI does not support memory protection; treat it as a no-op. */
enum {HEAP_PAGE_LOCK, HEAP_PAGE_UNLOCK};
#define protect_page_body(body, protect) 1
#else
enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
#define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
#endif

static void
lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
    if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
        rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
    }

    gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
}

static void
unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
{
    if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
        rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
    }

    gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
}
static bool
try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
{
    GC_ASSERT(gc_is_moveable_obj(objspace, src));

    struct heap_page *src_page = GET_HEAP_PAGE(src);
    if (!free_page) {
        return false;
    }

    GC_ASSERT(RVALUE_MARKED(objspace, src));

    asan_unlock_freelist(free_page);
    VALUE dest = (VALUE)free_page->freelist;
    asan_lock_freelist(free_page);
    if (dest == 0) {
        return false;
    }
    rb_asan_unpoison_object(dest, false);

    asan_unlock_freelist(free_page);
    free_page->freelist = ((struct free_slot *)dest)->next;
    asan_lock_freelist(free_page);

    if (src_page->slot_size > free_page->slot_size) {
        objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
    }
    else if (free_page->slot_size > src_page->slot_size) {
        objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
    }
    objspace->rcompactor.total_moved++;

    gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
    free_page->free_slots--;

    return true;
}

static void
gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *cursor = heap->compact_cursor;

    while (cursor) {
        unlock_page_body(objspace, cursor->body);
        cursor = ccan_list_next(&heap->pages, cursor, page_node);
    }
}
#if GC_CAN_COMPILE_COMPACTION
static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
#endif

#if defined(__MINGW32__) || defined(_WIN32)
# define GC_COMPACTION_SUPPORTED 1
#else
/* Compaction needs the read barrier, which requires aligned pages, which
 * requires the heap pages to be allocated with mmap. */
# define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
#endif

#if GC_CAN_COMPILE_COMPACTION
static void
read_barrier_handler(uintptr_t address)
{
    rb_objspace_t *objspace = (rb_objspace_t *)rb_gc_get_objspace();

    struct heap_page_body *page_body = GET_PAGE_BODY(address);

    /* If the fault is not on one of our protected pages, it is a real
     * segmentation fault. */
    if (page_body == NULL) {
        rb_bug("read_barrier_handler: segmentation fault at %p", (void *)address);
    }

    int lev = RB_GC_VM_LOCK();
    {
        unlock_page_body(objspace, page_body);

        objspace->profile.read_barrier_faults++;

        invalidate_moved_page(objspace, GET_HEAP_PAGE(address));
    }
    RB_GC_VM_UNLOCK(lev);
}
#endif

#if !GC_CAN_COMPILE_COMPACTION
static void
uninstall_handlers(void)
{
    /* no-op */
}

static void
install_handlers(void)
{
    /* no-op */
}
#elif defined(_WIN32)
static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
typedef void (*signal_handler)(int);
static signal_handler old_sigsegv_handler;

static LONG WINAPI
read_barrier_signal(EXCEPTION_POINTERS *info)
{
    /* EXCEPTION_ACCESS_VIOLATION stores the faulting address in
     * ExceptionInformation[1]. */
    if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
        read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
        return EXCEPTION_CONTINUE_EXECUTION;
    }
    else {
        return EXCEPTION_CONTINUE_SEARCH;
    }
}

static void
uninstall_handlers(void)
{
    signal(SIGSEGV, old_sigsegv_handler);
    SetUnhandledExceptionFilter(old_handler);
}

static void
install_handlers(void)
{
    /* Remove the SEGV handler so the Unhandled Exception Filter handles it;
     * the filter, unlike signal(2), has access to the faulting address. */
    old_sigsegv_handler = signal(SIGSEGV, NULL);
    old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
}
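/*
 * POSIX read barrier: catch the SIGBUS/SIGSEGV raised by reads of protected
 * pages. The handler temporarily restores the original handlers and
 * unblocks the signals so that a nested fault is still fatal instead of
 * deadlocking. On macOS, Mach exception ports would swallow EXC_BAD_ACCESS
 * before the signal is delivered, so they are swapped out around the
 * critical region.
 */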
#else
static struct sigaction old_sigbus_handler;
static struct sigaction old_sigsegv_handler;

#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
static exception_mask_t old_exception_masks[32];
static mach_port_t old_exception_ports[32];
static exception_behavior_t old_exception_behaviors[32];
static thread_state_flavor_t old_exception_flavors[32];
static mach_msg_type_number_t old_exception_count;

static void
disable_mach_bad_access_exc(void)
{
    old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
    task_swap_exception_ports(
        mach_task_self(), EXC_MASK_BAD_ACCESS,
        MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
        old_exception_masks, &old_exception_count,
        old_exception_ports, old_exception_behaviors, old_exception_flavors
    );
}

static void
restore_mach_bad_access_exc(void)
{
    for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
        task_set_exception_ports(
            mach_task_self(),
            old_exception_masks[i], old_exception_ports[i],
            old_exception_behaviors[i], old_exception_flavors[i]
        );
    }
}
#endif

static void
read_barrier_signal(int sig, siginfo_t *info, void *data)
{
    /* Set the signal handlers back to the defaults to prevent reentrancy. */
    struct sigaction prev_sigbus, prev_sigsegv;
    sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
    sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);

    /* Unblock SIGBUS/SIGSEGV so that a nested fault is delivered. */
    sigset_t set, prev_set;
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    sigaddset(&set, SIGSEGV);
    sigprocmask(SIG_UNBLOCK, &set, &prev_set);
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    disable_mach_bad_access_exc();
#endif

    read_barrier_handler((uintptr_t)info->si_addr);

    /* Restore the original handler state. */
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    restore_mach_bad_access_exc();
#endif
    sigaction(SIGBUS, &prev_sigbus, NULL);
    sigaction(SIGSEGV, &prev_sigsegv, NULL);
    sigprocmask(SIG_SETMASK, &prev_set, NULL);
}

static void
uninstall_handlers(void)
{
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    restore_mach_bad_access_exc();
#endif
    sigaction(SIGBUS, &old_sigbus_handler, NULL);
    sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
}

static void
install_handlers(void)
{
    struct sigaction action;
    memset(&action, 0, sizeof(struct sigaction));
    sigemptyset(&action.sa_mask);
    action.sa_sigaction = read_barrier_signal;
    action.sa_flags = SA_SIGINFO | SA_ONSTACK;

    sigaction(SIGBUS, &action, &old_sigbus_handler);
    sigaction(SIGSEGV, &action, &old_sigsegv_handler);
#ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
    disable_mach_bad_access_exc();
#endif
}
#endif
static void
gc_compact_finish(rb_objspace_t *objspace)
{
    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        gc_unprotect_pages(objspace, heap);
    }

    uninstall_handlers();

    /* ... */

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        heap->compact_cursor = NULL;
        heap->free_pages = NULL;
        heap->compact_cursor_index = 0;
    }

    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
    }
    objspace->flags.during_compacting = FALSE;
}
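/*
 * Sweeping. gc_sweep_plane() frees the dead slots for one bitmap word of a
 * page; gc_sweep_page() walks a whole page plane by plane, first clearing
 * the age and WB-unprotected bits of every unmarked slot, then freeing the
 * unmarked slots.
 */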
static void
gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
{
    struct heap_page *sweep_page = ctx->page;
    short slot_size = sweep_page->slot_size;

    do {
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % sizeof(VALUE) == 0);

        if (bitset & 1) {
            rb_asan_unpoison_object(vp, false);

            switch (BUILTIN_TYPE(vp)) {
              case T_MOVED:
                if (objspace->flags.during_compacting) {
                    /* The sweep cursor should never reach a T_MOVED slot
                     * while compaction is still running. */
                    rb_bug("T_MOVED shouldn't be seen until compaction is finished");
                }
                gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
                ctx->empty_slots++;
                heap_page_add_freeobj(objspace, sweep_page, vp);
                break;

              default: /* majority case */
#if RGENGC_CHECK_MODE
                if (!is_full_marking(objspace)) {
                    if (RVALUE_OLD_P(objspace, vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
                    if (RVALUE_REMEMBERED(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
                }
#endif

#if RGENGC_CHECK_MODE
#define CHECK(x) if (x(objspace, vp) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", rb_obj_info(vp))
                CHECK(RVALUE_WB_UNPROTECTED);
                CHECK(RVALUE_MARKED);
                CHECK(RVALUE_MARKING);
                CHECK(RVALUE_UNCOLLECTIBLE);
#undef CHECK
#endif

                if (!rb_gc_obj_needs_cleanup_p(vp)) {
                    /* Fast path: nothing to tear down, reuse the slot directly. */
                    ctx->freed_slots++;
                    (void)VALGRIND_MAKE_MEM_UNDEFINED((void *)p, slot_size);
                    heap_page_add_freeobj(objspace, sweep_page, vp);
                    gc_report(3, objspace, "page_sweep: %s (fast path) added to freelist\n", rb_obj_info(vp));
                }
                else {
                    gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);

                    rb_gc_obj_free_vm_weak_references(vp);
                    if (rb_gc_obj_free(objspace, vp)) {
                        ctx->freed_slots++;
                        (void)VALGRIND_MAKE_MEM_UNDEFINED((void *)p, slot_size);
                        heap_page_add_freeobj(objspace, sweep_page, vp);
                        gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
                    }
                }
                break;
            }
        }

        p += slot_size;
        bitset >>= 1;
    } while (bitset);
}
static inline void
gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
{
    struct heap_page *sweep_page = ctx->page;
    GC_ASSERT(sweep_page->heap == heap);

    uintptr_t p;
    bits_t *bits, bitset;

    gc_report(2, objspace, "page_sweep: start.\n");

#if RGENGC_CHECK_MODE
    if (!objspace->flags.immediate_sweep) {
        GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
    }
#endif
    sweep_page->flags.before_sweep = FALSE;
    sweep_page->free_slots = 0;

    p = (uintptr_t)sweep_page->start;
    bits = sweep_page->mark_bits;

    short slot_size = sweep_page->slot_size;
    int total_slots = sweep_page->total_slots;
    int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);

    /* The last bitmap plane may be only partially used; mark the bits past
     * the end of the page as "in use" so they are never swept. */
    int out_of_range_bits = total_slots % BITS_BITLENGTH;
    if (out_of_range_bits != 0) {
        bits[bitmap_plane_count - 1] |= ~(((bits_t)1 << out_of_range_bits) - 1);
    }

    /* The sweep below frees unmarked slots, so first clear the age and
     * WB-unprotected bits of every unmarked slot. */
    bits_t *wb_unprotected_bits = sweep_page->wb_unprotected_bits;
    bits_t *age_bits = sweep_page->age_bits;
    for (int i = 0; i < bitmap_plane_count; i++) {
        bits_t unmarked = ~bits[i];
        wb_unprotected_bits[i] &= ~unmarked;
        /* Each slot has two age bits, so one mark-bit plane corresponds to
         * two age-bit planes. */
        age_bits[i * 2] &= ~unmarked;
        age_bits[i * 2 + 1] &= ~unmarked;
    }

    for (int i = 0; i < bitmap_plane_count; i++) {
        bitset = ~bits[i];
        if (bitset) {
            gc_sweep_plane(objspace, heap, p, bitset, ctx);
        }
        p += BITS_BITLENGTH * slot_size;
    }

    if (!heap->compact_cursor) {
        gc_setup_mark_bits(sweep_page);
    }

#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->removing_objects += ctx->final_slots + ctx->freed_slots;
        record->empty_objects += ctx->empty_slots;
    }
#endif
    if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
                   rb_gc_count(),
                   sweep_page->total_slots,
                   ctx->freed_slots, ctx->empty_slots, ctx->final_slots);

    sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
    sweep_page->heap->total_freed_objects += ctx->freed_slots;

    if (heap_pages_deferred_final && !finalizing) {
        gc_finalize_deferred_register(objspace);
    }

#if RGENGC_CHECK_MODE
    short freelist_len = 0;
    asan_unlock_freelist(sweep_page);
    struct free_slot *ptr = sweep_page->freelist;
    while (ptr) {
        freelist_len++;
        rb_asan_unpoison_object((VALUE)ptr, false);
        struct free_slot *next = ptr->next;
        rb_asan_poison_object((VALUE)ptr);
        ptr = next;
    }
    asan_lock_freelist(sweep_page);
    if (freelist_len != sweep_page->free_slots) {
        rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
    }
#endif

    gc_report(2, objspace, "page_sweep: end.\n");
}
static const char *
gc_mode_name(enum gc_mode mode)
{
    switch (mode) {
      case gc_mode_none: return "none";
      case gc_mode_marking: return "marking";
      case gc_mode_sweeping: return "sweeping";
      case gc_mode_compacting: return "compacting";
      default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
    }
}

static void
gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
{
#if RGENGC_CHECK_MODE
    enum gc_mode prev_mode = gc_mode(objspace);
    switch (prev_mode) {
      case gc_mode_none:       GC_ASSERT(mode == gc_mode_marking); break;
      case gc_mode_marking:    GC_ASSERT(mode == gc_mode_sweeping); break;
      case gc_mode_sweeping:   GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
      case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
    }
#endif
    if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
    gc_mode_set(objspace, mode);
}
static void
heap_page_freelist_append(struct heap_page *page, struct free_slot *freelist)
{
    if (freelist) {
        asan_unlock_freelist(page);
        if (page->freelist) {
            /* Walk to the tail of the page's freelist, then splice the
             * appended list onto it. */
            struct free_slot *p = page->freelist;
            rb_asan_unpoison_object((VALUE)p, false);
            while (p->next) {
                struct free_slot *prev = p;
                p = p->next;
                rb_asan_poison_object((VALUE)prev);
                rb_asan_unpoison_object((VALUE)p, false);
            }
            p->next = freelist;
            rb_asan_poison_object((VALUE)p);
        }
        else {
            page->freelist = freelist;
        }
        asan_lock_freelist(page);
    }
}

static void
gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
{
    heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
    if (heap->sweeping_page) {
        objspace->sweeping_heap_count++;
    }
    heap->free_pages = NULL;
    heap->pooled_pages = NULL;
    if (!objspace->flags.immediate_sweep) {
        struct heap_page *page = NULL;

        ccan_list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = TRUE;
        }
    }
}
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
__attribute__((noinline))
#endif

#if GC_CAN_COMPILE_COMPACTION
static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
static int compare_pinned_slots(const void *left, const void *right, void *d);
#endif

static void
gc_ractor_newobj_cache_clear(void *c, void *data)
{
    rb_ractor_newobj_cache_t *newobj_cache = c;

    newobj_cache->incremental_mark_step_allocated_slots = 0;

    for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
        rb_ractor_newobj_heap_cache_t *cache = &newobj_cache->heap_caches[heap_idx];

        cache->allocated_objects_count = 0;

        struct heap_page *page = cache->using_page;
        struct free_slot *freelist = cache->freelist;
        RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);

        heap_page_freelist_append(page, freelist);

        cache->using_page = NULL;
        cache->freelist = NULL;
    }
}
static void
gc_sweep_start(rb_objspace_t *objspace)
{
    gc_mode_transition(objspace, gc_mode_sweeping);

#if GC_CAN_COMPILE_COMPACTION
    if (objspace->flags.during_compacting) {
        gc_sort_heap_by_compare_func(
            objspace,
            objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots);
    }
#endif

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        gc_sweep_start_heap(objspace, heap);

        /* A heap with no pages has nothing to sweep; finish it immediately. */
        if (heap->sweeping_page == NULL) {
            GC_ASSERT(heap->total_pages == 0);
            GC_ASSERT(heap->total_slots == 0);
            gc_sweep_finish_heap(objspace, heap);
        }
    }

    rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
}
static void
gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
{
    size_t total_slots = heap->total_slots;
    size_t swept_slots = heap->freed_slots + heap->empty_slots;

    size_t init_slots = gc_params.heap_init_bytes / heap->slot_size;
    size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);

    if (swept_slots < min_free_slots &&
            /* The heap is growing if it freed more slots than it had empty. */
            ((heap->empty_slots == 0 && total_slots > 0) || heap->freed_slots > heap->empty_slots)) {
        /* If we don't have enough free slots, reuse completely empty pages
         * before allocating new ones. */
        struct heap_page *resurrected_page;
        while (swept_slots < min_free_slots &&
                (resurrected_page = heap_page_resurrect(objspace))) {
            heap_add_page(objspace, heap, resurrected_page);
            heap_add_freepage(heap, resurrected_page);

            swept_slots += resurrected_page->free_slots;
        }

        if (swept_slots < min_free_slots) {
            /* Grow this heap if we haven't run at least RVALUE_OLD_AGE minor
             * GCs since the last major GC. */
            if (is_full_marking(objspace) ||
                    objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
                if (objspace->heap_pages.allocatable_bytes < min_free_slots * heap->slot_size) {
                    heap_allocatable_bytes_expand(objspace, heap, swept_slots, heap->total_slots, heap->slot_size);
                }
            }
            else if (swept_slots < min_free_slots * 7 / 8 &&
                    objspace->heap_pages.allocatable_bytes < (min_free_slots * 7 / 8 - swept_slots) * heap->slot_size) {
                gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
                heap->force_major_gc_count++;
            }
        }
    }
}
static void
gc_sweep_finish(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_sweep_finish\n");

    heap_pages_free_unused_pages(objspace);

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];

        heap->freed_slots = 0;
        heap->empty_slots = 0;

        if (!will_be_incremental_marking(objspace)) {
            /* Incremental marking is not about to start, so the pooled pages
             * can go back onto the regular free page list. */
            struct heap_page *end_page = heap->free_pages;
            if (end_page) {
                while (end_page->free_next) end_page = end_page->free_next;
                end_page->free_next = heap->pooled_pages;
            }
            else {
                heap->free_pages = heap->pooled_pages;
            }
            heap->pooled_pages = NULL;
        }
    }

    gc_mode_transition(objspace, gc_mode_none);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
}
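/*
 * Incremental (lazy) sweeping: gc_sweep_step() sweeps pages until it has
 * recovered GC_INCREMENTAL_SWEEP_BYTES worth of slots, keeping the first
 * GC_INCREMENTAL_SWEEP_POOL_BYTES worth on the pooled list for use during
 * incremental marking. Pages that turn out completely empty are unlinked
 * and parked on objspace->empty_pages for reuse or release.
 */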
static int
gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *sweep_page = heap->sweeping_page;
    int swept_slots = 0;
    int pooled_slots = 0;
    int sweep_budget = GC_INCREMENTAL_SWEEP_BYTES / heap->slot_size;
    int pool_budget = GC_INCREMENTAL_SWEEP_POOL_BYTES / heap->slot_size;

    if (sweep_page == NULL) return FALSE;

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_start(objspace);
#endif

    do {
        RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);

        struct gc_sweep_context ctx = {
            .page = sweep_page,
            .final_slots = 0,
            .freed_slots = 0,
            .empty_slots = 0,
        };
        gc_sweep_page(objspace, heap, &ctx);
        int free_slots = ctx.freed_slots + ctx.empty_slots;

        heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);

        if (free_slots == sweep_page->total_slots) {
            /* The page was completely freed; unlink it and stash it on the
             * empty pages list. */
            heap_unlink_page(objspace, heap, sweep_page);

            sweep_page->start = 0;
            sweep_page->total_slots = 0;
            sweep_page->slot_size = 0;
            sweep_page->heap = NULL;
            sweep_page->free_slots = 0;

            asan_unlock_freelist(sweep_page);
            sweep_page->freelist = NULL;
            asan_lock_freelist(sweep_page);

            asan_poison_memory_region(sweep_page->body, HEAP_PAGE_SIZE);

            objspace->empty_pages_count++;
            sweep_page->free_next = objspace->empty_pages;
            objspace->empty_pages = sweep_page;
        }
        else if (free_slots > 0) {
            heap->freed_slots += ctx.freed_slots;
            heap->empty_slots += ctx.empty_slots;

            if (pooled_slots < pool_budget) {
                heap_add_poolpage(objspace, heap, sweep_page);
                pooled_slots += free_slots;
            }
            else {
                heap_add_freepage(heap, sweep_page);
                swept_slots += free_slots;
                if (swept_slots > sweep_budget) {
                    break;
                }
            }
        }
        else {
            sweep_page->free_next = NULL;
        }
    } while ((sweep_page = heap->sweeping_page));

    if (!heap->sweeping_page) {
        objspace->sweeping_heap_count--;
        GC_ASSERT(objspace->sweeping_heap_count >= 0);
        gc_sweep_finish_heap(objspace, heap);

        if (!has_sweeping_pages(objspace)) {
            gc_sweep_finish(objspace);
        }
    }

#if GC_ENABLE_LAZY_SWEEP
    gc_prof_sweep_timer_stop(objspace);
#endif

    return heap->free_pages != NULL;
}
static void
gc_sweep_rest(rb_objspace_t *objspace)
{
    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];

        while (heap->sweeping_page) {
            gc_sweep_step(objspace, heap);
        }
    }
}

static void
gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap)
{
    GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
    if (!GC_ENABLE_LAZY_SWEEP) return;

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        if (gc_sweep_step(objspace, heap)) {
            GC_ASSERT(heap->free_pages != NULL);
        }
        else if (heap == sweep_heap) {
            if (objspace->empty_pages_count > 0 ||
                    objspace->heap_pages.allocatable_bytes > 0) {
                heap_page_allocate_and_initialize(objspace, heap);
                GC_ASSERT(heap->free_pages != NULL);
            }
        }
    }
}

/* ... */
    GC_ASSERT(gc_mode(objspace) == gc_mode_none);
/* ... */
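/*
 * While compaction is running, a moved object leaves a T_MOVED cell at its
 * old address whose ->destination field points at the new location;
 * rb_gc_impl_location() resolves an address through that forwarding cell.
 */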
VALUE
rb_gc_impl_location(void *objspace_ptr, VALUE value)
{
    VALUE destination;

    asan_unpoisoning_object(value) {
        if (BUILTIN_TYPE(value) == T_MOVED) {
            destination = (VALUE)RMOVED(value)->destination;
        }
        else {
            destination = value;
        }
    }

    return destination;
}
#if GC_CAN_COMPILE_COMPACTION
static void
invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
{
    if (bitset) {
        do {
            if (bitset & 1) {
                VALUE forwarding_object = (VALUE)p;
                VALUE object;

                if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
                    GC_ASSERT(RVALUE_PINNED(objspace, forwarding_object));
                    GC_ASSERT(!RVALUE_MARKED(objspace, forwarding_object));

                    CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);

                    object = rb_gc_impl_location(objspace, forwarding_object);

                    uint32_t original_shape_id = 0;
                    if (RB_TYPE_P(object, T_OBJECT)) {
                        original_shape_id = RMOVED(forwarding_object)->original_shape_id;
                    }

                    /* Move the object back: after this, forwarding_object is
                     * the live object again and "object" is a free slot on
                     * the original page. */
                    gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);

                    if (original_shape_id) {
                        rb_gc_set_shape(forwarding_object, original_shape_id);
                    }

                    struct heap_page *orig_page = GET_HEAP_PAGE(object);
                    orig_page->free_slots++;
                    RVALUE_AGE_SET_BITMAP(object, 0);
                    heap_page_add_freeobj(objspace, orig_page, object);

                    GC_ASSERT(RVALUE_MARKED(objspace, forwarding_object));
                }
            }
            p += page->slot_size;
            bitset >>= 1;
        } while (bitset);
    }
}

static void
invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
{
    int i;
    bits_t *mark_bits, *pin_bits;
    bits_t bitset;

    short slot_size = page->slot_size;
    int total_slots = page->total_slots;
    int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);

    mark_bits = page->mark_bits;
    pin_bits = page->pinned_bits;

    uintptr_t p = page->start;

    for (i=0; i < bitmap_plane_count; i++) {
        /* Moved objects are pinned but not marked. */
        bitset = pin_bits[i] & ~mark_bits[i];
        invalidate_moved_plane(objspace, page, p, bitset);
        p += BITS_BITLENGTH * slot_size;
    }
}
#endif
static void
gc_compact_start(rb_objspace_t *objspace)
{
    struct heap_page *page = NULL;
    gc_mode_transition(objspace, gc_mode_compacting);

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        ccan_list_for_each(&heap->pages, page, page_node) {
            page->flags.before_sweep = TRUE;
        }

        heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
        heap->compact_cursor_index = 0;
    }

    if (gc_prof_enabled(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->moved_objects = objspace->rcompactor.total_moved;
    }

    memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
    memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));

    /* Set up the read barrier for pages that will contain MOVED objects. */
    install_handlers();
}
static void
gc_sweep(rb_objspace_t *objspace)
{
    const unsigned int immediate_sweep = objspace->flags.immediate_sweep;

    gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);

    gc_sweep_start(objspace);
    if (objspace->flags.during_compacting) {
        gc_compact_start(objspace);
    }

    if (immediate_sweep) {
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_start(objspace);
#endif
        gc_sweep_rest(objspace);
#if !GC_ENABLE_LAZY_SWEEP
        gc_prof_sweep_timer_stop(objspace);
#endif
    }
    else {
        /* Sweep one page from each heap so the mutator has free slots to
         * allocate from immediately. */
        for (int i = 0; i < HEAP_COUNT; i++) {
            rb_heap_t *heap = &heaps[i];
            gc_sweep_step(objspace, heap);
        }
    }
}
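/*
 * Mark stack. The mark stack is a singly linked list of fixed-size chunks;
 * pushing past the end of the current chunk links in a fresh chunk (reusing
 * cached ones when possible), and popping the last slot of a chunk returns
 * it to the cache. stack->index counts used slots in the current chunk and
 * stack->limit is the chunk capacity (STACK_CHUNK_SIZE).
 */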
static stack_chunk_t *
stack_chunk_alloc(void)
{
    stack_chunk_t *res;

    res = malloc(sizeof(stack_chunk_t));
    if (!res)
        rb_memerror();

    return res;
}

static inline int
is_mark_stack_empty(mark_stack_t *stack)
{
    return stack->chunk == NULL;
}

static size_t
mark_stack_size(mark_stack_t *stack)
{
    size_t size = stack->index;
    stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;

    while (chunk) {
        size += stack->limit;
        chunk = chunk->next;
    }
    return size;
}

static void
add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
{
    chunk->next = stack->cache;
    stack->cache = chunk;
    stack->cache_size++;
}

static void
shrink_stack_chunk_cache(mark_stack_t *stack)
{
    stack_chunk_t *chunk;

    if (stack->unused_cache_size > (stack->cache_size/2)) {
        chunk = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        free(chunk);
    }
    stack->unused_cache_size = stack->cache_size;
}

static void
push_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *next;

    GC_ASSERT(stack->index == stack->limit);

    if (stack->cache_size > 0) {
        next = stack->cache;
        stack->cache = stack->cache->next;
        stack->cache_size--;
        if (stack->unused_cache_size > stack->cache_size)
            stack->unused_cache_size = stack->cache_size;
    }
    else {
        next = stack_chunk_alloc();
    }
    next->next = stack->chunk;
    stack->chunk = next;
    stack->index = 0;
}

static void
pop_mark_stack_chunk(mark_stack_t *stack)
{
    stack_chunk_t *prev;

    prev = stack->chunk->next;
    GC_ASSERT(stack->index == 0);
    add_stack_chunk_cache(stack, stack->chunk);
    stack->chunk = prev;
    stack->index = stack->limit;
}

static void
mark_stack_chunk_list_free(stack_chunk_t *chunk)
{
    stack_chunk_t *next = NULL;

    while (chunk != NULL) {
        next = chunk->next;
        free(chunk);
        chunk = next;
    }
}

static void
free_stack_chunks(mark_stack_t *stack)
{
    mark_stack_chunk_list_free(stack->chunk);
}

static void
mark_stack_free_cache(mark_stack_t *stack)
{
    mark_stack_chunk_list_free(stack->cache);
    stack->cache_size = 0;
    stack->unused_cache_size = 0;
}

static void
push_mark_stack(mark_stack_t *stack, VALUE obj)
{
    switch (BUILTIN_TYPE(obj)) {
      case T_OBJECT:
      case T_CLASS:
      case T_STRING:
      case T_ARRAY:
      case T_HASH:
      /* ... all other live object types ... */
        if (stack->index == stack->limit) {
            push_mark_stack_chunk(stack);
        }
        stack->chunk->data[stack->index++] = obj;
        return;

      case T_NONE:
      case T_ZOMBIE:
        rb_bug("push_mark_stack() called for broken object");
        break;

      case T_NODE:
        rb_bug("push_mark_stack: unexpected T_NODE object");
        break;

      default:
        break;
    }

    rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
           BUILTIN_TYPE(obj), (void *)obj,
           is_pointer_to_heap((rb_objspace_t *)rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
}

static int
pop_mark_stack(mark_stack_t *stack, VALUE *data)
{
    if (is_mark_stack_empty(stack)) {
        return FALSE;
    }
    if (stack->index == 1) {
        *data = stack->chunk->data[--stack->index];
        pop_mark_stack_chunk(stack);
    }
    else {
        *data = stack->chunk->data[--stack->index];
    }
    return TRUE;
}

static void
init_mark_stack(mark_stack_t *stack)
{
    int i;

    MEMZERO(stack, mark_stack_t, 1);
    stack->index = stack->limit = STACK_CHUNK_SIZE;

    for (i=0; i < 4; i++) {
        add_stack_chunk_cache(stack, stack_chunk_alloc());
    }
    stack->unused_cache_size = stack->cache_size;
}
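/*
 * Generational invariant: an old object must never point at a young object
 * without being recorded in the remembered set. rgengc_check_relation()
 * remembers the parent whenever such an old->young edge is discovered
 * during marking.
 */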
static void
rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
{
    if (objspace->rgengc.parent_object_old_p) {
        if (RVALUE_WB_UNPROTECTED(objspace, obj) || !RVALUE_OLD_P(objspace, obj)) {
            rgengc_remember(objspace, objspace->rgengc.parent_object);
        }
    }
}

static inline int
gc_mark_set(rb_objspace_t *objspace, VALUE obj)
{
    if (RVALUE_MARKED(objspace, obj)) return 0;
    MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
    return 1;
}

static void
gc_aging(rb_objspace_t *objspace, VALUE obj)
{
    /* Disable aging if major GCs are disabled, to prevent promoting objects
     * that escaped the initial minor GCs. */
    if (!gc_config_full_mark_val)
        return;

    struct heap_page *page = GET_HEAP_PAGE(obj);

    GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
    check_rvalue_consistency(objspace, obj);

    if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
        if (!RVALUE_OLD_P(objspace, obj)) {
            if (RB_TYPE_P(obj, T_CLASS)) {
                /* Promote classes straight to old age. */
                gc_report(3, objspace, "gc_aging: YOUNG class: %s\n", rb_obj_info(obj));
                RVALUE_AGE_SET(obj, RVALUE_OLD_AGE);
                RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
            }
            else {
                gc_report(3, objspace, "gc_aging: YOUNG: %s\n", rb_obj_info(obj));
                RVALUE_AGE_INC(objspace, obj);
            }
        }
        else if (is_full_marking(objspace)) {
            GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
            RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
        }
    }
    check_rvalue_consistency(objspace, obj);

    objspace->marked_slots++;
}
static void
gc_grey(rb_objspace_t *objspace, VALUE obj)
{
#if RGENGC_CHECK_MODE
    if (RVALUE_MARKED(objspace, obj) == FALSE) rb_bug("gc_grey: %s is not marked.", rb_obj_info(obj));
    if (RVALUE_MARKING(objspace, obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", rb_obj_info(obj));
#endif

    if (is_incremental_marking(objspace)) {
        MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
    }

    /* ... */
        rb_darray_append_without_gc(&objspace->weak_references, obj);
    /* ... */

    push_mark_stack(&objspace->mark_stack, obj);
}

static void
gc_mark_check_t_none(rb_objspace_t *objspace, VALUE obj)
{
    enum {info_size = 256};
    char obj_info_buf[info_size];
    rb_raw_obj_info(obj_info_buf, info_size, obj);

    char parent_obj_info_buf[info_size];
    rb_raw_obj_info(parent_obj_info_buf, info_size, objspace->rgengc.parent_object);

    rb_bug("try to mark T_NONE object (obj: %s, parent: %s)", obj_info_buf, parent_obj_info_buf);
}

static void
gc_mark(rb_objspace_t *objspace, VALUE obj)
{
    GC_ASSERT(during_gc);
    GC_ASSERT(!objspace->flags.during_reference_updating);

    rgengc_check_relation(objspace, obj);
    if (!gc_mark_set(objspace, obj)) return; /* already marked */

    RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
                   (void *)obj, obj_type_name(obj),
                   (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));

    if (RB_UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
        gc_mark_check_t_none(objspace, obj);
    }

    gc_aging(objspace, obj);
    gc_grey(objspace, obj);
}

static inline void
gc_pin(rb_objspace_t *objspace, VALUE obj)
{
    if (RB_UNLIKELY(objspace->flags.during_compacting)) {
        if (RB_LIKELY(during_gc)) {
            if (!RVALUE_PINNED(objspace, obj)) {
                GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
                GET_HEAP_PAGE(obj)->pinned_slots++;
                MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
            }
        }
    }
}
void
rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
{
    rb_objspace_t *objspace = objspace_ptr;

    if (RB_UNLIKELY(objspace->flags.during_reference_updating)) {
        GC_ASSERT(objspace->flags.during_compacting);
        GC_ASSERT(during_gc);

        VALUE destination = rb_gc_impl_location(objspace, *ptr);
        if (destination != *ptr) {
            *ptr = destination;
        }
    }
    else {
        gc_mark(objspace, *ptr);
    }
}

void
rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
{
    rb_objspace_t *objspace = objspace_ptr;

    gc_mark(objspace, obj);
}

void
rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
{
    rb_objspace_t *objspace = objspace_ptr;

    gc_mark(objspace, obj);
    gc_pin(objspace, obj);
}

void
rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
{
    rb_objspace_t *objspace = objspace_ptr;

    (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));

    if (is_pointer_to_heap(objspace, (void *)obj)) {
        asan_unpoisoning_object(obj) {
            /* Garbage can live on the stack, so do not mark or pin dead
             * slots. */
            switch (BUILTIN_TYPE(obj)) {
              case T_ZOMBIE:
              case T_NONE:
                break;
              default:
                rb_gc_impl_mark_and_pin(objspace, obj);
                break;
            }
        }
    }
}

static int
pin_value(st_data_t key, st_data_t value, st_data_t data)
{
    rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);

    return ST_CONTINUE;
}
static void
gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
{
    bool old_p = RVALUE_OLD_P(objspace, obj);

    asan_unpoison_memory_region(&objspace->rgengc.parent_object, sizeof(objspace->rgengc.parent_object), false);
    asan_unpoison_memory_region(&objspace->rgengc.parent_object_old_p, sizeof(objspace->rgengc.parent_object_old_p), false);
    objspace->rgengc.parent_object = obj;
    objspace->rgengc.parent_object_old_p = old_p;
}

static void
gc_mark_set_parent_invalid(rb_objspace_t *objspace)
{
    /* Poison the parent fields so stray reads outside of marking are caught
     * by ASAN. */
    asan_poison_memory_region(&objspace->rgengc.parent_object, sizeof(objspace->rgengc.parent_object));
    asan_poison_memory_region(&objspace->rgengc.parent_object_old_p, sizeof(objspace->rgengc.parent_object_old_p));
}

static void
mark_roots(rb_objspace_t *objspace, const char **categoryp)
{
#define MARK_CHECKPOINT(category) do { \
    if (categoryp) *categoryp = category; \
} while (0)

    MARK_CHECKPOINT("objspace");

    if (finalizer_table != NULL) {
        st_foreach(finalizer_table, pin_value, (st_data_t)objspace);
    }

    if (stress_to_class) rb_gc_mark(stress_to_class);

    rb_gc_save_machine_context();
    rb_gc_mark_roots(objspace, categoryp);
    gc_mark_set_parent_invalid(objspace);

#undef MARK_CHECKPOINT
}

static inline void
gc_mark_children(rb_objspace_t *objspace, VALUE obj)
{
    gc_mark_set_parent(objspace, obj);
    rb_gc_mark_children(objspace, obj);
    gc_mark_set_parent_invalid(objspace);
}
static inline int
gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
{
    mark_stack_t *mstack = &objspace->mark_stack;
    VALUE obj;
    size_t marked_slots_at_the_beginning = objspace->marked_slots;
    size_t popped_count = 0;

    while (pop_mark_stack(mstack, &obj)) {
        if (obj == Qundef) continue; /* skip */

        if (RGENGC_CHECK_MODE && !RVALUE_MARKED(objspace, obj)) {
            rb_bug("gc_mark_stacked_objects: %s is not marked.", rb_obj_info(obj));
        }
        gc_mark_children(objspace, obj);

        if (incremental) {
            if (RGENGC_CHECK_MODE && !RVALUE_MARKING(objspace, obj)) {
                rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
            }
            CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
            popped_count++;

            if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
                break;
            }
        }
    }

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    if (is_mark_stack_empty(mstack)) {
        shrink_stack_chunk_cache(mstack);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static int
gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
{
    return gc_mark_stacked_objects(objspace, TRUE, count);
}

static int
gc_mark_stacked_objects_all(rb_objspace_t *objspace)
{
    return gc_mark_stacked_objects(objspace, FALSE, 0);
}
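/*
 * RGENGC_CHECK_MODE >= 4 debugging aid: record, for every object, the set
 * of roots and objects that referenced it during marking, so that a missed
 * write barrier can be traced back to the referrer.
 */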
#if RGENGC_CHECK_MODE >= 4

#define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
#define IS_ROOTSIG(obj)   ((VALUE)(obj) & 0x01)
#define GET_ROOTSIG(obj)  ((const char *)((VALUE)(obj) >> 1))

struct reflist {
    VALUE *list;
    int pos;
    int size;
};

static struct reflist *
reflist_create(VALUE obj)
{
    struct reflist *refs = xmalloc(sizeof(struct reflist));
    refs->size = 1;
    refs->list = ALLOC_N(VALUE, refs->size);
    refs->list[0] = obj;
    refs->pos = 1;
    return refs;
}

static void
reflist_destruct(struct reflist *refs)
{
    xfree(refs->list);
    xfree(refs);
}

static void
reflist_add(struct reflist *refs, VALUE obj)
{
    if (refs->pos == refs->size) {
        refs->size *= 2;
        SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
    }

    refs->list[refs->pos++] = obj;
}

static void
reflist_dump(struct reflist *refs)
{
    int i;
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj)) { /* root */
            fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
        }
        else {
            fprintf(stderr, "<%s>", rb_obj_info(obj));
        }
        if (i+1 < refs->pos) fprintf(stderr, ", ");
    }
}

static int
reflist_referred_from_machine_context(struct reflist *refs)
{
    int i;
    for (i=0; i<refs->pos; i++) {
        VALUE obj = refs->list[i];
        if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
    }
    return 0;
}
struct allrefs {
    rb_objspace_t *objspace;
    /* a map from each object to the reflist of everything that refers to it */
    struct st_table *references;
    const char *category;
    VALUE root_obj;
    mark_stack_t mark_stack;
};

static int
allrefs_add(struct allrefs *data, VALUE obj)
{
    struct reflist *refs;
    st_data_t r;

    if (st_lookup(data->references, obj, &r)) {
        refs = (struct reflist *)r;
        reflist_add(refs, data->root_obj);
        return 0;
    }
    else {
        refs = reflist_create(data->root_obj);
        st_insert(data->references, obj, (st_data_t)refs);
        return 1;
    }
}

static void
allrefs_i(VALUE obj, void *ptr)
{
    struct allrefs *data = (struct allrefs *)ptr;

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    }
}

static void
allrefs_roots_i(VALUE obj, void *ptr)
{
    struct allrefs *data = (struct allrefs *)ptr;
    if (strlen(data->category) == 0) rb_bug("!!!");
    data->root_obj = MAKE_ROOTSIG(data->category);

    if (allrefs_add(data, obj)) {
        push_mark_stack(&data->mark_stack, obj);
    }
}

#define PUSH_MARK_FUNC_DATA(v) do { \
    struct gc_mark_func_data_struct *prev_mark_func_data = GET_VM()->gc.mark_func_data; \
    GET_VM()->gc.mark_func_data = (v);

#define POP_MARK_FUNC_DATA() GET_VM()->gc.mark_func_data = prev_mark_func_data;} while (0)

static st_table *
objspace_allrefs(rb_objspace_t *objspace)
{
    struct allrefs data;
    struct gc_mark_func_data_struct mfd;
    VALUE obj;
    int prev_dont_gc = dont_gc_val();
    dont_gc_on();

    data.objspace = objspace;
    data.references = st_init_numtable();
    init_mark_stack(&data.mark_stack);

    mfd.mark_func = allrefs_roots_i;
    mfd.data = &data;

    /* traverse root objects */
    PUSH_MARK_FUNC_DATA(&mfd);
    GET_VM()->gc.mark_func_data = &mfd;
    mark_roots(objspace, &data.category);
    POP_MARK_FUNC_DATA();

    /* traverse the rest of the objects reachable from the roots */
    while (pop_mark_stack(&data.mark_stack, &obj)) {
        rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
    }
    free_stack_chunks(&data.mark_stack);

    dont_gc_set(prev_dont_gc);
    return data.references;
}

static int
objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
{
    struct reflist *refs = (struct reflist *)value;
    reflist_destruct(refs);
    return ST_CONTINUE;
}

static void
objspace_allrefs_destruct(struct st_table *refs)
{
    st_foreach(refs, objspace_allrefs_destruct_i, 0);
    st_free_table(refs);
}
#if RGENGC_CHECK_MODE >= 5
static int
allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = (VALUE)k;
    struct reflist *refs = (struct reflist *)v;
    fprintf(stderr, "[allrefs_dump_i] %s <- ", rb_obj_info(obj));
    reflist_dump(refs);
    fprintf(stderr, "\n");
    return ST_CONTINUE;
}

static void
allrefs_dump(rb_objspace_t *objspace)
{
    VALUE size = objspace->rgengc.allrefs_table->num_entries;
    fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
    st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
}
#endif

static int
gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
{
    VALUE obj = k;
    struct reflist *refs = (struct reflist *)v;
    rb_objspace_t *objspace = (rb_objspace_t *)ptr;

    /* object should be marked or oldgen */
    if (!RVALUE_MARKED(objspace, obj)) {
        fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", rb_obj_info(obj));
        fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
        reflist_dump(refs);

        if (reflist_referred_from_machine_context(refs)) {
            fprintf(stderr, " (marked from machine stack).\n");
            /* marked from the machine context can be a false positive */
        }
        else {
            objspace->rgengc.error_count++;
            fprintf(stderr, "\n");
        }
    }
    return ST_CONTINUE;
}

static void
gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
{
    size_t saved_malloc_increase = objspace->malloc_params.increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    size_t saved_oldmalloc_increase = objspace->malloc_counters.oldmalloc_increase;
#endif

    objspace->rgengc.allrefs_table = objspace_allrefs(objspace);

    if (checker_func) {
        st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
    }

    if (objspace->rgengc.error_count > 0) {
#if RGENGC_CHECK_MODE >= 5
        allrefs_dump(objspace);
#endif
        if (checker_name) rb_bug("%s: GC has problem.", checker_name);
    }

    objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
    objspace->rgengc.allrefs_table = 0;

    objspace->malloc_params.increase = saved_malloc_increase;
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->malloc_counters.oldmalloc_increase = saved_oldmalloc_increase;
#endif
}
#endif /* RGENGC_CHECK_MODE >= 4 */
struct verify_internal_consistency_struct {
    rb_objspace_t *objspace;
    int err_count;
    size_t live_object_count;
    size_t zombie_object_count;

    VALUE parent;
    size_t old_object_count;
    size_t remembered_shady_count;
};

static void
check_generation_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    const VALUE parent = data->parent;

    if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(data->objspace, parent));

    if (!RVALUE_OLD_P(data->objspace, child)) {
        if (!RVALUE_REMEMBERED(data->objspace, parent) &&
            !RVALUE_REMEMBERED(data->objspace, child) &&
            !RVALUE_UNCOLLECTIBLE(data->objspace, child)) {
            fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", rb_obj_info(parent), rb_obj_info(child));
            data->err_count++;
        }
    }
}

static void
check_color_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    const VALUE parent = data->parent;

    if (!RVALUE_WB_UNPROTECTED(data->objspace, parent) && RVALUE_WHITE_P(data->objspace, child)) {
        fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
                rb_obj_info(parent), rb_obj_info(child));
        data->err_count++;
    }
}

static void
check_children_i(const VALUE child, void *ptr)
{
    struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
    if (check_rvalue_consistency_force(data->objspace, child, FALSE) != 0) {
        fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
                rb_obj_info(child), rb_obj_info(data->parent));
        data->err_count++;
    }
}

static int
verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
                              struct verify_internal_consistency_struct *data)
{
    VALUE obj;
    rb_objspace_t *objspace = data->objspace;

    for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
        asan_unpoisoning_object(obj) {
            if (!rb_gc_impl_garbage_object_p(objspace, obj)) {
                /* count objects */
                data->live_object_count++;
                data->parent = obj;

                /* Normally we don't expect T_MOVED objects in the heap, but
                 * they can stay alive on the stack. */
                if (!gc_object_moved_p(objspace, obj)) {
                    /* moved slots don't have children */
                    rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
                }

                /* check health of children */
                if (RVALUE_OLD_P(objspace, obj)) data->old_object_count++;
                if (RVALUE_WB_UNPROTECTED(objspace, obj) && RVALUE_UNCOLLECTIBLE(objspace, obj)) data->remembered_shady_count++;

                if (!is_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
                    /* reachable objects from an oldgen object should be
                     * old or (young with remember) */
                    data->parent = obj;
                    rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
                }

                if (!is_marking(objspace) && rb_gc_obj_shareable_p(obj)) {
                    rb_gc_verify_shareable(obj);
                }

                if (is_incremental_marking(objspace)) {
                    if (RVALUE_BLACK_P(objspace, obj)) {
                        /* children of black objects must be black or grey */
                        data->parent = obj;
                        rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
                    }
                }
            }
            else {
                if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
                    data->zombie_object_count++;

                    if ((RBASIC(obj)->flags & ~ZOMBIE_OBJ_KEPT_FLAGS) != T_ZOMBIE) {
                        fprintf(stderr, "verify_internal_consistency_i: T_ZOMBIE has extra flags set: %s\n",
                                rb_obj_info(obj));
                        data->err_count++;
                    }

                    if (!!FL_TEST(obj, FL_FINALIZE) != !!st_is_member(finalizer_table, obj)) {
                        fprintf(stderr, "verify_internal_consistency_i: FL_FINALIZE %s but %s finalizer_table: %s\n",
                                FL_TEST(obj, FL_FINALIZE) ? "set" : "not set", st_is_member(finalizer_table, obj) ? "in" : "not in",
                                rb_obj_info(obj));
                        data->err_count++;
                    }
                }
            }
        }
    }

    return 0;
}
static int
gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
{
    unsigned int has_remembered_shady = FALSE;
    unsigned int has_remembered_old = FALSE;
    int remembered_old_objects = 0;
    int free_objects = 0;
    int zombie_objects = 0;

    short slot_size = page->slot_size;
    uintptr_t start = (uintptr_t)page->start;
    uintptr_t end = start + page->total_slots * slot_size;

    for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
        VALUE val = (VALUE)ptr;
        asan_unpoisoning_object(val) {
            if (BUILTIN_TYPE(val) == T_NONE) free_objects++;
            if (BUILTIN_TYPE(val) == T_ZOMBIE) zombie_objects++;
            if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
                has_remembered_shady = TRUE;
            }
            if (RVALUE_PAGE_MARKING(page, val)) {
                has_remembered_old = TRUE;
                remembered_old_objects++;
            }
        }
    }

    if (!is_incremental_marking(objspace) &&
        page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {

        for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
            VALUE val = (VALUE)ptr;
            if (RVALUE_PAGE_MARKING(page, val)) {
                fprintf(stderr, "marking -> %s\n", rb_obj_info(val));
            }
        }
        rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
               (void *)page, remembered_old_objects, obj ? rb_obj_info(obj) : "");
    }

    if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
        rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
               (void *)page, obj ? rb_obj_info(obj) : "");
    }

    if (page->free_slots != free_objects) {
        rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
    }

    if (page->final_slots != zombie_objects) {
        rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
    }

    return remembered_old_objects;
}

static int
gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
{
    int remembered_old_objects = 0;
    struct heap_page *page = 0;

    ccan_list_for_each(head, page, page_node) {
        asan_unlock_freelist(page);
        struct free_slot *p = page->freelist;
        while (p) {
            VALUE vp = (VALUE)p;
            VALUE prev = vp;
            rb_asan_unpoison_object(vp, false);
            if (BUILTIN_TYPE(vp) != T_NONE) {
                fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", rb_obj_info(vp));
            }
            p = p->next;
            rb_asan_poison_object(prev);
        }
        asan_lock_freelist(page);

        if (page->flags.has_remembered_objects == FALSE) {
            remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
        }
    }

    return remembered_old_objects;
}

static int
gc_verify_heap_pages(rb_objspace_t *objspace)
{
    int remembered_old_objects = 0;
    for (int i = 0; i < HEAP_COUNT; i++) {
        remembered_old_objects += gc_verify_heap_pages_(objspace, &((&heaps[i])->pages));
    }
    return remembered_old_objects;
}
static void
gc_verify_internal_consistency_(rb_objspace_t *objspace)
{
    struct verify_internal_consistency_struct data = {0};

    data.objspace = objspace;
    gc_report(5, objspace, "gc_verify_internal_consistency: start\n");

    /* check relations */
    for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
        struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
        short slot_size = page->slot_size;

        uintptr_t start = (uintptr_t)page->start;
        uintptr_t end = start + page->total_slots * slot_size;

        verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
    }

    if (data.err_count != 0) {
#if RGENGC_CHECK_MODE >= 5
        objspace->rgengc.error_count = data.err_count;
        gc_marks_check(objspace, NULL, NULL);
        allrefs_dump(objspace);
#endif
        rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
    }

    /* check heap_page status */
    gc_verify_heap_pages(objspace);

    /* check counters */
    ractor_cache_flush_count(objspace, rb_gc_get_ractor_newobj_cache());

    if (!is_lazy_sweeping(objspace) &&
        !finalizing &&
        !rb_gc_multi_ractor_p()) {
        if (objspace_live_slots(objspace) != data.live_object_count) {
            fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
                    total_final_slots_count(objspace), total_freed_objects(objspace));
            rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace_live_slots(objspace), data.live_object_count);
        }
    }

    if (!is_marking(objspace)) {
        if (objspace->rgengc.old_objects != data.old_object_count) {
            rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace->rgengc.old_objects, data.old_object_count);
        }
        if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
            rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
                   objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
        }
    }

    if (!finalizing) {
        size_t list_count = 0;

        {
            VALUE z = heap_pages_deferred_final;
            while (z) {
                list_count++;
                z = RZOMBIE(z)->next;
            }
        }

        if (total_final_slots_count(objspace) != data.zombie_object_count ||
            total_final_slots_count(objspace) != list_count) {

            rb_bug("inconsistent finalizing object count:\n"
                   "  expect %"PRIuSIZE"\n"
                   "  but    %"PRIuSIZE" zombies\n"
                   "  heap_pages_deferred_final list has %"PRIuSIZE" items.",
                   total_final_slots_count(objspace),
                   data.zombie_object_count,
                   list_count);
        }
    }

    gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
}

static void
gc_verify_internal_consistency(void *objspace_ptr)
{
    rb_objspace_t *objspace = objspace_ptr;

    unsigned int lev = RB_GC_VM_LOCK();
    {
        rb_gc_vm_barrier(); /* stop other ractors */

        unsigned int prev_during_gc = during_gc;
        during_gc = FALSE; /* stop gc here */
        {
            gc_verify_internal_consistency_(objspace);
        }
        during_gc = prev_during_gc;
    }
    RB_GC_VM_UNLOCK(lev);
}
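/*
 * Incremental marking keeps a per-heap list of "pooled" pages: pages with
 * free slots reserved for allocation between mark steps. Once marking no
 * longer needs them, they are appended back onto the regular free page
 * list.
 */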
static void
heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
{
    if (heap->pooled_pages) {
        if (heap->free_pages) {
            struct heap_page *free_pages_tail = heap->free_pages;
            while (free_pages_tail->free_next) {
                free_pages_tail = free_pages_tail->free_next;
            }
            free_pages_tail->free_next = heap->pooled_pages;
        }
        else {
            heap->free_pages = heap->pooled_pages;
        }

        heap->pooled_pages = NULL;
    }
}

static int
gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *uncollectible_bits = &page->uncollectible_bits[0];

    if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
        page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
        MARK_IN_BITMAP(uncollectible_bits, obj);
        objspace->rgengc.uncollectible_wb_unprotected_objects++;

#if RGENGC_PROFILE > 0
        objspace->profile.total_remembered_shady_object_count++;
#if RGENGC_PROFILE >= 2
        objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
        return TRUE;
    }
    else {
        return FALSE;
    }
}
static void
gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits, short slot_size)
{
    if (bits) {
        do {
            if (bits & 1) {
                gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", rb_obj_info((VALUE)p));
                GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, (VALUE)p));
                GC_ASSERT(RVALUE_MARKED(objspace, (VALUE)p));
                gc_mark_children(objspace, (VALUE)p);
            }
            p += slot_size;
            bits >>= 1;
        } while (bits);
    }
}

static void
gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;

    ccan_list_for_each(&heap->pages, page, page_node) {
        bits_t *mark_bits = page->mark_bits;
        bits_t *wbun_bits = page->wb_unprotected_bits;
        uintptr_t p = page->start;
        short slot_size = page->slot_size;
        int total_slots = page->total_slots;
        int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
        size_t j;

        for (j=0; j<(size_t)bitmap_plane_count; j++) {
            bits_t bits = mark_bits[j] & wbun_bits[j];
            gc_marks_wb_unprotected_objects_plane(objspace, p, bits, slot_size);
            p += BITS_BITLENGTH * slot_size;
        }
    }

    gc_mark_stacked_objects_all(objspace);
}
void
rb_gc_impl_declare_weak_references(void *objspace_ptr, VALUE obj)
{
    rb_objspace_t *objspace = objspace_ptr;

    rb_darray_append_without_gc(&objspace->weak_references, obj);
}

bool
rb_gc_impl_handle_weak_references_alive_p(void *objspace_ptr, VALUE obj)
{
    rb_objspace_t *objspace = objspace_ptr;

    bool marked = RVALUE_MARKED(objspace, obj);
    if (marked) {
        rgengc_check_relation(objspace, obj);
    }

    return marked;
}

static void
gc_update_weak_references(rb_objspace_t *objspace)
{
    size_t i;
    VALUE *obj_ptr;

    rb_darray_foreach(objspace->weak_references, i, obj_ptr) {
        gc_mark_set_parent(objspace, *obj_ptr);
        rb_gc_handle_weak_references(*obj_ptr);
        gc_mark_set_parent_invalid(objspace);
    }

    size_t capa = rb_darray_capa(objspace->weak_references);
    size_t size = rb_darray_size(objspace->weak_references);
    objspace->profile.weak_references_count = size;

    rb_darray_clear(objspace->weak_references);

    /* Shrink the weak reference buffer when it is mostly unused. */
    if (capa > size * 4) {
        rb_darray_resize_capa_without_gc(&objspace->weak_references, size * 2);
    }
}
static void
gc_marks_finish(rb_objspace_t *objspace)
{
    /* finish incremental GC */
    if (is_incremental_marking(objspace)) {
        if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
            rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
                   mark_stack_size(&objspace->mark_stack));
        }

        while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);

#if RGENGC_CHECK_MODE >= 2
        if (gc_verify_heap_pages(objspace) != 0) {
            rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
        }
#endif

        objspace->flags.during_incremental_marking = FALSE;
        /* check the children of all marked wb-unprotected objects */
        for (int i = 0; i < HEAP_COUNT; i++) {
            gc_marks_wb_unprotected_objects(objspace, &heaps[i]);
        }
    }

    gc_update_weak_references(objspace);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

#if RGENGC_CHECK_MODE >= 4
    gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
#endif

    {
        /* decide full GC is needed or not */
        const unsigned long r_mul = objspace->live_ractor_cache_count > 8 ? 8 : objspace->live_ractor_cache_count;

        size_t total_slots = objspace_available_slots(objspace);
        size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
        size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
        size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
        if (min_free_slots < gc_params.heap_free_slots * r_mul) {
            min_free_slots = gc_params.heap_free_slots * r_mul;
        }

        int full_marking = is_full_marking(objspace);

        /* Setup freeable pages. */
        size_t total_init_slots = 0;
        for (int i = 0; i < HEAP_COUNT; i++) {
            total_init_slots += (gc_params.heap_init_bytes / heaps[i].slot_size) * r_mul;
        }

        if (max_free_slots < total_init_slots) {
            max_free_slots = total_init_slots;
        }

        if (sweep_slots > max_free_slots) {
            size_t excess_slots = sweep_slots - max_free_slots;
            size_t total_heap_pages = heap_eden_total_pages(objspace);
            heap_pages_freeable_pages = total_heap_pages > 0
                ? excess_slots * total_heap_pages / total_slots
                : 0;
        }
        else {
            heap_pages_freeable_pages = 0;
        }

        if (objspace->heap_pages.allocatable_bytes == 0 && sweep_slots < min_free_slots) {
            if (!full_marking && sweep_slots < min_free_slots * 7 / 8) {
                if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
                    full_marking = TRUE;
                }
                else {
                    gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
                    gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
                }
            }

            if (full_marking) {
                heap_allocatable_bytes_expand(objspace, NULL, sweep_slots, total_slots, heaps[0].slot_size);
            }
        }

        if (full_marking) {
            const double r = gc_params.oldobject_limit_factor;
            objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
                (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
                (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
            );
            objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
        }

        if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
            gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
        }
        if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
            gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
        }

        gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
                  "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
                  "sweep %"PRIdSIZE" slots, allocatable %"PRIdSIZE" bytes, next GC: %s)\n",
                  objspace->marked_slots, objspace->rgengc.old_objects, total_slots,
                  sweep_slots, objspace->heap_pages.allocatable_bytes,
                  gc_needs_major_flags ? "major" : "minor");
    }
    rb_ractor_finish_marking();
}

static bool
gc_compact_heap_cursors_met_p(rb_heap_t *heap)
{
    return heap->sweeping_page == heap->compact_cursor;
}

static rb_heap_t *
gc_compact_destination_pool(rb_objspace_t *objspace, rb_heap_t *src_pool, VALUE obj)
{
    size_t obj_size = rb_gc_obj_optimal_size(obj);
    if (obj_size == 0) {
        return src_pool;
    }

    GC_ASSERT(rb_gc_impl_size_allocatable_p(obj_size));
    size_t idx = heap_idx_for_size(obj_size);
    return &heaps[idx];
}

static bool
gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, VALUE src)
{
    GC_ASSERT(gc_is_moveable_obj(objspace, src));

    rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, heap, src);
    uint32_t orig_shape = 0;
    uint32_t new_shape = 0;

    if (gc_compact_heap_cursors_met_p(dest_pool)) {
        return dest_pool != heap;
    }

    if (RB_TYPE_P(src, T_OBJECT)) {
        orig_shape = rb_gc_get_shape(src);

        if (dest_pool != heap) {
            new_shape = rb_gc_rebuild_shape(src, dest_pool - heaps);

            if (new_shape == 0) {
                dest_pool = heap;
            }
        }
    }

    while (!try_move(objspace, dest_pool, dest_pool->free_pages, src)) {
        struct gc_sweep_context ctx = {
            .page = dest_pool->sweeping_page,
            .final_slots = 0,
            .freed_slots = 0,
            .empty_slots = 0,
        };

        /* The page of src could be partially compacted, so it may contain
         * T_MOVED slots. Sweeping a page may read objects on this page, so
         * we lock the page body around the sweep. */
        lock_page_body(objspace, GET_PAGE_BODY(src));
        gc_sweep_page(objspace, dest_pool, &ctx);
        unlock_page_body(objspace, GET_PAGE_BODY(src));

        if (dest_pool->sweeping_page->free_slots > 0) {
            heap_add_freepage(dest_pool, dest_pool->sweeping_page);
        }

        dest_pool->sweeping_page = ccan_list_next(&dest_pool->pages, dest_pool->sweeping_page, page_node);
        if (gc_compact_heap_cursors_met_p(dest_pool)) {
            return dest_pool != heap;
        }
    }

    if (orig_shape != 0) {
        if (new_shape != 0) {
            VALUE dest = rb_gc_impl_location(objspace, src);
            rb_gc_set_shape(dest, new_shape);
        }
        RMOVED(src)->original_shape_id = orig_shape;
    }

    return true;
}
static bool
gc_compact_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
{
    short slot_size = page->slot_size;

    do {
        VALUE vp = (VALUE)p;
        GC_ASSERT(vp % sizeof(VALUE) == 0);

        if (bitset & 1) {
            if (gc_is_moveable_obj(objspace, vp)) {
                if (!gc_compact_move(objspace, heap, vp)) {
                    /* The cursors met; bubble the failure up. */
                    return false;
                }
            }
        }
        p += slot_size;
        bitset >>= 1;
    } while (bitset);

    return true;
}

static bool
gc_compact_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
{
    GC_ASSERT(page == heap->compact_cursor);

    bits_t *mark_bits, *pin_bits;
    bits_t bitset;
    uintptr_t p = page->start;
    short slot_size = page->slot_size;
    int total_slots = page->total_slots;
    int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);

    mark_bits = page->mark_bits;
    pin_bits = page->pinned_bits;

    for (int j = 0; j < bitmap_plane_count; j++) {
        /* Objects that are marked but not pinned are moveable. */
        bitset = (mark_bits[j] & ~pin_bits[j]);

        if (bitset) {
            if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
                return false;
        }
        p += BITS_BITLENGTH * slot_size;
    }

    return true;
}

static bool
gc_compact_all_compacted_p(rb_objspace_t *objspace)
{
    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];

        if (heap->total_pages > 0 &&
                !gc_compact_heap_cursors_met_p(heap)) {
            return false;
        }
    }

    return true;
}

static void
gc_compact(rb_objspace_t *objspace)
{
#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    while (!gc_compact_all_compacted_p(objspace)) {
        for (int i = 0; i < HEAP_COUNT; i++) {
            rb_heap_t *heap = &heaps[i];

            if (gc_compact_heap_cursors_met_p(heap)) {
                continue;
            }

            struct heap_page *start_page = heap->compact_cursor;

            if (!gc_compact_page(objspace, heap, start_page)) {
                lock_page_body(objspace, start_page->body);

                continue;
            }

            /* Everything on the cursor page was moved, so re-protect it and
             * move the cursor back one page. */
            lock_page_body(objspace, start_page->body);
            heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
        }
    }

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif
}
static void
gc_marks_rest(rb_objspace_t *objspace)
{
    gc_report(1, objspace, "gc_marks_rest\n");

    for (int i = 0; i < HEAP_COUNT; i++) {
        (&heaps[i])->pooled_pages = NULL;
    }

    if (is_incremental_marking(objspace)) {
        while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
    }
    else {
        gc_mark_stacked_objects_all(objspace);
    }

    gc_marks_finish(objspace);
}

static bool
gc_marks_step(rb_objspace_t *objspace, size_t slots)
{
    bool marking_finished = false;

    if (gc_mark_stacked_objects_incremental(objspace, slots)) {
        gc_marks_finish(objspace);

        marking_finished = true;
    }

    return marking_finished;
}

static bool
gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
{
    GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
    bool marking_finished = true;

    if (heap->free_pages) {
        gc_report(2, objspace, "gc_marks_continue: has pooled pages");

        marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
    }
    else {
        gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
                  mark_stack_size(&objspace->mark_stack));
        heap->force_incremental_marking_finish_count++;
        gc_marks_rest(objspace);
    }

    return marking_finished;
}
static void
gc_marks_start(rb_objspace_t *objspace, int full_mark)
{
    /* start marking */
    gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
    gc_mode_transition(objspace, gc_mode_marking);

    if (full_mark) {
        size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
        objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;

        if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
                       "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
                       "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
                       objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
        objspace->flags.during_minor_gc = FALSE;
        if (ruby_enable_autocompact) {
            objspace->flags.during_compacting |= TRUE;
        }
        objspace->profile.major_gc_count++;
        objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
        objspace->rgengc.old_objects = 0;
        objspace->rgengc.last_major_gc = objspace->profile.count;
        objspace->marked_slots = 0;

        for (int i = 0; i < HEAP_COUNT; i++) {
            rb_heap_t *heap = &heaps[i];
            rgengc_mark_and_rememberset_clear(objspace, heap);
            heap_move_pooled_pages_to_free_pages(heap);

            if (objspace->flags.during_compacting) {
                struct heap_page *page = NULL;

                ccan_list_for_each(&heap->pages, page, page_node) {
                    page->pinned_slots = 0;
                }
            }
        }
    }
    else {
        objspace->flags.during_minor_gc = TRUE;
        objspace->marked_slots =
            objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
        objspace->profile.minor_gc_count++;

        for (int i = 0; i < HEAP_COUNT; i++) {
            rgengc_rememberset_mark(objspace, &heaps[i]);
        }
    }

    mark_roots(objspace, NULL);

    gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
              full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
}

static bool
gc_marks(rb_objspace_t *objspace, int full_mark)
{
    gc_prof_mark_timer_start(objspace);

    bool marking_finished = false;

    /* setup marking */
    gc_marks_start(objspace, full_mark);
    if (!is_incremental_marking(objspace)) {
        gc_marks_rest(objspace);
        marking_finished = true;
    }

#if RGENGC_PROFILE > 0
    if (gc_prof_record(objspace)) {
        gc_profile_record *record = gc_prof_record(objspace);
        record->old_objects = objspace->rgengc.old_objects;
    }
#endif

    return marking_finished;
}
static void
gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
{
    if (level <= RGENGC_DEBUG) {
        char buf[1024];
        FILE *out = stderr;
        va_list args;
        const char *status = " ";

        if (during_gc) {
            status = is_full_marking(objspace) ? "+" : "-";
        }
        else {
            if (is_lazy_sweeping(objspace)) {
                status = "S";
            }
            if (is_incremental_marking(objspace)) {
                status = "M";
            }
        }

        va_start(args, fmt);
        vsnprintf(buf, 1024, fmt, args);
        va_end(args);

        fprintf(out, "%s|", status);
        fputs(buf, out);
    }
}
/* bit operations */

static int
rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
{
    struct heap_page *page = GET_HEAP_PAGE(obj);
    bits_t *bits = &page->remembered_bits[0];

    if (MARKED_IN_BITMAP(bits, obj)) {
        return FALSE;
    }
    else {
        page->flags.has_remembered_objects = TRUE;
        MARK_IN_BITMAP(bits, obj);
        return TRUE;
    }
}

/* Return FALSE if the object was already remembered. */
static int
rgengc_remember(rb_objspace_t *objspace, VALUE obj)
{
    gc_report(6, objspace, "rgengc_remember: %s %s\n", rb_obj_info(obj),
              RVALUE_REMEMBERED(objspace, obj) ? "was already remembered" : "is remembered now");

    check_rvalue_consistency(objspace, obj);

    if (RGENGC_CHECK_MODE) {
        if (RVALUE_WB_UNPROTECTED(objspace, obj)) rb_bug("rgengc_remember: %s is not wb protected.", rb_obj_info(obj));
    }

#if RGENGC_PROFILE > 0
    if (!RVALUE_REMEMBERED(objspace, obj)) {
        if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0) {
            objspace->profile.total_remembered_normal_object_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
#endif
        }
    }
#endif

    return rgengc_remembersetbits_set(objspace, obj);
}
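/*
 * A minor GC does not retrace old objects; instead it re-marks the children
 * of every remembered object. rgengc_rememberset_mark() ORs the remembered
 * bits with the uncollectible WB-unprotected bits, clears the remembered
 * bits, and re-marks each object found.
 */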
#ifndef PROFILE_REMEMBERSET_MARK
#define PROFILE_REMEMBERSET_MARK 0
#endif

static inline void
rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset, short slot_size)
{
    if (bitset) {
        do {
            if (bitset & 1) {
                VALUE obj = (VALUE)p;
                gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", rb_obj_info(obj));
                GC_ASSERT(RVALUE_UNCOLLECTIBLE(objspace, obj));
                GC_ASSERT(RVALUE_OLD_P(objspace, obj) || RVALUE_WB_UNPROTECTED(objspace, obj));
                /* ... */
                rb_darray_append_without_gc(&objspace->weak_references, obj);
                /* ... */
            }
            p += slot_size;
            bitset >>= 1;
        } while (bitset);
    }
}

static void
rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
{
    size_t j;
    struct heap_page *page = 0;
#if PROFILE_REMEMBERSET_MARK
    int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: start\n");

    ccan_list_for_each(&heap->pages, page, page_node) {
        if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
            uintptr_t p = page->start;
            short slot_size = page->slot_size;
            int total_slots = page->total_slots;
            int bitmap_plane_count = CEILDIV(total_slots, BITS_BITLENGTH);
            bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
            bits_t *remembered_bits = page->remembered_bits;
            bits_t *uncollectible_bits = page->uncollectible_bits;
            bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
#if PROFILE_REMEMBERSET_MARK
            if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
            else if (page->flags.has_remembered_objects) has_old++;
            else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
#endif
            for (j=0; j < (size_t)bitmap_plane_count; j++) {
                bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
                remembered_bits[j] = 0;
            }
            page->flags.has_remembered_objects = FALSE;

            for (j=0; j < (size_t)bitmap_plane_count; j++) {
                bitset = bits[j];
                rgengc_rememberset_mark_plane(objspace, p, bitset, slot_size);
                p += BITS_BITLENGTH * slot_size;
            }
        }
#if PROFILE_REMEMBERSET_MARK
        else {
            skip++;
        }
#endif
    }

#if PROFILE_REMEMBERSET_MARK
    fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
#endif
    gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
}
static void
rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
{
    struct heap_page *page = 0;

    ccan_list_for_each(&heap->pages, page, page_node) {
        memset(&page->mark_bits[0],          0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->marking_bits[0],       0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->remembered_bits[0],    0, HEAP_PAGE_BITMAP_SIZE);
        memset(&page->pinned_bits[0],        0, HEAP_PAGE_BITMAP_SIZE);
        page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
        page->flags.has_remembered_objects = FALSE;
    }
}
static void
gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(objspace, a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", rb_obj_info(a));
        if ( RVALUE_OLD_P(objspace, b)) rb_bug("gc_writebarrier_generational: %s is an old object.", rb_obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", rb_obj_info(a), rb_obj_info(b));
    }

    /* mark `a' and remember (default behavior) */
    if (!RVALUE_REMEMBERED(objspace, a)) {
        int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            rgengc_remember(objspace, a);
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);

        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", rb_obj_info(a), rb_obj_info(b));
    }

    check_rvalue_consistency(objspace, a);
    check_rvalue_consistency(objspace, b);
}

static void
gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
{
    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) != FALSE) {
        gc_aging(objspace, obj);
        gc_grey(objspace, obj);
    }
    gc_mark_set_parent_invalid(objspace);
}
static void
gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
{
    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, rb_obj_info(b));

    if (RVALUE_BLACK_P(objspace, a)) {
        if (RVALUE_WHITE_P(objspace, b)) {
            if (!RVALUE_WB_UNPROTECTED(objspace, a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, rb_obj_info(b));
                gc_mark_from(objspace, b, a);
            }
        }
        else if (RVALUE_OLD_P(objspace, a) && !RVALUE_OLD_P(objspace, b)) {
            rgengc_remember(objspace, a);
        }

        if (RB_UNLIKELY(objspace->flags.during_compacting)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
        }
    }
}
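/*
 * rb_gc_impl_writebarrier() is the hot path reached whenever a reference
 * from a to b is stored. Outside of incremental marking the only
 * interesting case is an old object pointing at a young one (the
 * generational barrier); during incremental marking the barrier also
 * re-greys black objects that gain white children. A minimal usage sketch
 * (RB_OBJ_WRITE is the public macro that ends up here):
 *
 *     RB_OBJ_WRITE(ary, &ptr[0], obj);  // store obj into ary's slot,
 *                                       // then run the write barrier
 */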
void
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
{
    rb_objspace_t *objspace = objspace_ptr;

#if RGENGC_CHECK_MODE
    if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
    if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
#endif

    GC_ASSERT(!during_gc);

  retry:
    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(objspace, a) || RVALUE_OLD_P(objspace, b)) {
            /* nothing to do */
        }
        else {
            gc_writebarrier_generational(a, b, objspace);
        }
    }
    else {
        bool retry = false;
        /* slow path */
        int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            if (is_incremental_marking(objspace)) {
                gc_writebarrier_incremental(a, b, objspace);
            }
            else {
                retry = true;
            }
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);

        if (retry) goto retry;
    }
    return;
}
void
rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
{
    rb_objspace_t *objspace = objspace_ptr;

    if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
        return;
    }
    else {
        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", rb_obj_info(obj),
                  RVALUE_REMEMBERED(objspace, obj) ? " (already remembered)" : "");

        unsigned int lev = RB_GC_VM_LOCK_NO_BARRIER();
        {
            if (RVALUE_OLD_P(objspace, obj)) {
                gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", rb_obj_info(obj));
                gc_remember_unprotected(objspace, obj);

#if RGENGC_PROFILE
                objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
                objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
#endif
#endif
            }
            else {
                RVALUE_AGE_RESET(obj);
            }

            RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
            MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
        }
        RB_GC_VM_UNLOCK_NO_BARRIER(lev);
    }
}
rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)

    if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace, dest);

    rb_gc_impl_copy_finalizer(objspace, dest, obj);

rb_gc_impl_active_gc_name(void)
rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", rb_obj_info(obj));

    int lev = RB_GC_VM_LOCK_NO_BARRIER();

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(objspace, obj)) {

    else if (RVALUE_OLD_P(objspace, obj)) {

    RB_GC_VM_UNLOCK_NO_BARRIER(lev);
    ID ID_wb_protected, ID_age, ID_old, ID_uncollectible, ID_marking,
       ID_marked, ID_pinned, ID_remembered, ID_object_id, ID_shareable;

#define RB_GC_OBJECT_METADATA_ENTRY_COUNT (sizeof(struct rb_gc_object_metadata_names) / sizeof(ID))

rb_gc_impl_object_metadata(void *objspace_ptr, VALUE obj)

    if (!names.ID_marked) {
#define I(s) names.ID_##s = rb_intern(#s)

#define SET_ENTRY(na, v) do { \
        GC_ASSERT(n <= RB_GC_OBJECT_METADATA_ENTRY_COUNT); \
        object_metadata_entries[n].name = names.ID_##na; \
        object_metadata_entries[n].val = v; \

    if (!RVALUE_WB_UNPROTECTED(objspace, obj)) SET_ENTRY(wb_protected, Qtrue);
    SET_ENTRY(age, INT2FIX(RVALUE_AGE_GET(obj)));
    if (RVALUE_UNCOLLECTIBLE(objspace, obj)) SET_ENTRY(uncollectible, Qtrue);
    if (RVALUE_MARKING(objspace, obj)) SET_ENTRY(marking, Qtrue);
    if (RVALUE_REMEMBERED(objspace, obj)) SET_ENTRY(remembered, Qtrue);
    if (rb_obj_id_p(obj)) SET_ENTRY(object_id, rb_obj_id(obj));

    object_metadata_entries[n].name = 0;
    object_metadata_entries[n].val = 0;

    return object_metadata_entries;
rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)

    objspace->live_ractor_cache_count++;

rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)

    objspace->live_ractor_cache_count--;
    gc_ractor_newobj_cache_clear(cache, NULL);
    if (!heap->free_pages) {
        if (!heap_page_allocate_and_initialize(objspace, heap)) {
            objspace->heap_pages.allocatable_bytes = HEAP_PAGE_SIZE;
            heap_page_allocate_and_initialize(objspace, heap);

    if (dont_gc_val() || during_gc) {
        for (int i = 0; i < HEAP_COUNT; i++) {
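/*
 * Dynamic malloc_limit tuning: when the observed increase exceeds the
 * current limit, the limit grows by malloc_limit_growth_factor (capped at
 * malloc_limit_max); otherwise it decays slowly (by 2% per GC) down to
 * malloc_limit_min. Example with the defaults from the top of this file:
 * a 20 MiB increase against a 16 MiB limit yields a new limit of
 * 20 MiB * 1.4 = 28 MiB, still below the 32 MiB cap.
 */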
    size_t old_limit = malloc_limit;

    if (inc > malloc_limit) {
        malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
        if (malloc_limit > gc_params.malloc_limit_max) {
            malloc_limit = gc_params.malloc_limit_max;
        }
    }
    else {
        malloc_limit = (size_t)(malloc_limit * 0.98);
        if (malloc_limit < gc_params.malloc_limit_min) {
            malloc_limit = gc_params.malloc_limit_min;
        }
    }

    if (old_limit != malloc_limit) {
        fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
                rb_gc_count(), old_limit, malloc_limit);
    }
    else {
        fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
                rb_gc_count(), malloc_limit);
    }
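/*
 * RGENGC_ESTIMATE_OLDMALLOC: bytes malloc'ed while old objects are live
 * are tracked separately; once they exceed oldmalloc_increase_limit a
 * major GC is requested (GPR_FLAG_MAJOR_BY_OLDMALLOC) and the limit
 * grows, while it shrinks again when majors stop being triggered this way.
 */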
#if RGENGC_ESTIMATE_OLDMALLOC
    if (objspace->malloc_counters.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
        gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
        objspace->rgengc.oldmalloc_increase_limit =
            (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);

        if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
            objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
        }
    }

    if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
                   gc_needs_major_flags,
                   objspace->malloc_counters.oldmalloc_increase,
                   objspace->rgengc.oldmalloc_increase_limit,
                   gc_params.oldmalloc_limit_max);

    objspace->malloc_counters.oldmalloc_increase = 0;

    if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
        objspace->rgengc.oldmalloc_increase_limit =
            (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
        if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
            objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
        }
    }
    int lev = RB_GC_VM_LOCK();

#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time();
#endif

#if GC_PROFILE_MORE_DETAIL
    objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
#endif

    RB_GC_VM_UNLOCK(lev);
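/*
 * gc_start decides the shape of the collection: minor vs. full mark,
 * incremental vs. immediate marking, lazy vs. immediate sweep, and whether
 * compaction runs. GC.stress, pending major-GC flags, and the
 * rgengc_allow_full_mark config all feed into this decision before the
 * profiling record is set up and marking begins.
 */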
    unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);

    if (!rb_darray_size(objspace->heap_pages.sorted)) return TRUE;
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;

    GC_ASSERT(gc_mode(objspace) == gc_mode_none, "gc_mode is %s\n", gc_mode_name(gc_mode(objspace)));
    GC_ASSERT(!is_lazy_sweeping(objspace));
    GC_ASSERT(!is_incremental_marking(objspace));

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_start, &lock_lev);

    objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);
#endif

    if (ruby_gc_stressful) {
        int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;

        if ((flag & (1 << gc_stress_no_major)) == 0) {
            do_full_mark = TRUE;
        }

        objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
    }

    if (gc_needs_major_flags) {
        reason |= gc_needs_major_flags;
        do_full_mark = TRUE;
    }

    if (!gc_config_full_mark_val) {
        do_full_mark = FALSE;
    }
    gc_needs_major_flags = GPR_FLAG_NONE;

    if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
        reason |= GPR_FLAG_MAJOR_BY_FORCE;
    }

    if (objspace->flags.dont_incremental ||
        reason & GPR_FLAG_IMMEDIATE_MARK ||
        ruby_gc_stressful) {
        objspace->flags.during_incremental_marking = FALSE;
    }
    else {
        objspace->flags.during_incremental_marking = do_full_mark;
    }

    if (do_full_mark && ruby_enable_autocompact) {
        objspace->flags.during_compacting = TRUE;
#if RGENGC_CHECK_MODE
        objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
#endif
    }
    else {
        objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
    }

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;
    }

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
              reason,
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
#endif
    }
    else {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
    }

    objspace->profile.latest_gc_info = reason;
    objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
    objspace->profile.heap_used_at_gc_start = rb_darray_size(objspace->heap_pages.sorted);
    objspace->profile.heap_total_slots_at_gc_start = objspace_available_slots(objspace);
    objspace->profile.weak_references_count = 0;

    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace, do_full_mark);

    GC_ASSERT(during_gc);

    if (gc_marks(objspace, do_full_mark)) {

    gc_exit(objspace, gc_enter_event_start, &lock_lev);
    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_rest, &lock_lev);

    if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);

    if (is_incremental_marking(objspace)) {

    gc_exit(objspace, gc_enter_event_rest, &lock_lev);
    unsigned int reason;

    if (is_full_marking(objspace))        buff[i++] = 'F';
    if (is_incremental_marking(objspace)) buff[i++] = 'I';

    if (is_lazy_sweeping(objspace))       buff[i++] = 'L';

    static char buff[0x10];
    gc_current_status_fill(objspace, buff);
#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];

    if (direction == 0) { /* enter */
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);
    }
    else { /* exit */
        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);

        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                enter_tick - last_exit_tick,
                exit_tick - enter_tick,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
        last_exit_tick = exit_tick;

        fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                exit_tick - enter_tick,
                last_gc_status, current_gc_status,
                (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
gc_enter_event_cstr(enum gc_enter_event event)

      case gc_enter_event_start: return "start";
      case gc_enter_event_continue: return "continue";
      case gc_enter_event_rest: return "rest";
      case gc_enter_event_finalizer: return "finalizer";

gc_enter_count(enum gc_enter_event event)

      case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
      case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
      case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
      case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
static bool current_process_time(struct timespec *ts);

    if (!current_process_time(ts)) {

static unsigned long long

    if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
        current_process_time(&end_time) &&
        end_time.tv_sec >= ts->tv_sec) {
        return (unsigned long long)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
               (end_time.tv_nsec - ts->tv_nsec);
    *lock_lev = RB_GC_VM_LOCK();

      case gc_enter_event_rest:
      case gc_enter_event_start:
      case gc_enter_event_continue:

    gc_enter_count(event);
    if (RB_UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));

    GC_ASSERT(during_gc != 0);

    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));

    RB_GC_VM_UNLOCK(*lock_lev);
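/*
 * Per-phase timing: when objspace->flags.measure_gc is set, spans of the
 * marking and sweeping phases are accumulated into marking_time_ns and
 * sweeping_time_ns, which GC.stat later reports in milliseconds (see
 * ns_to_ms further down).
 */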
#define MEASURE_GC (objspace->flags.measure_gc)

    GC_ASSERT(during_gc != 0);

    gc_clock_start(&objspace->profile.marking_start_time);

    GC_ASSERT(during_gc != 0);

    objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);

    GC_ASSERT(during_gc != 0);

    gc_clock_start(&objspace->profile.sweeping_start_time);

    GC_ASSERT(during_gc != 0);

    objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
gc_with_gvl(void *ptr)

    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);

int ruby_thread_has_gvl_p(void);

    if (dont_gc_val()) {
    else if (!ruby_thread_has_gvl_p()) {
        oar.reason = reason;

    return garbage_collect(objspace, reason);
gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)

    for (; v != (VALUE)vend; v += stride) {
        asan_unpoisoning_object(v) {
            rb_gc_prepare_heap_process_object(v);
            RVALUE_AGE_SET_CANDIDATE(objspace, v);
rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)

    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |

    int full_marking_p = gc_config_full_mark_val;
    gc_config_full_mark_set(TRUE);

        GC_ASSERT(GC_COMPACTION_SUPPORTED);
        reason |= GPR_FLAG_COMPACT;

    if (!full_mark) reason &= ~GPR_FLAG_FULL_MARK;
    if (!immediate_mark) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
    if (!immediate_sweep) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;

    gc_config_full_mark_set(full_marking_p);
rb_gc_impl_prepare_heap(void *objspace_ptr)

    size_t orig_total_slots = objspace_available_slots(objspace);
    size_t orig_allocatable_bytes = objspace->heap_pages.allocatable_bytes;

    rb_gc_impl_each_objects(objspace, gc_set_candidate_object_i, objspace_ptr);

    double orig_max_free_slots = gc_params.heap_free_slots_max_ratio;
    gc_params.heap_free_slots_max_ratio = 0.0;
    rb_gc_impl_start(objspace, true, true, true, true);
    gc_params.heap_free_slots_max_ratio = orig_max_free_slots;

    objspace->heap_pages.allocatable_bytes = 0;
    heap_pages_freeable_pages = objspace->empty_pages_count;
    heap_pages_free_unused_pages(objspace_ptr);
    GC_ASSERT(heap_pages_freeable_pages == 0);
    GC_ASSERT(objspace->empty_pages_count == 0);
    objspace->heap_pages.allocatable_bytes = orig_allocatable_bytes;

    size_t total_slots = objspace_available_slots(objspace);
    if (orig_total_slots > total_slots) {
        objspace->heap_pages.allocatable_bytes += (orig_total_slots - total_slots) * heaps[0].slot_size;

#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
        GC_ASSERT(st_is_member(finalizer_table, obj));

        GC_ASSERT(RVALUE_MARKED(objspace, obj));
        GC_ASSERT(!RVALUE_PINNED(objspace, obj));

        rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));

void rb_mv_generic_ivar(VALUE src, VALUE dst);
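/*
 * gc_move transplants an object from `src` to `dest` during compaction:
 * the mark, WB-unprotected, uncollectible and remembered bits plus the
 * age are captured first, the payload is memcpy'ed (including the
 * RVALUE_OVERHEAD tail when slot sizes differ), and `src` is then turned
 * into a T_MOVED forwarding cell pointing at `dest`.
 */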
    gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)src, (void *)dest);

    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
    GC_ASSERT(!RVALUE_MARKING(objspace, src));

    marked = RVALUE_MARKED(objspace, src);
    wb_unprotected = RVALUE_WB_UNPROTECTED(objspace, src);
    uncollectible = RVALUE_UNCOLLECTIBLE(objspace, src);
    bool remembered = RVALUE_REMEMBERED(objspace, src);
    age = RVALUE_AGE_GET(src);

    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(src), src);
    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(src), src);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(src), src);
    CLEAR_IN_BITMAP(GET_HEAP_PAGE(src)->remembered_bits, src);

    memcpy((void *)dest, (void *)src, MIN(src_slot_size, slot_size));

    if (RVALUE_OVERHEAD > 0) {
        void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
        void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);

        memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
    }

    memset((void *)src, 0, src_slot_size);
    RVALUE_AGE_SET_BITMAP(src, 0);

        MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);

        CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);

        MARK_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);

        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);

    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
    }

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
    }
    else {
        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
    }

    RVALUE_AGE_SET(dest, age);

    RMOVED(src)->dummy = Qundef;
    RMOVED(src)->destination = dest;

    GET_HEAP_PAGE(src)->heap->total_freed_objects++;
    GET_HEAP_PAGE(dest)->heap->total_allocated_objects++;
#if GC_CAN_COMPILE_COMPACTION
compare_pinned_slots(const void *left, const void *right, void *dummy)

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->pinned_slots - right_page->pinned_slots;

compare_free_slots(const void *left, const void *right, void *dummy)

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;
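/*
 * After sorting with one of the comparators above (pinned-slot or
 * free-slot counts), the heap's page list is rebuilt in that order and
 * pages that still have free slots are re-registered as allocation
 * targets via heap_add_freepage.
 */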
    for (int j = 0; j < HEAP_COUNT; j++) {

        size_t total_pages = heap->total_pages;
        struct heap_page *page = 0, **page_list = malloc(size);

        heap->free_pages = NULL;
        ccan_list_for_each(&heap->pages, page, page_node) {
            page_list[i++] = page;
        }

        GC_ASSERT((size_t)i == total_pages);

        ccan_list_head_init(&heap->pages);

        for (i = 0; i < total_pages; i++) {
            ccan_list_add(&heap->pages, &page_list[i]->page_node);
            if (page_list[i]->free_slots != 0) {
                heap_add_freepage(heap, page_list[i]);
rb_gc_impl_register_pinning_obj(void *objspace_ptr, VALUE obj)

rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)

    return gc_object_moved_p(objspace_ptr, obj);
    page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
    page->flags.has_remembered_objects = FALSE;

    for (; v != (VALUE)vend; v += stride) {
        asan_unpoisoning_object(v) {
            if (RVALUE_WB_UNPROTECTED(objspace, v)) {
                page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
            }
            if (RVALUE_REMEMBERED(objspace, v)) {
                page->flags.has_remembered_objects = TRUE;
            }
            if (page->flags.before_sweep) {
                rb_gc_update_object_references(objspace, v);

                rb_gc_update_object_references(objspace, v);
gc_update_references_weak_table_i(VALUE obj, void *data)

    asan_unpoisoning_object(obj) {

gc_update_references_weak_table_replace_i(VALUE *obj, void *data)

    *obj = rb_gc_location(*obj);
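/*
 * Reference-updating pass of compaction: every live slot is revisited and
 * any pointer to a T_MOVED object is rewritten to its new location. This
 * covers heap pages, the finalizer table, VM-global references, the VM's
 * weak tables, and JIT-compiled code (bracketed by the
 * rb_gc_before/after_updating_jit_code hooks).
 */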
    objspace->flags.during_reference_updating = true;

    rb_gc_before_updating_jit_code();

    for (int i = 0; i < HEAP_COUNT; i++) {
        bool should_set_mark_bits = TRUE;

        ccan_list_for_each(&heap->pages, page, page_node) {
            uintptr_t start = (uintptr_t)page->start;
            uintptr_t end = start + (page->total_slots * heap->slot_size);

            gc_ref_update((void *)start, (void *)end, heap->slot_size, objspace, page);
            if (page == heap->sweeping_page) {
                should_set_mark_bits = FALSE;
            }
            if (should_set_mark_bits) {
                gc_setup_mark_bits(page);

    gc_update_table_refs(finalizer_table);

    rb_gc_update_vm_references((void *)objspace);

    for (int table = 0; table < RB_GC_VM_WEAK_TABLE_COUNT; table++) {
        rb_gc_vm_weak_table_foreach(
            gc_update_references_weak_table_i,
            gc_update_references_weak_table_replace_i,

    rb_gc_after_updating_jit_code();

    objspace->flags.during_reference_updating = false;
#if GC_CAN_COMPILE_COMPACTION
root_obj_check_moved_i(const char *category, VALUE obj, void *data)

    if (gc_object_moved_p(objspace, obj)) {
        rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, rb_obj_info(rb_gc_impl_location(objspace, obj)));
    }

reachable_object_check_moved_i(VALUE ref, void *data)

    if (gc_object_moved_p(rb_gc_get_objspace(), ref)) {
        rb_bug("Object %s points to MOVED: %p -> %s", rb_obj_info(parent), (void *)ref, rb_obj_info(rb_gc_impl_location(rb_gc_get_objspace(), ref)));
    }

heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)

    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(objspace, v)) {

            asan_unpoisoning_object(v) {
                if (!rb_gc_impl_garbage_object_p(objspace, v)) {
                    rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
rb_gc_impl_during_gc_p(void *objspace_ptr)

#if RGENGC_PROFILE >= 2

      default: return "unknown";

gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)

    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);

    rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
rb_gc_impl_gc_count(void *objspace_ptr)

    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;
#endif
    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    static VALUE sym_weak_references_count;

    VALUE major_by, need_major_by;
    unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

        rb_bug("gc_info_decode: non-hash or symbol given");

    if (NIL_P(sym_major_by)) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
#if RGENGC_ESTIMATE_OLDMALLOC
        S(weak_references_count);

#define SET(name, attr) \
    if (key == sym_##name) \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));

    major_by =
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
        Qnil;
    SET(major_by, major_by);

    if (orig_flags == 0) {
        unsigned int need_major_flags = gc_needs_major_flags;
        need_major_by =
            (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
            (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
            (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY)  ? sym_shady :
            (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE)  ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
            (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
#endif
            Qnil;
        SET(need_major_by, need_major_by);
    }

        (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
        (flags & GPR_FLAG_MALLOC) ? sym_malloc :
        (flags & GPR_FLAG_METHOD) ? sym_method :
        (flags & GPR_FLAG_CAPI)   ? sym_capi :
        (flags & GPR_FLAG_STRESS) ? sym_stress :

    SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
    SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);

    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
        SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
    }

rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE key)

    return gc_info_decode(objspace, key, 0);
    gc_stat_sym_marking_time,
    gc_stat_sym_sweeping_time,
    gc_stat_sym_heap_allocated_pages,
    gc_stat_sym_heap_empty_pages,
    gc_stat_sym_heap_allocatable_bytes,
    gc_stat_sym_heap_available_slots,
    gc_stat_sym_heap_live_slots,
    gc_stat_sym_heap_free_slots,
    gc_stat_sym_heap_final_slots,
    gc_stat_sym_heap_marked_slots,
    gc_stat_sym_heap_eden_pages,
    gc_stat_sym_total_allocated_pages,
    gc_stat_sym_total_freed_pages,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_freed_objects,
    gc_stat_sym_malloc_increase_bytes,
    gc_stat_sym_malloc_increase_bytes_limit,
    gc_stat_sym_minor_gc_count,
    gc_stat_sym_major_gc_count,
    gc_stat_sym_compact_count,
    gc_stat_sym_read_barrier_faults,
    gc_stat_sym_total_moved_objects,
    gc_stat_sym_remembered_wb_unprotected_objects,
    gc_stat_sym_remembered_wb_unprotected_objects_limit,
    gc_stat_sym_old_objects,
    gc_stat_sym_old_objects_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,
#endif
    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,
    gc_stat_sym_last
};

static VALUE gc_stat_symbols[gc_stat_sym_last];
setup_gc_stat_symbols(void)

    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(heap_allocated_pages);
        S(heap_empty_pages);
        S(heap_allocatable_bytes);
        S(heap_available_slots);
        S(heap_final_slots);
        S(heap_marked_slots);
        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);
        S(read_barrier_faults);
        S(total_moved_objects);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);
        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);
#endif
        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);
ns_to_ms(uint64_t ns)

    return ns / (1000 * 1000);
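/*
 * GC.stat: time figures are kept internally in nanoseconds and truncated
 * to milliseconds here, so e.g. 1,999,999 ns reports as 1 ms. Per the SET
 * macro below, passing a Symbol returns a single counter; passing a Hash
 * fills in every key.
 */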
rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)

    setup_gc_stat_symbols();

    ractor_cache_flush_count(objspace, rb_gc_get_ractor_newobj_cache());
    malloc_increase_local_flush(objspace);

        rb_bug("non-hash or symbol given");

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));

    SET(count, objspace->profile.count);
    SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns));
    SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
    SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));

    SET(heap_allocated_pages, rb_darray_size(objspace->heap_pages.sorted));
    SET(heap_empty_pages, objspace->empty_pages_count);
    SET(heap_allocatable_bytes, objspace->heap_pages.allocatable_bytes);
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    SET(heap_final_slots, total_final_slots_count(objspace));
    SET(heap_marked_slots, objspace->marked_slots);
    SET(heap_eden_pages, heap_eden_total_pages(objspace));
    SET(total_allocated_pages, objspace->heap_pages.allocated_pages);
    SET(total_freed_pages, objspace->heap_pages.freed_pages);
    SET(total_allocated_objects, total_allocated_objects(objspace));
    SET(total_freed_objects, total_freed_objects(objspace));
    SET(malloc_increase_bytes, malloc_increase);
    SET(malloc_increase_bytes_limit, malloc_limit);
    SET(minor_gc_count, objspace->profile.minor_gc_count);
    SET(major_gc_count, objspace->profile.major_gc_count);
    SET(compact_count, objspace->profile.compact_count);
    SET(read_barrier_faults, objspace->profile.read_barrier_faults);
    SET(total_moved_objects, objspace->rcompactor.total_moved);
    SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
    SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
    SET(old_objects, objspace->rgengc.old_objects);
    SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->malloc_counters.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif

    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
#endif
enum gc_stat_heap_sym {
    gc_stat_heap_sym_slot_size,
    gc_stat_heap_sym_heap_live_slots,
    gc_stat_heap_sym_heap_free_slots,
    gc_stat_heap_sym_heap_final_slots,
    gc_stat_heap_sym_heap_eden_pages,
    gc_stat_heap_sym_heap_eden_slots,
    gc_stat_heap_sym_total_allocated_pages,
    gc_stat_heap_sym_force_major_gc_count,
    gc_stat_heap_sym_force_incremental_marking_finish_count,
    gc_stat_heap_sym_heap_allocatable_slots,
    gc_stat_heap_sym_total_allocated_objects,
    gc_stat_heap_sym_total_freed_objects,
    gc_stat_heap_sym_last
};

static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
setup_gc_stat_heap_symbols(void)

    if (gc_stat_heap_symbols[0] == 0) {
#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
        S(heap_final_slots);
        S(heap_allocatable_slots);
        S(total_allocated_pages);
        S(force_major_gc_count);
        S(force_incremental_marking_finish_count);
        S(total_allocated_objects);
        S(total_freed_objects);
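/*
 * Per-heap statistics are derived from running counters rather than a
 * heap walk: live slots = total_allocated_objects - total_freed_objects -
 * final_slots_count, and free slots follow as total_slots minus that
 * difference.
 */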
#define SET(name, attr) \
    if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));

    SET(slot_size, heap->slot_size);
    SET(heap_live_slots, heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count);
    SET(heap_free_slots, heap->total_slots - (heap->total_allocated_objects - heap->total_freed_objects));
    SET(heap_final_slots, heap->final_slots_count);
    SET(heap_eden_pages, heap->total_pages);
    SET(heap_eden_slots, heap->total_slots);
    SET(heap_allocatable_slots, objspace->heap_pages.allocatable_bytes / heap->slot_size);
    SET(total_allocated_pages, heap->total_allocated_pages);
    SET(force_major_gc_count, heap->force_major_gc_count);
    SET(force_incremental_marking_finish_count, heap->force_incremental_marking_finish_count);
    SET(total_allocated_objects, heap->total_allocated_objects);
    SET(total_freed_objects, heap->total_freed_objects);
rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)

    ractor_cache_flush_count(objspace, rb_gc_get_ractor_newobj_cache());

    setup_gc_stat_heap_symbols();

    if (NIL_P(heap_name)) {
        rb_bug("non-hash given");

        for (int i = 0; i < HEAP_COUNT; i++) {
            hash = rb_hash_new();
            rb_hash_aset(hash_or_sym, INT2FIX(i), hash);

        int heap_idx = FIX2INT(heap_name);

        if (heap_idx < 0 || heap_idx >= HEAP_COUNT) {
            rb_raise(rb_eArgError, "size pool index out of range");

            return stat_one_heap(objspace, &heaps[heap_idx], Qnil, hash_or_sym);

            return stat_one_heap(objspace, &heaps[heap_idx], hash_or_sym, Qnil);

        rb_bug("non-hash or symbol given");

        rb_bug("heap_name must be nil or an Integer");
#define RBOOL(v) (v ? Qtrue : Qfalse)

rb_gc_impl_config_get(void *objspace_ptr)
#define sym(name) ID2SYM(rb_intern_const(name))
    VALUE hash = rb_hash_new();

    rb_hash_aset(hash, sym("rgengc_allow_full_mark"), RBOOL(gc_config_full_mark_val));

    if (rb_sym2id(key) == rb_intern("rgengc_allow_full_mark")) {
        gc_config_full_mark_set(RTEST(value));

rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)

        rb_raise(rb_eArgError, "expected keyword arguments");

rb_gc_impl_stress_get(void *objspace_ptr)

    return ruby_gc_stress_mode;

rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)

    const char *ptr = getenv(name);

    if (ptr != NULL && *ptr) {
#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);
#else
        val = strtol(ptr, &end, 0);
#endif

            unit = 1024*1024*1024;

        while (*end && isspace((unsigned char)*end)) end++;

            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);

        if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);

        if (val > 0 && (size_t)val > lower_bound) {
            fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
            *default_value = (size_t)val;

            fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                    name, val, *default_value, lower_bound);

get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)

    const char *ptr = getenv(name);

    if (ptr != NULL && *ptr) {
        if (!*ptr || *end) {
            if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);

        if (accept_zero && val == 0.0) {
        else if (val <= lower_bound) {
            fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                    name, val, *default_value, lower_bound);
        else if (upper_bound != 0.0 && val > upper_bound) {
            fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                    name, val, *default_value, upper_bound);

    if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
    *default_value = val;
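/*
 * Tuning parameters are read from RUBY_GC_* environment variables with
 * the two parsers above; judging by the 1024*1024*1024 unit handling,
 * size values appear to accept binary-suffixed forms, and out-of-range or
 * overflowing values are ignored (with a warning when $VERBOSE is set).
 * For example, RUBY_GC_MALLOC_LIMIT=16000000 seeds malloc_limit below.
 */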
rb_gc_impl_set_params(void *objspace_ptr)

    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {

    get_envparam_size("RUBY_GC_HEAP_INIT_BYTES", &gc_params.heap_init_bytes, 0);

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size  ("RUBY_GC_HEAP_GROWTH_MAX_BYTES", &gc_params.growth_max_bytes, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
                        gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
    get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);

    if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
        malloc_limit = gc_params.malloc_limit_min;

    get_envparam_size  ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) {
        gc_params.malloc_limit_max = SIZE_MAX;
    }
    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
    }
    get_envparam_size  ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
#ifdef HAVE_MALLOC_USABLE_SIZE
    hint = malloc_usable_size(ptr);
#endif

    MEMOP_TYPE_MALLOC = 0,

atomic_sub_nounderflow(size_t *var, size_t sub)

    if (sub == 0) return;

        if (val < sub) sub = val;

#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))

    unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
    if (gc_stress_full_mark_after_malloc_p()) {
        reason |= GPR_FLAG_FULL_MARK;
    }
    garbage_collect_with_gvl(objspace, reason);

    if (new_size > old_size) {
#if RGENGC_ESTIMATE_OLDMALLOC

        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->malloc_counters.oldmalloc_increase, old_size - new_size);
#endif

#if USE_MALLOC_INCREASE_LOCAL

    int delta = malloc_increase_local;
    if (delta == 0) return;

    malloc_increase_local = 0;

        malloc_increase_commit(objspace, (size_t)delta, 0);

        malloc_increase_commit(objspace, 0, (size_t)(-delta));
objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type, bool gc_allowed)

    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   type == MEMOP_TYPE_MALLOC  ? "malloc" :
                   type == MEMOP_TYPE_FREE    ? "free " :
                   type == MEMOP_TYPE_REALLOC ? "realloc" : "error",
                   new_size, old_size);
objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type, bool gc_allowed)

#if USE_MALLOC_INCREASE_LOCAL
    if (new_size < GC_MALLOC_INCREASE_LOCAL_THRESHOLD &&
        old_size < GC_MALLOC_INCREASE_LOCAL_THRESHOLD) {
        malloc_increase_local += (int)new_size - (int)old_size;

        if (malloc_increase_local >= GC_MALLOC_INCREASE_LOCAL_THRESHOLD ||
            malloc_increase_local <= -GC_MALLOC_INCREASE_LOCAL_THRESHOLD) {
            malloc_increase_local_flush(objspace);
        }
    }
    else {
        malloc_increase_local_flush(objspace);
        malloc_increase_commit(objspace, new_size, old_size);
    }
#else
    malloc_increase_commit(objspace, new_size, old_size);
#endif

    if (type == MEMOP_TYPE_MALLOC && gc_allowed) {

        if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {

        garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {

        size_t dec_size = old_size - new_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        size_t allocated_size = objspace->malloc_params.allocated_size;
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
        }
#endif
        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);

      case MEMOP_TYPE_MALLOC:

      case MEMOP_TYPE_FREE:

        size_t allocations = objspace->malloc_params.allocations;
        if (allocations > 0) {
            atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
        }
#if MALLOC_ALLOCATED_SIZE_CHECK
        GC_ASSERT(objspace->malloc_params.allocations > 0);
#endif

      case MEMOP_TYPE_REALLOC: break;

#define objspace_malloc_increase(...) \
    for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
         !malloc_increase_done; \
         malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
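/*
 * The for-loop trick above runs the statement block attached to an
 * objspace_malloc_increase(...) call site exactly once: the report call
 * (which, judging by this loop, returns false) runs first, then the
 * attached block, then the body call performs the actual accounting and
 * terminates the loop by returning true.
 */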
    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE

    return during_gc && !dont_gc_val() && !rb_gc_multi_ractor_p() && ruby_thread_has_gvl_p();

    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC, gc_allowed) {}

#if CALC_EXACT_MALLOC_SIZE
#if defined(__GNUC__) && RUBY_DEBUG
#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
#endif

#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
#endif

#define GC_MEMERROR(...) \
    ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : (void)0)

#define TRY_WITH_GC(siz, expr) do { \
        const gc_profile_record_flag gpr = \
            GPR_FLAG_FULL_MARK | \
            GPR_FLAG_IMMEDIATE_MARK | \
            GPR_FLAG_IMMEDIATE_SWEEP | \
        objspace_malloc_gc_stress(objspace); \
        \
        if (RB_LIKELY((expr))) { \
        } \
        else if (gc_allowed && !garbage_collect_with_gvl(objspace, gpr)) { \
            GC_MEMERROR("TRY_WITH_GC: could not GC"); \
        } \
        else if ((expr)) { \
        } \
        else { \
            GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
                        "%"PRIdSIZE" bytes for %s", \

    if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
        rb_bug("Cannot %s during GC", msg);
rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)

#if CALC_EXACT_MALLOC_SIZE
        rb_bug("buffer %p has no recorded size. Was it allocated with ruby_mimalloc? If so it should be freed with ruby_mimfree", ptr);

    if (old_size && (old_size + sizeof(struct malloc_obj_info)) != info->size) {
        rb_bug("buffer %p freed with old_size=%zu, but was allocated with size=%zu", ptr, old_size, info->size - sizeof(struct malloc_obj_info));
    }

    old_size = info->size;
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE, true) {
        RB_DEBUG_COUNTER_INC(heap_xfree);
    }

rb_gc_impl_malloc(void *objspace_ptr, size_t size, bool gc_allowed)

    check_malloc_not_in_gc(objspace, "malloc");

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    if (!mem) return mem;

    return objspace_malloc_fixup(objspace, mem, size, gc_allowed);
rb_gc_impl_calloc(void *objspace_ptr, size_t size, bool gc_allowed)

    if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
        rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
#if RGENGC_CHECK_MODE || RUBY_DEBUG
        rb_bug("Cannot calloc during GC");
#endif
    }

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    if (!mem) return mem;

    return objspace_malloc_fixup(objspace, mem, size, gc_allowed);
rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size, bool gc_allowed)

    check_malloc_not_in_gc(objspace, "realloc");

    if (!ptr) return rb_gc_impl_malloc(objspace, new_size, gc_allowed);

    if (new_size == 0) {
        if ((mem = rb_gc_impl_malloc(objspace, 0, gc_allowed)) != NULL) {

            rb_gc_impl_free(objspace, ptr, old_size);

#if CALC_EXACT_MALLOC_SIZE
    if (old_size && (old_size + sizeof(struct malloc_obj_info)) != info->size) {
        rb_bug("buffer %p realloced with old_size=%zu, but was allocated with size=%zu", ptr, old_size, info->size - sizeof(struct malloc_obj_info));
    }

    old_size = info->size;
#endif

    old_size = objspace_malloc_size(objspace, ptr, old_size);

    if (!mem) return mem;
    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE
    info->size = new_size;
#endif

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC, gc_allowed);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);
rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)

    if (diff > 0) {
        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC, true);
    }
    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC, true);
    }
8465current_process_time(
struct timespec *ts)
8467#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
8469 static int try_clock_gettime = 1;
8470 if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
8474 try_clock_gettime = 0;
8481 struct rusage usage;
8483 if (getrusage(RUSAGE_SELF, &usage) == 0) {
8484 time = usage.ru_utime;
8485 ts->tv_sec = time.tv_sec;
8486 ts->tv_nsec = (int32_t)time.tv_usec * 1000;
8494 FILETIME creation_time, exit_time, kernel_time, user_time;
8497 if (GetProcessTimes(GetCurrentProcess(),
8498 &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8499 memcpy(&ui, &user_time,
sizeof(FILETIME));
8500#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
8501 ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
8502 ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
8515 if (current_process_time(&ts)) {
8516 return ts.tv_sec + ts.tv_nsec * 1e-9;
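/*
 * CPU-time sources, in order of preference: clock_gettime with
 * CLOCK_PROCESS_CPUTIME_ID (disabled after the first failure via
 * try_clock_gettime), then getrusage(RUSAGE_SELF) user time, then
 * GetProcessTimes on Windows, where the 100 ns FILETIME units are split
 * into tv_sec / tv_nsec.
 */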
    size_t index = objspace->profile.next_index;

        objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;

    if (index >= objspace->profile.size) {

        if (!ptr) rb_memerror();

        rb_bug("gc_profile malloc or realloc miss");

    record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
    record->allocated_size = malloc_allocated_size;
#endif
#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY

    struct rusage usage;
    if (getrusage(RUSAGE_SELF, &usage) == 0) {
        record->maxrss = usage.ru_maxrss;
        record->minflt = usage.ru_minflt;
        record->majflt = usage.ru_majflt;
#if GC_PROFILE_MORE_DETAIL
    record->prepare_time = objspace->profile.prepare_time;
#endif

    record->gc_time = 0;
    record->gc_invoke_time = getrusage_time();

elapsed_time_from(double time)

    double now = getrusage_time();

    record->gc_time = elapsed_time_from(record->gc_invoke_time);
    record->gc_invoke_time -= objspace->profile.invoke_time;
#ifdef BUILDING_MODULAR_GC
# define RUBY_DTRACE_GC_HOOK(name)
#else
# define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
#endif

    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    gc_prof_record(objspace)->gc_mark_time = getrusage_time();

    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    record->gc_mark_time = elapsed_time_from(record->gc_mark_time);

    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);

    if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
        objspace->profile.gc_sweep_start_time = getrusage_time();

    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (record->gc_time > 0) {
        sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
        record->gc_time += sweep_time;
    }
    else if (GC_PROFILE_MORE_DETAIL) {
        sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
    }

#if GC_PROFILE_MORE_DETAIL
    record->gc_sweep_time += sweep_time;
    if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
#endif
    if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;

#if GC_PROFILE_MORE_DETAIL
    record->allocate_increase = malloc_increase;
    record->allocate_limit = malloc_limit;
    size_t use_size = 0;
    size_t total_size = 0;

    for (int i = 0; i < HEAP_COUNT; i++) {
        size_t heap_live = heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count;
        total += heap->total_slots;
        use_size += heap_live * heap->slot_size;
        total_size += heap->total_slots * heap->slot_size;
    }

#if GC_PROFILE_MORE_DETAIL
    size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
    record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
    record->heap_live_objects = live;
    record->heap_free_objects = total - live;
#endif

    record->heap_total_objects = total;
    record->heap_use_size = use_size;
    record->heap_total_size = total_size;

    void *p = objspace->profile.records;

    objspace->profile.current_record = 0;
gc_profile_record_get(VALUE _)

    for (i = 0; i < objspace->profile.next_index; i++) {

        prof = rb_hash_new();
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
#if GC_PROFILE_MORE_DETAIL
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
        rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));

        rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);

#if RGENGC_PROFILE > 0
        rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
        rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

gc_profile_dump_major_reason(unsigned int flags, char *buff)

    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;

    if (reason == GPR_FLAG_NONE) {

        if (reason & GPR_FLAG_MAJOR_BY_##x) { \
            buff[i++] = #x[0]; \
            if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \

#if RGENGC_ESTIMATE_OLDMALLOC
    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];
#endif

    if (objspace->profile.run && count) {

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
        }

#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
            "Prepare Time = Previously GC's rest sweep time\n"
            "Index Flags Allocate Inc. Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
            " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
            " OldgenObj RemNormObj RemShadObj"
#if GC_PROFILE_DETAIL_MEMORY
            " MaxRSS(KB) MinorFLT MajorFLT"

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if GC_PROFILE_DETAIL_MEMORY

                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI)   ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,
#endif
                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,
                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects

                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#if GC_PROFILE_DETAIL_MEMORY
                                   record->maxrss / 1024,
gc_profile_result(VALUE _)

gc_profile_report(int argc, VALUE *argv, VALUE self)

    gc_profile_dump_on(out, rb_io_write);

gc_profile_total_time(VALUE self)

    size_t count = objspace->profile.next_index;

    for (i = 0; i < count; i++) {
        time += objspace->profile.records[i].gc_time;
    }

gc_profile_enable_get(VALUE self)

gc_profile_enable(VALUE _)

    objspace->profile.current_record = 0;

gc_profile_disable(VALUE _)

    objspace->profile.current_record = 0;

rb_gc_verify_internal_consistency(void)

    gc_verify_internal_consistency(rb_gc_get_objspace());

gc_verify_internal_consistency_m(VALUE dummy)

    rb_gc_verify_internal_consistency();
#if GC_CAN_COMPILE_COMPACTION
    GC_ASSERT(GC_COMPACTION_SUPPORTED);

    ruby_enable_autocompact = RTEST(v);

#if RGENGC_CHECK_MODE
    ruby_autocompact_compare_func = NULL;

    if (id == rb_intern("empty")) {
        ruby_autocompact_compare_func = compare_free_slots;
    }
#endif

#else
# define gc_set_auto_compact rb_f_notimplement
#endif

#if GC_CAN_COMPILE_COMPACTION
gc_get_auto_compact(VALUE _)
#else
# define gc_get_auto_compact rb_f_notimplement
#endif
#if GC_CAN_COMPILE_COMPACTION
gc_compact_stats(VALUE self)

    VALUE h = rb_hash_new();
    VALUE considered = rb_hash_new();
    VALUE moved = rb_hash_new();
    VALUE moved_up = rb_hash_new();
    VALUE moved_down = rb_hash_new();

    for (size_t i = 0; i < T_MASK; i++) {
        if (objspace->rcompactor.considered_count_table[i]) {
            rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
        }

        if (objspace->rcompactor.moved_count_table[i]) {
            rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
        }

        if (objspace->rcompactor.moved_up_count_table[i]) {
            rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
        }

        if (objspace->rcompactor.moved_down_count_table[i]) {
            rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
        }
    }

    rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
    rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
    rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
    rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);

#else
# define gc_compact_stats rb_f_notimplement
#endif
#if GC_CAN_COMPILE_COMPACTION
gc_compact(VALUE self)

    int full_marking_p = gc_config_full_mark_val;
    gc_config_full_mark_set(TRUE);

    rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
    gc_config_full_mark_set(full_marking_p);

    return gc_compact_stats(self);

#else
# define gc_compact rb_f_notimplement
#endif
#if GC_CAN_COMPILE_COMPACTION
struct desired_compaction_pages_i_data {
    rb_objspace_t *objspace;
    size_t required_slots[HEAP_COUNT];
};

static int
desired_compaction_pages_i(struct heap_page *page, void *data)
{
    struct desired_compaction_pages_i_data *tdata = data;
    rb_objspace_t *objspace = tdata->objspace;
    VALUE vstart = (VALUE)page->start;
    VALUE vend = vstart + (VALUE)(page->total_slots * page->heap->slot_size);

    for (VALUE v = vstart; v != vend; v += page->heap->slot_size) {
        asan_unpoisoning_object(v) {
            /* Skip T_NONE slots; they will not be moved. */
            if (BUILTIN_TYPE(v) != T_NONE) {
                rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, page->heap, v);
                size_t dest_pool_idx = dest_pool - heaps;
                tdata->required_slots[dest_pool_idx]++;
            }
        }
    }

    return 0;
}
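/*
 *  call-seq:
 *     GC.verify_compaction_references(toward: nil, double_heap: false, expand_heap: false) -> hash
 *
 *  Temporarily enlarges the heaps so that compaction must move every
 *  movable object, runs a compacting GC, and then verifies that no stale
 *  references to moved objects remain. Intended for debugging, e.g.:
 *
 *     GC.verify_compaction_references(expand_heap: true, toward: :empty)
 */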
static VALUE
gc_verify_compaction_references(int argc, VALUE *argv, VALUE self)
{
    static ID keywords[3] = {0};
    if (!keywords[0]) {
        keywords[0] = rb_intern("toward");
        keywords[1] = rb_intern("double_heap");
        keywords[2] = rb_intern("expand_heap");
    }

    VALUE options;
    rb_scan_args_kw(rb_keyword_given_p(), argc, argv, ":", &options);

    VALUE arguments[3] = { Qnil, Qnil, Qnil };
    int kwarg_count = rb_get_kwargs(options, keywords, 0, 3, arguments);
    bool toward_empty = kwarg_count > 0 && SYMBOL_P(arguments[0]) && SYM2ID(arguments[0]) == rb_intern("empty");
    bool expand_heap = (kwarg_count > 1 && RTEST(arguments[1])) || (kwarg_count > 2 && RTEST(arguments[2]));

    rb_objspace_t *objspace = rb_gc_get_objspace();

    /* Clear the heap. */
    rb_gc_impl_start(objspace, true, true, true, false);

    unsigned int lev = RB_GC_VM_LOCK();
    {
        /* If both double_heap and expand_heap are set, expand_heap wins. */
        if (expand_heap) {
            struct desired_compaction_pages_i_data desired_compaction = {
                .objspace = objspace,
                .required_slots = {0},
            };
            /* Work out how many objects want to be in each size pool, taking moves into account. */
            objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);

            /* Find out which pool has the most pages. */
            size_t max_existing_pages = 0;
            for (int i = 0; i < HEAP_COUNT; i++) {
                rb_heap_t *heap = &heaps[i];
                max_existing_pages = MAX(max_existing_pages, heap->total_pages);
            }

            /* Add pages to each heap so that compaction is guaranteed to move every object. */
            for (int i = 0; i < HEAP_COUNT; i++) {
                rb_heap_t *heap = &heaps[i];

                size_t pages_to_add = 0;
                /* Step 1: pad every heap out to the size of the largest heap. */
                pages_to_add += max_existing_pages - heap->total_pages;
                /* Step 2: add enough free pages to hold every object that wants to live in this heap. */
                objspace->heap_pages.allocatable_bytes = desired_compaction.required_slots[i] * heap->slot_size;
                while (objspace->heap_pages.allocatable_bytes > 0) {
                    heap_page_allocate_and_initialize(objspace, heap);
                }

                for (; pages_to_add > 0; pages_to_add--) {
                    heap_page_allocate_and_initialize_force(objspace, heap);
                }
            }
        }

        if (toward_empty) {
            objspace->rcompactor.compare_func = compare_free_slots;
        }
    }
    RB_GC_VM_UNLOCK(lev);

    rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);

    rb_objspace_reachable_objects_from_root(root_obj_check_moved_i, objspace);
    objspace_each_objects(objspace, heap_check_moved_i, objspace, TRUE);

    objspace->rcompactor.compare_func = NULL;

    return gc_compact_stats(self);
}
#else
# define gc_verify_compaction_references rb_f_notimplement
#endif
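/*
 * Tears down an object space in reverse order of initialization: profiler
 * records, heap pages, mark stack, then the weak reference table. Must not
 * be called while lazy sweeping is in progress.
 */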
void
rb_gc_impl_objspace_free(void *objspace_ptr)
{
    rb_objspace_t *objspace = objspace_ptr;

    if (is_lazy_sweeping(objspace)) {
        rb_bug("lazy sweeping underway when freeing object space");
    }

    free(objspace->profile.records);
    objspace->profile.records = NULL;

    for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
        heap_page_free(objspace, rb_darray_get(objspace->heap_pages.sorted, i));
    }
    rb_darray_free_without_gc(objspace->heap_pages.sorted);
    heap_pages_lomem = 0;
    heap_pages_himem = 0;

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];
        heap->total_pages = 0;
        heap->total_slots = 0;
    }

    free_stack_chunks(&objspace->mark_stack);
    mark_stack_free_cache(&objspace->mark_stack);

    rb_darray_free_without_gc(objspace->weak_references);

    free(objspace);
}
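/*
 * malloc() accounting, only compiled in when MALLOC_ALLOCATED_SIZE is set
 * at build time (see GC::OPTS).
 */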
#if MALLOC_ALLOCATED_SIZE
static VALUE
gc_malloc_allocated_size(VALUE self)
{
    rb_objspace_t *objspace = rb_gc_get_objspace();
    return ULL2NUM(objspace->malloc_params.allocated_size);
}

static VALUE
gc_malloc_allocations(VALUE self)
{
    rb_objspace_t *objspace = rb_gc_get_objspace();
    return ULL2NUM(objspace->malloc_params.allocations);
}
#endif
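/*
 * Fork hooks: the VM lock is taken before fork() and released after it so
 * that no other thread can mutate GC state while the process image is
 * duplicated; the child process then drops per-ractor allocation caches.
 */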
void
rb_gc_impl_before_fork(void *objspace_ptr)
{
    rb_objspace_t *objspace = objspace_ptr;
    objspace->fork_vm_lock_lev = RB_GC_VM_LOCK();
}

void
rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid)
{
    rb_objspace_t *objspace = objspace_ptr;
    RB_GC_VM_UNLOCK(objspace->fork_vm_lock_lev);

    if (pid == 0) { /* child process */
        rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
    }
}
VALUE rb_ident_hash_new_with_size(st_index_t size);
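/*
 *  call-seq:
 *     GC.add_stress_to_class(class[, ...])
 *     GC.remove_stress_to_class(class[, ...])
 *
 *  Debug-only helpers (GC_DEBUG_STRESS_TO_CLASS): raise NoMemoryError when
 *  allocating an instance of the given classes, and remove classes from
 *  that set again.
 */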
#if GC_DEBUG_STRESS_TO_CLASS
static VALUE
rb_gcdebug_add_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = rb_gc_get_objspace();

    if (!stress_to_class) {
        set_stress_to_class(rb_ident_hash_new_with_size(argc));
    }

    for (int i = 0; i < argc; i++) {
        VALUE klass = argv[i];
        rb_hash_aset(stress_to_class, klass, Qtrue);
    }

    return self;
}

static VALUE
rb_gcdebug_remove_stress_to_class(int argc, VALUE *argv, VALUE self)
{
    rb_objspace_t *objspace = rb_gc_get_objspace();

    if (stress_to_class) {
        for (int i = 0; i < argc; ++i) {
            rb_hash_delete(stress_to_class, argv[i]);
        }

        if (rb_hash_size(stress_to_class) == 0) {
            stress_to_class = 0;
        }
    }

    return Qnil;
}
#endif
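/*
 * Entry points called by the GC framework (gc.c) when this implementation
 * is loaded: allocate the rb_objspace_t, then initialize the heaps,
 * darrays, the mark stack, and the finalizer table.
 */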
void *
rb_gc_impl_objspace_alloc(void)
{
    rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));

    return objspace;
}

void
rb_gc_impl_objspace_init(void *objspace_ptr)
{
    rb_objspace_t *objspace = objspace_ptr;

    gc_config_full_mark_set(TRUE);

    objspace->flags.measure_gc = true;
    malloc_limit = gc_params.malloc_limit_min;
    objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
    if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
        rb_bug("Could not preregister postponed job for GC");
    }

    GC_ASSERT(rb_gc_impl_size_allocatable_p(sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX])));

    for (int i = 0; i < HEAP_COUNT; i++) {
        rb_heap_t *heap = &heaps[i];

        heap->slot_size = pool_slot_sizes[i];

        ccan_list_head_init(&heap->pages);
    }

    init_size_to_heap_idx();

    rb_darray_make_without_gc(&objspace->heap_pages.sorted, 0);
    rb_darray_make_without_gc(&objspace->weak_references, 0);

#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
    /* Need to determine if we can use mmap at runtime. */
    heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
#endif
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
#endif
    gc_params.heap_init_bytes = GC_HEAP_INIT_BYTES;

    init_mark_stack(&objspace->mark_stack);

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
}
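/*
 * Defines the Ruby-visible surface of this GC implementation:
 * GC::INTERNAL_CONSTANTS, the compaction methods, GC::Profiler, and the
 * GC::OPTS build-option list. For example, from Ruby:
 *
 *     GC::INTERNAL_CONSTANTS[:HEAP_PAGE_SIZE]  # => page size in bytes
 *     GC::OPTS  # => e.g. ["GC_DEBUG", "RGENGC_CHECK_MODE", ...] (illustrative)
 */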
void
rb_gc_impl_init(void)
{
    VALUE gc_constants = rb_hash_new();
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), GC_DEBUG ? Qtrue : Qfalse);

    size_t rvalue_pool = 0;
    for (size_t i = 0; i < HEAP_COUNT; i++) {
        if (pool_slot_sizes[i] >= RVALUE_SLOT_SIZE) {
            rvalue_pool = pool_slot_sizes[i];
            break;
        }
    }
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_SIZE")), SIZET2NUM(rvalue_pool - RVALUE_OVERHEAD));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(HEAP_COUNT));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(heap_slot_size(HEAP_COUNT - 1)));
    rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
    if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
        rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
    }
    OBJ_FREEZE(gc_constants);
    /* Internal constants in the garbage collector. */
    rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);

    if (GC_COMPACTION_SUPPORTED) {
        rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
        rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
        rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
    }
    else {
        rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
        rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
        rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
        rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
    }

#if GC_DEBUG_STRESS_TO_CLASS
    rb_define_singleton_method(rb_mGC, "add_stress_to_class", rb_gcdebug_add_stress_to_class, -1);
    rb_define_singleton_method(rb_mGC, "remove_stress_to_class", rb_gcdebug_remove_stress_to_class, -1);
#endif

    /* internal methods */
    rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);

#if MALLOC_ALLOCATED_SIZE
    rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
    rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
#endif

    VALUE rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
    rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
    rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
    rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
    rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
    rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
    rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
    rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);

    {
        VALUE opts;
        /* \GC build options */
        rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
#define OPT(o) if (o) rb_ary_push(opts, rb_interned_str(#o, sizeof(#o) - 1))
        OPT(GC_DEBUG);
        OPT(RGENGC_DEBUG);
        OPT(RGENGC_CHECK_MODE);
        OPT(RGENGC_PROFILE);
        OPT(RGENGC_ESTIMATE_OLDMALLOC);
        OPT(GC_PROFILE_MORE_DETAIL);
        OPT(GC_ENABLE_LAZY_SWEEP);
        OPT(CALC_EXACT_MALLOC_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE);
        OPT(MALLOC_ALLOCATED_SIZE_CHECK);
        OPT(GC_PROFILE_DETAIL_MEMORY);
        OPT(GC_COMPACTION_SUPPORTED);
#undef OPT
        OBJ_FREEZE(opts);
    }
}