1 #include "ruby/internal/config.h"
10 #if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
12 # include <sys/user.h>
15 #include "internal/hash.h"
24 #include "ccan/list/list.h"
27 #include "gc/gc_impl.h"
29 #ifndef BUILDING_SHARED_GC
33 #include "debug_counter.h"
34 #include "internal/sanitizers.h"
37 #ifndef HAVE_MALLOC_USABLE_SIZE
39 # define HAVE_MALLOC_USABLE_SIZE
40 # define malloc_usable_size(a) _msize(a)
41 # elif defined HAVE_MALLOC_SIZE
42 # define HAVE_MALLOC_USABLE_SIZE
43 # define malloc_usable_size(a) malloc_size(a)
47 #ifdef HAVE_MALLOC_USABLE_SIZE
48 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
50 # elif defined(HAVE_MALLOC_H)
52 # elif defined(HAVE_MALLOC_NP_H)
53 # include <malloc_np.h>
54 # elif defined(HAVE_MALLOC_MALLOC_H)
55 # include <malloc/malloc.h>
59 #ifdef HAVE_MALLOC_TRIM
62 # ifdef __EMSCRIPTEN__
64 # include <emscripten/emmalloc.h>
68 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
69 # include <mach/task.h>
70 # include <mach/mach_init.h>
71 # include <mach/mach_port.h>
75 # define VM_CHECK_MODE RUBY_DEBUG
79 #ifndef RACTOR_CHECK_MODE
80 # define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)
83 #ifndef RUBY_DEBUG_LOG
84 # define RUBY_DEBUG_LOG(...)
87 #ifndef GC_HEAP_INIT_SLOTS
88 #define GC_HEAP_INIT_SLOTS 10000
90 #ifndef GC_HEAP_FREE_SLOTS
91 #define GC_HEAP_FREE_SLOTS 4096
93 #ifndef GC_HEAP_GROWTH_FACTOR
94 #define GC_HEAP_GROWTH_FACTOR 1.8
96 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
97 #define GC_HEAP_GROWTH_MAX_SLOTS 0
99 #ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
100 # define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
102 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
103 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
106 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
107 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
109 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
110 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
112 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
113 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
116 #ifndef GC_MALLOC_LIMIT_MIN
117 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 )
119 #ifndef GC_MALLOC_LIMIT_MAX
120 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 )
122 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
123 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
126 #ifndef GC_OLDMALLOC_LIMIT_MIN
127 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 )
129 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
130 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
132 #ifndef GC_OLDMALLOC_LIMIT_MAX
133 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 )
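/* Rough behaviour of these limits: malloc_increase tracks bytes malloc'ed since the last GC;
 * when it exceeds the current limit a GC is triggered and the limit grows by
 * GC_MALLOC_LIMIT_GROWTH_FACTOR, capped at GC_MALLOC_LIMIT_MAX
 * (e.g. 16 MiB -> 22.4 MiB -> 31.4 MiB -> capped at 32 MiB). The OLDMALLOC variants
 * play the same role for the estimated old-generation malloc usage and can force a
 * major GC (GPR_FLAG_MAJOR_BY_OLDMALLOC). */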
136 #ifndef GC_CAN_COMPILE_COMPACTION
137 #if defined(__wasi__)
138 # define GC_CAN_COMPILE_COMPACTION 0
140 # define GC_CAN_COMPILE_COMPACTION 1
144 #ifndef PRINT_ENTER_EXIT_TICK
145 # define PRINT_ENTER_EXIT_TICK 0
147 #ifndef PRINT_ROOT_TICKS
148 #define PRINT_ROOT_TICKS 0
151 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_ROOT_TICKS)
154 # define HEAP_COUNT 5
163 size_t incremental_mark_step_allocated_slots;
168 size_t heap_init_slots[HEAP_COUNT];
169 size_t heap_free_slots;
170 double growth_factor;
171 size_t growth_max_slots;
173 double heap_free_slots_min_ratio;
174 double heap_free_slots_goal_ratio;
175 double heap_free_slots_max_ratio;
176 double uncollectible_wb_unprotected_objects_limit_ratio;
177 double oldobject_limit_factor;
179 size_t malloc_limit_min;
180 size_t malloc_limit_max;
181 double malloc_limit_growth_factor;
183 size_t oldmalloc_limit_min;
184 size_t oldmalloc_limit_max;
185 double oldmalloc_limit_growth_factor;
189 { GC_HEAP_INIT_SLOTS },
191 GC_HEAP_GROWTH_FACTOR,
192 GC_HEAP_GROWTH_MAX_SLOTS,
194 GC_HEAP_FREE_SLOTS_MIN_RATIO,
195 GC_HEAP_FREE_SLOTS_GOAL_RATIO,
196 GC_HEAP_FREE_SLOTS_MAX_RATIO,
197 GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
198 GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
202 GC_MALLOC_LIMIT_GROWTH_FACTOR,
204 GC_OLDMALLOC_LIMIT_MIN,
205 GC_OLDMALLOC_LIMIT_MAX,
206 GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
225 #define RGENGC_DEBUG -1
227 #define RGENGC_DEBUG 0
230 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
231 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
232 #elif defined(HAVE_VA_ARGS_MACRO)
233 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
235 # define RGENGC_DEBUG_ENABLED(level) 0
237 int ruby_rgengc_debug;
244 #ifndef RGENGC_PROFILE
245 # define RGENGC_PROFILE 0
254 #ifndef RGENGC_ESTIMATE_OLDMALLOC
255 # define RGENGC_ESTIMATE_OLDMALLOC 1
261 #ifndef RGENGC_FORCE_MAJOR_GC
262 # define RGENGC_FORCE_MAJOR_GC 0
265 #ifndef GC_PROFILE_MORE_DETAIL
266 # define GC_PROFILE_MORE_DETAIL 0
268 #ifndef GC_PROFILE_DETAIL_MEMORY
269 # define GC_PROFILE_DETAIL_MEMORY 0
271 #ifndef GC_ENABLE_LAZY_SWEEP
272 # define GC_ENABLE_LAZY_SWEEP 1
274 #ifndef CALC_EXACT_MALLOC_SIZE
275 # define CALC_EXACT_MALLOC_SIZE 0
277 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
278 # ifndef MALLOC_ALLOCATED_SIZE
279 # define MALLOC_ALLOCATED_SIZE 0
282 # define MALLOC_ALLOCATED_SIZE 0
284 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
285 # define MALLOC_ALLOCATED_SIZE_CHECK 0
288 #ifndef GC_DEBUG_STRESS_TO_CLASS
289 # define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
293 GPR_FLAG_NONE = 0x000,
295 GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
296 GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
297 GPR_FLAG_MAJOR_BY_SHADY = 0x004,
298 GPR_FLAG_MAJOR_BY_FORCE = 0x008,
299 #if RGENGC_ESTIMATE_OLDMALLOC
300 GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
302 GPR_FLAG_MAJOR_MASK = 0x0ff,
305 GPR_FLAG_NEWOBJ = 0x100,
306 GPR_FLAG_MALLOC = 0x200,
307 GPR_FLAG_METHOD = 0x400,
308 GPR_FLAG_CAPI = 0x800,
309 GPR_FLAG_STRESS = 0x1000,
312 GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
313 GPR_FLAG_HAVE_FINALIZE = 0x4000,
314 GPR_FLAG_IMMEDIATE_MARK = 0x8000,
315 GPR_FLAG_FULL_MARK = 0x10000,
316 GPR_FLAG_COMPACT = 0x20000,
319 (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
320 GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
321 } gc_profile_record_flag;
327 double gc_invoke_time;
329 size_t heap_total_objects;
330 size_t heap_use_size;
331 size_t heap_total_size;
332 size_t moved_objects;
334 #if GC_PROFILE_MORE_DETAIL
336 double gc_sweep_time;
338 size_t heap_use_pages;
339 size_t heap_live_objects;
340 size_t heap_free_objects;
342 size_t allocate_increase;
343 size_t allocate_limit;
346 size_t removing_objects;
347 size_t empty_objects;
348 #if GC_PROFILE_DETAIL_MEMORY
354 #if MALLOC_ALLOCATED_SIZE
355 size_t allocated_size;
358 #if RGENGC_PROFILE > 0
360 size_t remembered_normal_objects;
361 size_t remembered_shady_objects;
369 uint32_t original_shape_id;
372 #define RMOVED(obj) ((struct RMoved *)(obj))
374 typedef uintptr_t bits_t;
376 BITS_SIZE = sizeof(bits_t),
377 BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
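/* bits_t is a uintptr_t, so on a 64-bit build BITS_SIZE == 8 and BITS_BITLENGTH == 64:
 * one bitmap word tracks 64 heap slots. */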
390 #define STACK_CHUNK_SIZE 500
393 VALUE data[STACK_CHUNK_SIZE];
403 size_t unused_cache_size;
406 typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
412 size_t total_allocated_pages;
413 size_t force_major_gc_count;
414 size_t force_incremental_marking_finish_count;
415 size_t total_allocated_objects;
416 size_t total_freed_objects;
417 size_t final_slots_count;
424 struct ccan_list_head pages;
427 uintptr_t compact_cursor_index;
436 gc_stress_no_immediate_sweep,
437 gc_stress_full_mark_after_malloc,
452 #if MALLOC_ALLOCATED_SIZE
453 size_t allocated_size;
463 unsigned int mode : 2;
464 unsigned int immediate_sweep : 1;
465 unsigned int dont_gc : 1;
466 unsigned int dont_incremental : 1;
467 unsigned int during_gc : 1;
468 unsigned int during_compacting : 1;
469 unsigned int during_reference_updating : 1;
470 unsigned int gc_stressful: 1;
471 unsigned int has_newobj_hook: 1;
472 unsigned int during_minor_gc : 1;
473 unsigned int during_incremental_marking : 1;
474 unsigned int measure_gc : 1;
478 unsigned long long next_object_id;
481 size_t empty_pages_count;
494 size_t allocated_pages;
497 size_t freeable_pages;
499 size_t allocatable_slots;
502 VALUE deferred_final;
509 unsigned int latest_gc_info;
515 #if GC_PROFILE_MORE_DETAIL
520 size_t minor_gc_count;
521 size_t major_gc_count;
522 size_t compact_count;
523 size_t read_barrier_faults;
524 #if RGENGC_PROFILE > 0
525 size_t total_generated_normal_object_count;
526 size_t total_generated_shady_object_count;
527 size_t total_shade_operation_count;
528 size_t total_promoted_count;
529 size_t total_remembered_normal_object_count;
530 size_t total_remembered_shady_object_count;
532 #if RGENGC_PROFILE >= 2
533 size_t generated_normal_object_count_types[RUBY_T_MASK];
534 size_t generated_shady_object_count_types[RUBY_T_MASK];
537 size_t remembered_normal_object_count_types[RUBY_T_MASK];
538 size_t remembered_shady_object_count_types[RUBY_T_MASK];
543 double gc_sweep_start_time;
544 size_t total_allocated_objects_at_gc_start;
545 size_t heap_used_at_gc_start;
549 unsigned long long marking_time_ns;
551 unsigned long long sweeping_time_ns;
552 struct timespec sweeping_start_time;
555 size_t weak_references_count;
556 size_t retained_weak_references_count;
559 VALUE gc_stress_mode;
564 size_t last_major_gc;
565 size_t uncollectible_wb_unprotected_objects;
566 size_t uncollectible_wb_unprotected_objects_limit;
568 size_t old_objects_limit;
570 #if RGENGC_ESTIMATE_OLDMALLOC
571 size_t oldmalloc_increase;
572 size_t oldmalloc_increase_limit;
575 #if RGENGC_CHECK_MODE >= 2
582 size_t considered_count_table[T_MASK];
583 size_t moved_count_table[T_MASK];
584 size_t moved_up_count_table[T_MASK];
585 size_t moved_down_count_table[T_MASK];
589 gc_compact_compare_func compare_func;
600 #if GC_DEBUG_STRESS_TO_CLASS
601 VALUE stress_to_class;
604 rb_darray(VALUE *) weak_references;
607 unsigned long live_ractor_cache_count;
610 #ifndef HEAP_PAGE_ALIGN_LOG
612 #define HEAP_PAGE_ALIGN_LOG 16
615 #if RACTOR_CHECK_MODE || GC_DEBUG
616 struct rvalue_overhead {
617 # if RACTOR_CHECK_MODE
618 uint32_t _ractor_belonging_id;
627 # define RVALUE_OVERHEAD (sizeof(struct { \
629 struct rvalue_overhead overhead; \
633 size_t rb_gc_impl_obj_slot_size(VALUE obj);
634 # define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_impl_obj_slot_size(obj)))
636 # define RVALUE_OVERHEAD 0
639 #define BASE_SLOT_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]) + RVALUE_OVERHEAD)
642 # define MAX(a, b) (((a) > (b)) ? (a) : (b))
645 # define MIN(a, b) (((a) < (b)) ? (a) : (b))
647 #define roomof(x, y) (((x) + (y) - 1) / (y))
648 #define CEILDIV(i, mod) roomof(i, mod)
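/* roomof()/CEILDIV() is ceiling division, e.g. roomof(10, 4) == 3 and roomof(8, 4) == 2. */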
650 HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
651 HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
652 HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
653 HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
654 HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
655 HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
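/* Example, assuming a 64-bit build: HEAP_PAGE_SIZE is 64 KiB (HEAP_PAGE_ALIGN_LOG == 16)
 * and BASE_SLOT_SIZE is 40 bytes, so a page holds about 65536 / 40 = 1638 base slots and
 * each per-page bitmap needs CEILDIV(1638, 64) = 26 bits_t words (208 bytes). */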
657 #define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
658 #define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
660 #if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
661 # define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
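/* During incremental marking, each ractor cache counts slot allocations; once
 * incremental_mark_step_allocated_slots reaches this threshold, the fast allocation
 * path bails out so gc_continue() can run another marking step (the counter is reset
 * in newobj_cache_miss() below). */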
664 #undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
670 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
672 #elif defined(__wasm__)
676 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
678 #elif HAVE_CONST_PAGE_SIZE
680 static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
682 #elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
684 static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
686 #elif defined(PAGE_SIZE)
688 # define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
690 #elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
692 # define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
696 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
699 #ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
701 # define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
703 static bool heap_page_alloc_use_mmap;
706 #define RVALUE_AGE_BIT_COUNT 2
707 #define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
708 #define RVALUE_OLD_AGE 3
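/* Object ages live in 2-bit fields (RVALUE_AGE_BIT_COUNT) of each page's age_bits;
 * an object that survives enough GCs to reach RVALUE_OLD_AGE is promoted to the
 * old generation (see RVALUE_AGE_INC / RVALUE_OLD_UNCOLLECTIBLE_SET below). */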
716 unsigned short slot_size;
717 unsigned short total_slots;
718 unsigned short free_slots;
719 unsigned short final_slots;
720 unsigned short pinned_slots;
722 unsigned int before_sweep : 1;
723 unsigned int has_remembered_objects : 1;
724 unsigned int has_uncollectible_wb_unprotected_objects : 1;
733 struct ccan_list_node page_node;
735 bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
737 bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
738 bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
739 bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
741 bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
744 bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
745 bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
752 asan_lock_freelist(struct heap_page *page)
754 asan_poison_memory_region(&page->freelist, sizeof(struct free_list *));
761 asan_unlock_freelist(struct heap_page *page)
763 asan_unpoison_memory_region(&page->freelist, sizeof(struct free_list *), false);
769 if (page->total_slots == 0) {
770 GC_ASSERT(page->start == 0);
771 GC_ASSERT(page->slot_size == 0);
772 GC_ASSERT(page->heap == NULL);
773 GC_ASSERT(page->free_slots == 0);
774 asan_unpoisoning_memory_region(&page->freelist, sizeof(&page->freelist)) {
775 GC_ASSERT(page->freelist == NULL);
781 GC_ASSERT(page->start != 0);
782 GC_ASSERT(page->slot_size != 0);
783 GC_ASSERT(page->heap != NULL);
789 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
790 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
791 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
793 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
794 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
795 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
796 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
799 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
800 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
801 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
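/* Bitmap addressing: for a slot pointer p, NUM_IN_PAGE(p) is its slot index within the
 * HEAP_PAGE_ALIGN-aligned page, BITMAP_INDEX(p) picks the bits_t word and BITMAP_BIT(p)
 * the bit within that word. */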
804 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
805 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
806 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
807 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
808 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
810 #define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
812 #define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
813 #define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
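/* With 2 age bits per slot and 64-bit words, one age_bits word covers
 * BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT = 32 slots; the offset is the slot's
 * position within that word times RVALUE_AGE_BIT_COUNT. */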
816 RVALUE_AGE_GET(VALUE obj)
818 bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
819 return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
823 RVALUE_AGE_SET(VALUE obj, int age)
826 bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
828 age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
830 age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
831 if (age == RVALUE_OLD_AGE) {
839 #define malloc_limit objspace->malloc_params.limit
840 #define malloc_increase objspace->malloc_params.increase
841 #define malloc_allocated_size objspace->malloc_params.allocated_size
842 #define heap_pages_lomem objspace->heap_pages.range[0]
843 #define heap_pages_himem objspace->heap_pages.range[1]
844 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
845 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
846 #define heaps objspace->heaps
847 #define during_gc objspace->flags.during_gc
848 #define finalizing objspace->atomic_flags.finalizing
849 #define finalizer_table objspace->finalizer_table
850 #define ruby_gc_stressful objspace->flags.gc_stressful
851 #define ruby_gc_stress_mode objspace->gc_stress_mode
852 #if GC_DEBUG_STRESS_TO_CLASS
853 #define stress_to_class objspace->stress_to_class
854 #define set_stress_to_class(c) (stress_to_class = (c))
856 #define stress_to_class (objspace, 0)
857 #define set_stress_to_class(c) (objspace, (c))
861 #define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
862 #define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
863 #define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = (int)(b))
864 #define dont_gc_val() (objspace->flags.dont_gc)
866 #define dont_gc_on() (objspace->flags.dont_gc = 1)
867 #define dont_gc_off() (objspace->flags.dont_gc = 0)
868 #define dont_gc_set(b) (objspace->flags.dont_gc = (int)(b))
869 #define dont_gc_val() (objspace->flags.dont_gc)
872 #define gc_config_full_mark_set(b) (objspace->gc_config.full_mark = (int)(b))
873 #define gc_config_full_mark_val (objspace->gc_config.full_mark)
875 #ifndef DURING_GC_COULD_MALLOC_REGION_START
876 # define DURING_GC_COULD_MALLOC_REGION_START() \
877 assert(rb_during_gc()); \
878 bool _prev_enabled = rb_gc_impl_gc_enabled_p(objspace); \
879 rb_gc_impl_gc_disable(objspace, false)
882 #ifndef DURING_GC_COULD_MALLOC_REGION_END
883 # define DURING_GC_COULD_MALLOC_REGION_END() \
884 if (_prev_enabled) rb_gc_impl_gc_enable(objspace)
887 static inline enum gc_mode
888 gc_mode_verify(enum gc_mode mode)
890 #if RGENGC_CHECK_MODE > 0
893 case gc_mode_marking:
894 case gc_mode_sweeping:
895 case gc_mode_compacting:
898 rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
907 for (int i = 0; i < HEAP_COUNT; i++) {
908 if ((&heaps[i])->sweeping_page) {
919 for (int i = 0; i < HEAP_COUNT; i++) {
920 count += (&heaps[i])->total_pages;
929 for (int i = 0; i < HEAP_COUNT; i++) {
931 count += heap->total_allocated_objects;
940 for (int i = 0; i < HEAP_COUNT; i++) {
942 count += heap->total_freed_objects;
951 for (int i = 0; i < HEAP_COUNT; i++) {
953 count += heap->final_slots_count;
958 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
959 #define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
960 #define gc_needs_major_flags objspace->rgengc.need_major_gc
962 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
963 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
964 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
965 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
966 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
967 #define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
968 #define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
969 #define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
971 #if SIZEOF_LONG == SIZEOF_VOIDP
972 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG)
973 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
974 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
975 ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
977 # error not supported
983 void (*dfree)(void *);
987 #define RZOMBIE(o) ((struct RZombie *)(o))
989 int ruby_disable_gc = 0;
990 int ruby_enable_autocompact = 0;
991 #if RGENGC_CHECK_MODE
992 gc_compact_compare_func ruby_autocompact_compare_func;
996 static int garbage_collect(rb_objspace_t *, unsigned int reason);
998 static int gc_start(rb_objspace_t *objspace, unsigned int reason);
1001 enum gc_enter_event {
1002 gc_enter_event_start,
1003 gc_enter_event_continue,
1004 gc_enter_event_rest,
1005 gc_enter_event_finalizer,
1008 static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1009 static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1024 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1025 NO_SANITIZE("memory", static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr));
1027 static void gc_verify_internal_consistency(void *objspace_ptr);
1029 static double getrusage_time(void);
1030 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1033 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1034 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1035 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1036 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1037 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1040 #define gc_prof_record(objspace) (objspace)->profile.current_record
1041 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1043 #ifdef HAVE_VA_ARGS_MACRO
1044 # define gc_report(level, objspace, ...) \
1045 if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1047 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1049 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1051 static void gc_finalize_deferred(void *dmy);
1062 #if defined(__GNUC__) && defined(__i386__)
1063 typedef unsigned long long tick_t;
1064 #define PRItick "llu"
1065 static inline tick_t
1068 unsigned long long int x;
1069 __asm__ __volatile__ ("rdtsc" : "=A" (x));
1073 #elif defined(__GNUC__) && defined(__x86_64__)
1074 typedef unsigned long long tick_t;
1075 #define PRItick "llu"
1077 static __inline__ tick_t
1080 unsigned long hi, lo;
1081 __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1082 return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1085 #elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1086 typedef unsigned long long tick_t;
1087 #define PRItick "llu"
1089 static __inline__ tick_t
1092 unsigned long long val = __builtin_ppc_get_timebase();
1099 #elif defined(__POWERPC__) && defined(__APPLE__)
1100 typedef unsigned long long tick_t;
1101 #define PRItick "llu"
1103 static __inline__ tick_t
1106 unsigned long int upper, lower, tmp;
1107 # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1108 # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1113 } while (tmp != upper);
1114 return ((tick_t)upper << 32) | lower;
1117 #elif defined(__aarch64__) && defined(__GNUC__)
1118 typedef unsigned long tick_t;
1119 #define PRItick "lu"
1121 static __inline__ tick_t
1125 __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1130 #elif defined(_WIN32) && defined(_MSC_VER)
1132 typedef unsigned __int64 tick_t;
1133 #define PRItick "llu"
1135 static inline tick_t
1142 typedef clock_t tick_t;
1143 #define PRItick "llu"
1145 static inline tick_t
1152 #define MEASURE_LINE(expr) expr
1157 #define RVALUE_MARKED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1158 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1159 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1160 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1161 #define RVALUE_PINNED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1166 check_rvalue_consistency(objspace, obj);
1167 return RVALUE_MARKED_BITMAP(obj) != 0;
1173 check_rvalue_consistency(objspace, obj);
1174 return RVALUE_PINNED_BITMAP(obj) != 0;
1180 check_rvalue_consistency(objspace, obj);
1181 return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1187 check_rvalue_consistency(objspace, obj);
1188 return RVALUE_MARKING_BITMAP(obj) != 0;
1194 check_rvalue_consistency(objspace, obj);
1195 return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1201 check_rvalue_consistency(objspace, obj);
1202 return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1205 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1206 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1207 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1214 check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int terminate)
1218 int lev = rb_gc_vm_lock_no_barrier();
1221 fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1224 else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1225 struct heap_page *empty_page = objspace->empty_pages;
1226 while (empty_page) {
1227 if ((uintptr_t)empty_page->body <= (uintptr_t)obj &&
1228 (uintptr_t)obj < (uintptr_t)empty_page->body + HEAP_PAGE_SIZE) {
1229 GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, empty_page));
1230 fprintf(stderr, "check_rvalue_consistency: %p is in an empty page (%p).\n",
1231 (void *)obj, (void *)empty_page);
1236 fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1242 const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1243 const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1244 const int mark_bit = RVALUE_MARKED_BITMAP(obj) != 0;
1245 const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
1246 const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1247 const int age = RVALUE_AGE_GET((VALUE)obj);
1249 if (heap_page_in_global_empty_pages_pool(objspace, GET_HEAP_PAGE(obj))) {
1250 fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", rb_obj_info(obj));
1254 fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", rb_obj_info(obj));
1258 fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", rb_obj_info(obj));
1263 rb_obj_memsize_of((VALUE)obj);
1270 if (age > 0 && wb_unprotected_bit) {
1271 fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", rb_obj_info(obj), age);
1275 if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1276 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", rb_obj_info(obj));
1280 if (!is_full_marking(objspace)) {
1281 if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1282 fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1283 rb_obj_info(obj), age);
1286 if (remembered_bit && age != RVALUE_OLD_AGE) {
1287 fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1288 rb_obj_info(obj), age);
1300 if (is_incremental_marking(objspace) && marking_bit) {
1301 if (!is_marking(objspace) && !mark_bit) {
1302 fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", rb_obj_info(obj));
1308 rb_gc_vm_unlock_no_barrier(lev);
1310 if (err > 0 && terminate) {
1311 rb_bug("check_rvalue_consistency_force: there is %d errors.", err);
1316 #if RGENGC_CHECK_MODE == 0
1326 check_rvalue_consistency_force(objspace, obj, TRUE);
1339 asan_unpoisoning_object(obj) {
1350 check_rvalue_consistency(objspace, obj);
1359 MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1360 objspace->rgengc.old_objects++;
1362 #if RGENGC_PROFILE >= 2
1363 objspace->profile.total_promoted_count++;
1371 RB_DEBUG_COUNTER_INC(obj_promote);
1372 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1379 int age = RVALUE_AGE_GET((VALUE)obj);
1381 if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1382 rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", rb_obj_info(obj));
1386 RVALUE_AGE_SET(obj, age);
1388 if (age == RVALUE_OLD_AGE) {
1389 RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1392 check_rvalue_consistency(objspace, obj);
1398 check_rvalue_consistency(objspace, obj);
1399 GC_ASSERT(!RVALUE_OLD_P(objspace, obj));
1400 RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
1401 check_rvalue_consistency(objspace, obj);
1405 RVALUE_AGE_RESET(VALUE obj)
1407 RVALUE_AGE_SET(obj, 0);
1413 check_rvalue_consistency(objspace, obj);
1414 GC_ASSERT(RVALUE_OLD_P(objspace, obj));
1416 if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(objspace, obj)) {
1417 CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
1420 CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1421 RVALUE_AGE_RESET(obj);
1423 if (RVALUE_MARKED(objspace, obj)) {
1424 objspace->rgengc.old_objects--;
1427 check_rvalue_consistency(objspace, obj);
1433 return RVALUE_MARKED(objspace, obj) && !RVALUE_MARKING(objspace, obj);
1439 return !RVALUE_MARKED(objspace, obj);
1443 rb_gc_impl_gc_enabled_p(void *objspace_ptr)
1446 return !dont_gc_val();
1450 rb_gc_impl_gc_enable(void *objspace_ptr)
1458 rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
1462 if (finish_current_gc) {
1473 static inline void *
1476 return calloc(1, n);
1480 rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event)
1488 rb_gc_impl_get_total_time(void *objspace_ptr)
1492 unsigned long long marking_time = objspace->profile.marking_time_ns;
1493 unsigned long long sweeping_time = objspace->profile.sweeping_time_ns;
1495 return marking_time + sweeping_time;
1499 rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1503 objspace->flags.measure_gc = RTEST(flag) ? TRUE : FALSE;
1507 rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1511 return objspace->flags.measure_gc;
1517 size_t heap_idx = heap - heaps;
1518 return gc_params.heap_init_slots[heap_idx];
1522 object_id_cmp(st_data_t x, st_data_t y)
1533 object_id_hash(st_data_t n)
1538 #define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
1539 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
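/* Object IDs are handed out from next_object_id in steps of OBJ_ID_INCREMENT
 * (RUBY_IMMEDIATE_MASK + 1), starting at OBJ_ID_INITIAL; rb_gc_impl_object_id()
 * below records the id <-> obj pair in obj_to_id_tbl / id_to_obj_tbl on first use. */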
1541 static const struct st_hash_type object_id_hash_type = {
1548 rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
1554 asan_unpoisoning_object(ptr) {
1566 if (dead) return true;
1567 return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&
1568 !RVALUE_MARKED(objspace, ptr);
1572 rb_gc_impl_object_id_to_ref(void *objspace_ptr, VALUE object_id)
1577 if (st_lookup(objspace->id_to_obj_tbl, object_id, &obj) &&
1578 !rb_gc_impl_garbage_object_p(objspace, obj)) {
1591 rb_gc_impl_object_id(void *objspace_ptr, VALUE obj)
1596 unsigned int lev = rb_gc_vm_lock();
1599 if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &val)) {
1603 rb_bug("rb_gc_impl_object_id: FL_SEEN_OBJ_ID flag set but not found in table");
1607 GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, NULL));
1609 id = ULL2NUM(objspace->next_object_id);
1610 objspace->next_object_id += OBJ_ID_INCREMENT;
1612 st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
1613 st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
1616 rb_gc_vm_unlock(lev);
1628 asan_unpoison_object(obj, false);
1630 asan_unlock_freelist(page);
1634 slot->next = page->freelist;
1635 page->freelist = slot;
1636 asan_lock_freelist(page);
1638 RVALUE_AGE_RESET(obj);
1640 if (RGENGC_CHECK_MODE &&
1642 !(page->start <= (uintptr_t)obj &&
1643 (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1644 obj % BASE_SLOT_SIZE == 0)) {
1645 rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)obj);
1648 asan_poison_object(obj);
1649 gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1654 rb_heap_t *heap, size_t free_slots, size_t total_slots)
1656 double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1657 size_t target_total_slots;
1659 if (goal_ratio == 0.0) {
1660 target_total_slots = (size_t)(total_slots * gc_params.growth_factor);
1662 else if (total_slots == 0) {
1663 target_total_slots = minimum_slots_for_heap(objspace, heap);
1669 double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1671 if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1672 if (f < 1.0) f = 1.1;
1674 target_total_slots = (size_t)(f * total_slots);
1678 "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1679 " G(%1.2f), f(%1.2f),"
1680 " total_slots(%8"PRIuSIZE") => target_total_slots(%8"PRIuSIZE")\n",
1681 free_slots, total_slots, free_slots/(double)total_slots,
1682 goal_ratio, f, total_slots, target_total_slots);
1686 if (gc_params.growth_max_slots > 0) {
1687 size_t max_total_slots = (size_t)(total_slots + gc_params.growth_max_slots);
1688 if (target_total_slots > max_total_slots) target_total_slots = max_total_slots;
1691 size_t extend_slot_count = target_total_slots - total_slots;
1693 if (extend_slot_count == 0) extend_slot_count = 1;
1695 objspace->heap_pages.allocatable_slots += extend_slot_count;
1701 asan_unlock_freelist(page);
1702 GC_ASSERT(page->free_slots != 0);
1703 GC_ASSERT(page->freelist != NULL);
1705 page->free_next = heap->free_pages;
1706 heap->free_pages = page;
1708 RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1716 asan_unlock_freelist(page);
1717 GC_ASSERT(page->free_slots != 0);
1718 GC_ASSERT(page->freelist != NULL);
1720 page->free_next = heap->pooled_pages;
1721 heap->pooled_pages = page;
1722 objspace->rincgc.pooled_slots += page->free_slots;
1724 asan_lock_freelist(page);
1730 ccan_list_del(&page->page_node);
1731 heap->total_pages--;
1732 heap->total_slots -= page->total_slots;
1736 gc_aligned_free(void *ptr, size_t size)
1738 #if defined __MINGW32__
1739 __mingw_aligned_free(ptr);
1740 #elif defined _WIN32
1742 #elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
1745 free(((void**)ptr)[-1]);
1752 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1754 if (HEAP_PAGE_ALLOC_USE_MMAP) {
1756 GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
1757 if (munmap(page_body, HEAP_PAGE_SIZE)) {
1758 rb_bug("heap_page_body_free: munmap failed");
1763 gc_aligned_free(page_body, HEAP_PAGE_SIZE);
1770 objspace->heap_pages.freed_pages++;
1771 heap_page_body_free(page->body);
1778 size_t pages_to_keep_count =
1780 CEILDIV(objspace->heap_pages.allocatable_slots, HEAP_PAGE_OBJ_LIMIT) *
1782 (1 << (HEAP_COUNT / 2));
1784 if (objspace->empty_pages != NULL && objspace->empty_pages_count > pages_to_keep_count) {
1785 GC_ASSERT(objspace->empty_pages_count > 0);
1786 objspace->empty_pages = NULL;
1787 objspace->empty_pages_count = 0;
1790 for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
1791 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
1793 if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count == 0) {
1794 heap_page_free(objspace, page);
1797 if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count > 0) {
1798 page->free_next = objspace->empty_pages;
1799 objspace->empty_pages = page;
1800 objspace->empty_pages_count++;
1801 pages_to_keep_count--;
1805 rb_darray_set(objspace->heap_pages.sorted, j, page);
1811 rb_darray_pop(objspace->heap_pages.sorted, i - j);
1812 GC_ASSERT(rb_darray_size(objspace->heap_pages.sorted) == j);
1814 struct heap_page *hipage = rb_darray_get(objspace->heap_pages.sorted, rb_darray_size(objspace->heap_pages.sorted) - 1);
1815 uintptr_t himem = (uintptr_t)hipage->body + HEAP_PAGE_SIZE;
1816 GC_ASSERT(himem <= heap_pages_himem);
1817 heap_pages_himem = himem;
1819 struct heap_page *lopage = rb_darray_get(objspace->heap_pages.sorted, 0);
1820 uintptr_t lomem = (uintptr_t)lopage->body + sizeof(struct heap_page_header);
1821 GC_ASSERT(lomem >= heap_pages_lomem);
1822 heap_pages_lomem = lomem;
1827 gc_aligned_malloc(size_t alignment, size_t size)
1830 GC_ASSERT(((alignment - 1) & alignment) == 0);
1831 GC_ASSERT(alignment % sizeof(void*) == 0);
1835 #if defined __MINGW32__
1836 res = __mingw_aligned_malloc(size, alignment);
1837 #elif defined _WIN32
1838 void *_aligned_malloc(size_t, size_t);
1839 res = _aligned_malloc(size, alignment);
1840 #elif defined(HAVE_POSIX_MEMALIGN)
1841 if (posix_memalign(&res, alignment, size) != 0) {
1844 #elif defined(HAVE_MEMALIGN)
1845 res = memalign(alignment, size);
1848 res = malloc(alignment + size + sizeof(void*));
1849 aligned = (char*)res + alignment + sizeof(void*);
1850 aligned -= ((VALUE)aligned & (alignment - 1));
1851 ((void**)aligned)[-1] = res;
1852 res = (void*)aligned;
1855 GC_ASSERT((uintptr_t)res % alignment == 0);
1861 heap_page_body_allocate(void)
1865 if (HEAP_PAGE_ALLOC_USE_MMAP) {
1867 GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
1869 char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
1870 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1871 if (ptr == MAP_FAILED) {
1875 char *aligned = ptr + HEAP_PAGE_ALIGN;
1876 aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
1877 GC_ASSERT(aligned > ptr);
1878 GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
1880 size_t start_out_of_range_size = aligned - ptr;
1881 GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1882 if (start_out_of_range_size > 0) {
1883 if (munmap(ptr, start_out_of_range_size)) {
1884 rb_bug("heap_page_body_allocate: munmap failed for start");
1888 size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
1889 GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1890 if (end_out_of_range_size > 0) {
1891 if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
1892 rb_bug("heap_page_body_allocate: munmap failed for end");
1900 page_body = gc_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1903 GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1912 if (objspace->empty_pages != NULL) {
1913 GC_ASSERT(objspace->empty_pages_count > 0);
1914 objspace->empty_pages_count--;
1915 page = objspace->empty_pages;
1916 objspace->empty_pages = page->free_next;
1926 if (page_body == 0) {
1932 heap_page_body_free(page_body);
1937 uintptr_t end = (uintptr_t)page_body + HEAP_PAGE_SIZE;
1940 size_t hi = rb_darray_size(objspace->heap_pages.sorted);
1944 size_t mid = (lo + hi) / 2;
1945 mid_page = rb_darray_get(objspace->heap_pages.sorted, mid);
1946 if ((uintptr_t)mid_page->start < start) {
1949 else if ((uintptr_t)mid_page->start > start) {
1953 rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1957 rb_darray_insert(&objspace->heap_pages.sorted, hi, page);
1959 if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1960 if (heap_pages_himem < end) heap_pages_himem = end;
1962 page->body = page_body;
1963 page_body->header.page = page;
1965 objspace->heap_pages.allocated_pages++;
1974 GC_ASSERT(!heap->sweeping_page);
1975 GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, page));
1979 if (start % BASE_SLOT_SIZE != 0) {
1980 int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
1981 start = start + delta;
1982 GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1988 if (NUM_IN_PAGE(start) == 1) {
1989 start += heap->slot_size - BASE_SLOT_SIZE;
1992 GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % heap->slot_size == 0);
1995 int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/heap->slot_size);
1997 page->start = start;
1998 page->total_slots = slot_count;
1999 page->slot_size = heap->slot_size;
2002 asan_unlock_freelist(page);
2003 page->freelist = NULL;
2004 asan_unpoison_memory_region(page->body, HEAP_PAGE_SIZE,
false);
2005 for (VALUE p = (VALUE)start; p < start + (slot_count * heap->slot_size); p += heap->slot_size) {
2006 heap_page_add_freeobj(objspace, page, p);
2008 asan_lock_freelist(page);
2010 page->free_slots = slot_count;
2012 heap->total_allocated_pages++;
2014 ccan_list_add_tail(&heap->pages, &page->page_node);
2015 heap->total_pages++;
2016 heap->total_slots += page->total_slots;
2022 if (objspace->heap_pages.allocatable_slots > 0) {
2023 gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
2024 "allocatable_slots: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2025 rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, heap->total_pages);
2027 struct heap_page *page = heap_page_resurrect(objspace);
2029 page = heap_page_allocate(objspace);
2031 heap_add_page(objspace, heap, page);
2032 heap_add_freepage(heap, page);
2034 if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
2035 objspace->heap_pages.allocatable_slots -= page->total_slots;
2038 objspace->heap_pages.allocatable_slots = 0;
2050 size_t prev_allocatable_slots = objspace->heap_pages.allocatable_slots;
2052 objspace->heap_pages.allocatable_slots = 1;
2053 heap_page_allocate_and_initialize(objspace, heap);
2054 GC_ASSERT(heap->free_pages != NULL);
2055 objspace->heap_pages.allocatable_slots = prev_allocatable_slots;
2061 unsigned int lock_lev;
2062 gc_enter(objspace, gc_enter_event_continue, &lock_lev);
2065 if (is_incremental_marking(objspace)) {
2066 if (gc_marks_continue(objspace, heap)) {
2073 if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2074 gc_sweep_continue(objspace, heap);
2077 gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2083 GC_ASSERT(heap->free_pages == NULL);
2085 if (heap->total_slots < gc_params.heap_init_slots[heap - heaps] &&
2086 heap->sweeping_page == NULL) {
2087 heap_page_allocate_and_initialize_force(objspace, heap);
2088 GC_ASSERT(heap->free_pages != NULL);
2093 gc_continue(objspace, heap);
2095 if (heap->free_pages == NULL) {
2096 heap_page_allocate_and_initialize(objspace, heap);
2101 if (heap->free_pages == NULL) {
2102 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2106 if (objspace->heap_pages.allocatable_slots == 0 && !gc_config_full_mark_val) {
2107 heap_allocatable_slots_expand(objspace, heap,
2108 heap->freed_slots + heap->empty_slots,
2110 GC_ASSERT(objspace->heap_pages.allocatable_slots > 0);
2113 gc_continue(objspace, heap);
2118 if (heap->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, heap)) {
2119 if (gc_needs_major_flags == GPR_FLAG_NONE) {
2120 rb_bug("cannot create a new page after GC");
2123 if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2128 gc_continue(objspace, heap);
2130 if (heap->free_pages == NULL &&
2131 !heap_page_allocate_and_initialize(objspace, heap)) {
2132 rb_bug("cannot create a new page after major GC");
2140 GC_ASSERT(heap->free_pages != NULL);
2154 static inline const char*
2155 rb_gc_impl_source_location_cstr(int *ptr)
2174 #if !__has_feature(memory_sanitizer)
2178 RBASIC(obj)->flags = flags;
2183 RVALUE_AGE_SET_CANDIDATE(objspace, obj);
2186 #if RACTOR_CHECK_MODE
2187 void rb_ractor_setup_belonging(VALUE obj);
2188 rb_ractor_setup_belonging(obj);
2191 #if RGENGC_CHECK_MODE
2192 newobj_fill(obj, 0, 0, 0);
2194 int lev = rb_gc_vm_lock_no_barrier();
2196 check_rvalue_consistency(objspace, obj);
2198 GC_ASSERT(RVALUE_MARKED(objspace, obj) == FALSE);
2199 GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
2200 GC_ASSERT(RVALUE_OLD_P(objspace, obj) == FALSE);
2201 GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, obj) == FALSE);
2203 if (RVALUE_REMEMBERED(objspace, obj)) rb_bug("newobj: %s is remembered.", rb_obj_info(obj));
2205 rb_gc_vm_unlock_no_barrier(lev);
2209 MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2214 objspace->profile.total_generated_normal_object_count++;
2215 #if RGENGC_PROFILE >= 2
2216 objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2220 objspace->profile.total_generated_shady_object_count++;
2221 #if RGENGC_PROFILE >= 2
2222 objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2228 GET_RVALUE_OVERHEAD(obj)->file = rb_gc_impl_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
2232 gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
2234 RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
2239 rb_gc_impl_obj_slot_size(VALUE obj)
2241 return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2244 static inline size_t
2245 heap_slot_size(unsigned char pool_id)
2247 GC_ASSERT(pool_id < HEAP_COUNT);
2249 size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2251 #if RGENGC_CHECK_MODE
2253 GC_ASSERT(heaps[pool_id].slot_size == (short)slot_size);
2256 slot_size -= RVALUE_OVERHEAD;
2262 rb_gc_impl_size_allocatable_p(size_t size)
2264 return size <= heap_slot_size(HEAP_COUNT - 1);
2272 struct free_slot *p = heap_cache->freelist;
2274 if (RB_UNLIKELY(is_incremental_marking(objspace))) {
2276 if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2281 cache->incremental_mark_step_allocated_slots++;
2287 MAYBE_UNUSED(const size_t) stride = heap_slot_size(heap_idx);
2288 heap_cache->freelist = p->next;
2289 asan_unpoison_memory_region(p, stride, true);
2290 #if RGENGC_CHECK_MODE
2291 GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == stride);
2293 MEMZERO((char *)obj, char, stride);
2307 if (heap->free_pages == NULL) {
2308 heap_prepare(objspace, heap);
2311 page = heap->free_pages;
2312 heap->free_pages = page->free_next;
2314 GC_ASSERT(page->free_slots != 0);
2316 asan_unlock_freelist(page);
2325 gc_report(3, objspace, "ractor_set_cache: Using page %p\n", (void *)page->body);
2329 GC_ASSERT(heap_cache->freelist == NULL);
2330 GC_ASSERT(page->free_slots != 0);
2331 GC_ASSERT(page->freelist != NULL);
2333 heap_cache->using_page = page;
2334 heap_cache->freelist = page->freelist;
2335 page->free_slots = 0;
2336 page->freelist = NULL;
2338 asan_unpoison_object((VALUE)heap_cache->freelist, false);
2340 asan_poison_object((VALUE)heap_cache->freelist);
2343 static inline size_t
2344 heap_idx_for_size(size_t size)
2346 size += RVALUE_OVERHEAD;
2348 size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2351 size_t heap_idx = 64 - nlz_int64(slot_count - 1);
2353 if (heap_idx >= HEAP_COUNT) {
2354 rb_bug("heap_idx_for_size: allocation size too large "
2355 "(size=%"PRIuSIZE"u, heap_idx=%"PRIuSIZE"u)", size, heap_idx);
2358 #if RGENGC_CHECK_MODE
2360 GC_ASSERT(size <= (size_t)heaps[heap_idx].slot_size);
2361 if (heap_idx > 0) GC_ASSERT(size > (size_t)heaps[heap_idx - 1].slot_size);
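/* Sketch of the resulting mapping, assuming a 40-byte BASE_SLOT_SIZE and RVALUE_OVERHEAD == 0:
 * heap 0 serves requests up to 40 bytes, heap 1 up to 80, heap 2 up to 160, heap 3 up to 320
 * and heap 4 up to 640, since each heap's slot size is (1 << heap_idx) * BASE_SLOT_SIZE. */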
2368 rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
2370 return heap_idx_for_size(size);
2374 static size_t heap_sizes[HEAP_COUNT + 1] = { 0 };
2377 rb_gc_impl_heap_sizes(void *objspace_ptr)
2379 if (heap_sizes[0] == 0) {
2380 for (unsigned char i = 0; i < HEAP_COUNT; i++) {
2381 heap_sizes[i] = heap_slot_size(i);
2396 unsigned int lev = 0;
2397 bool unlock_vm = false;
2400 lev = rb_gc_cr_lock();
2406 if (is_incremental_marking(objspace)) {
2407 gc_continue(objspace, heap);
2408 cache->incremental_mark_step_allocated_slots = 0;
2411 obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2416 struct heap_page *page = heap_next_free_page(objspace, heap);
2417 ractor_cache_set_page(objspace, cache, heap_idx, page);
2420 obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2425 rb_gc_cr_unlock(lev);
2437 VALUE obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2440 obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
2444 heap->total_allocated_objects++;
2445 GC_ASSERT(rb_gc_multi_ractor_p() ||
2446 heap->total_slots >=
2447 (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
2460 lev = rb_gc_cr_lock();
2462 if (RB_UNLIKELY(during_gc || ruby_gc_stressful)) {
2466 rb_bug("object allocation during garbage collection phase");
2469 if (ruby_gc_stressful) {
2470 if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2476 obj = newobj_alloc(objspace, cache, heap_idx, true);
2477 newobj_init(klass, flags, wb_protected, objspace, obj);
2479 rb_gc_cr_unlock(lev);
2484 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2486 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2492 return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx);
2498 return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx);
2502 rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
2507 RB_DEBUG_COUNTER_INC(obj_newobj);
2508 (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2512 for (long i = 0; i < cnt; i++) {
2517 size_t heap_idx = heap_idx_for_size(alloc_size);
2521 if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) &&
2523 obj = newobj_alloc(objspace, cache, heap_idx, false);
2524 newobj_init(klass, flags, wb_protected, objspace, obj);
2527 RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2529 obj = wb_protected ?
2530 newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) :
2531 newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx);
2534 return newobj_fill(obj, v1, v2, v3);
2538 ptr_in_page_body_p(const void *ptr, const void *memb)
2541 uintptr_t p_body = (uintptr_t)page->body;
2543 if ((uintptr_t)ptr >= p_body) {
2544 return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
2557 if (ptr < (uintptr_t)heap_pages_lomem ||
2558 ptr > (uintptr_t)heap_pages_himem) {
2562 res = bsearch((void *)ptr, rb_darray_ref(objspace->heap_pages.sorted, 0),
2563 rb_darray_size(objspace->heap_pages.sorted), sizeof(struct heap_page *),
2564 ptr_in_page_body_p);
2574 PUREFUNC(static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr);)
2578 register uintptr_t p = (uintptr_t)ptr;
2581 RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2583 if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2584 RB_DEBUG_COUNTER_INC(gc_isptr_range);
2586 if (p % BASE_SLOT_SIZE != 0) return FALSE;
2587 RB_DEBUG_COUNTER_INC(gc_isptr_align);
2589 page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
2591 RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2592 if (heap_page_in_global_empty_pages_pool(objspace, page)) {
2596 if (p < page->start) return FALSE;
2597 if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
2598 if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
2607 rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
2609 return is_pointer_to_heap(objspace_ptr, ptr);
2612 #define ZOMBIE_OBJ_KEPT_FLAGS (FL_SEEN_OBJ_ID | FL_FINALIZE)
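/* A zombie keeps only FL_SEEN_OBJ_ID and FL_FINALIZE of its original flags, so
 * deferred finalization below can still drop its object-id mapping and find its
 * entry in finalizer_table after the object body has been reclaimed. */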
2615 rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
2619 struct RZombie *zombie = RZOMBIE(obj);
2621 zombie->dfree = dfree;
2622 zombie->data = data;
2623 VALUE prev, next = heap_pages_deferred_final;
2625 zombie->next = prev = next;
2627 } while (next != prev);
2629 struct heap_page *page = GET_HEAP_PAGE(obj);
2630 page->final_slots++;
2631 page->heap->final_slots_count++;
2637 st_data_t o = (st_data_t)obj, id;
2642 if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
2644 st_delete(objspace->id_to_obj_tbl, &id, NULL);
2647 rb_bug("Object ID seen, but not in mapping table: %s", rb_obj_info(obj));
2651 typedef int each_obj_callback(void *, void *, size_t, void *);
2652 typedef int each_page_callback(struct heap_page *, void *);
2656 bool reenable_incremental;
2658 each_obj_callback *each_obj_callback;
2659 each_page_callback *each_page_callback;
2663 size_t pages_counts[HEAP_COUNT];
2667 objspace_each_objects_ensure(VALUE arg)
2673 if (data->reenable_incremental) {
2674 objspace->flags.dont_incremental = FALSE;
2677 for (int i = 0; i < HEAP_COUNT; i++) {
2678 struct heap_page **pages = data->pages[i];
2686 objspace_each_objects_try(VALUE arg)
2692 for (int i = 0; i < HEAP_COUNT; i++) {
2694 size_t size = heap->total_pages * sizeof(struct heap_page *);
2696 struct heap_page **pages = malloc(size);
2705 size_t pages_count = 0;
2706 ccan_list_for_each(&heap->pages, page, page_node) {
2707 pages[pages_count] = page;
2710 data->pages[i] = pages;
2711 data->pages_counts[i] = pages_count;
2712 GC_ASSERT(pages_count == heap->total_pages);
2715 for (int i = 0; i < HEAP_COUNT; i++) {
2717 size_t pages_count = data->pages_counts[i];
2718 struct heap_page **pages = data->pages[i];
2721 for (size_t i = 0; i < pages_count; i++) {
2724 if (page == NULL) break;
2728 if (pages[i] != page) continue;
2730 uintptr_t pstart = (uintptr_t)page->start;
2731 uintptr_t pend = pstart + (page->total_slots * heap->slot_size);
2733 if (data->each_obj_callback &&
2734 (*data->each_obj_callback)((void *)pstart, (void *)pend, heap->slot_size, data->data)) {
2737 if (data->each_page_callback &&
2738 (*data->each_page_callback)(page, data->data)) {
2742 page = ccan_list_next(&heap->pages, page, page_node);
2754 bool reenable_incremental = FALSE;
2756 reenable_incremental = !objspace->flags.dont_incremental;
2759 objspace->flags.dont_incremental = TRUE;
2770 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
2773 .objspace = objspace,
2774 .each_obj_callback = callback,
2775 .each_page_callback = NULL,
2782 rb_gc_impl_each_objects(void *objspace_ptr, each_obj_callback *callback, void *data)
2784 objspace_each_objects(objspace_ptr, callback, data, TRUE);
2787 #if GC_CAN_COMPILE_COMPACTION
2789 objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
2792 .objspace = objspace,
2793 .each_obj_callback = NULL,
2794 .each_page_callback = callback,
2802 rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
2812 if (st_lookup(finalizer_table, obj, &data)) {
2813 table = (VALUE)data;
2820 for (i = 0; i < len; i++) {
2833 st_add_direct(finalizer_table, obj, table);
2840 rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
2846 st_data_t data = obj;
2847 st_delete(finalizer_table, &data, 0);
2852 rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
2860 if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {
2861 table = (VALUE)data;
2862 st_insert(finalizer_table, dest, table);
2866 rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
2874 return rb_gc_impl_object_id(objspace, obj);
2878 objspace->next_object_id += OBJ_ID_INCREMENT;
2884 get_final(long i, void *data)
2894 if (RZOMBIE(zombie)->dfree) {
2895 RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
2898 st_data_t key = (st_data_t)zombie;
2902 if (st_delete(finalizer_table, &key, &table)) {
2903 rb_gc_run_obj_finalizer(get_object_id_in_finalizer(objspace, zombie), RARRAY_LEN(table), get_final, (void *)table);
2906 rb_bug("FL_FINALIZE flag is set, but finalizers are not found");
2910 GC_ASSERT(!st_lookup(finalizer_table, key, NULL));
2920 asan_unpoison_object(zombie, false);
2921 next_zombie = RZOMBIE(zombie)->next;
2922 page = GET_HEAP_PAGE(zombie);
2924 run_final(objspace, zombie);
2926 int lev = rb_gc_vm_lock();
2930 obj_free_object_id(objspace, zombie);
2933 GC_ASSERT(page->heap->final_slots_count > 0);
2934 GC_ASSERT(page->final_slots > 0);
2936 page->heap->final_slots_count--;
2937 page->final_slots--;
2939 heap_page_add_freeobj(objspace, page, zombie);
2940 page->heap->total_freed_objects++;
2942 rb_gc_vm_unlock(lev);
2944 zombie = next_zombie;
2953 finalize_list(objspace, zombie);
2960 rb_gc_set_pending_interrupt();
2961 finalize_deferred_heap_pages(objspace);
2962 rb_gc_unset_pending_interrupt();
2966 gc_finalize_deferred(void *dmy)
2971 finalize_deferred(objspace);
2985 gc_abort(void *objspace_ptr)
2989 if (is_incremental_marking(objspace)) {
2992 while (pop_mark_stack(&objspace->mark_stack, &obj));
2994 objspace->flags.during_incremental_marking = FALSE;
2997 if (is_lazy_sweeping(objspace)) {
2998 for (int i = 0; i < HEAP_COUNT; i++) {
3001 heap->sweeping_page = NULL;
3004 ccan_list_for_each(&heap->pages, page, page_node) {
3005 page->flags.before_sweep = false;
3010 for (int i = 0; i < HEAP_COUNT; i++) {
3012 rgengc_mark_and_rememberset_clear(objspace, heap);
3015 gc_mode_set(objspace, gc_mode_none);
3019 rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
3023 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3024 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3025 short stride = page->slot_size;
3027 uintptr_t p = (uintptr_t)page->start;
3028 uintptr_t pend = p + page->total_slots * stride;
3029 for (; p < pend; p += stride) {
3031 asan_unpoisoning_object(vp) {
3033 if (rb_gc_obj_free(objspace, vp)) {
3043 rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t data)
3052 rb_gc_run_obj_finalizer(rb_gc_impl_object_id(objspace, obj), RARRAY_LEN(table), get_final, (void *)table);
3060 rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
3064 #if RGENGC_CHECK_MODE >= 2
3065 gc_verify_internal_consistency(objspace);
3069 objspace->flags.dont_incremental = 1;
3078 while (finalizer_table->num_entries) {
3079 st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, (st_data_t)objspace);
3083 finalize_deferred(objspace);
3084 GC_ASSERT(heap_pages_deferred_final == 0);
3093 unsigned int lock_lev;
3094 gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
3097 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3098 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3099 short stride = page->slot_size;
3101 uintptr_t p = (uintptr_t)page->start;
3102 uintptr_t pend = p + page->total_slots * stride;
3103 for (; p < pend; p += stride) {
3105 asan_unpoisoning_object(vp) {
3106 if (rb_gc_shutdown_call_finalizer_p(vp)) {
3107 if (rb_gc_obj_free(objspace, vp)) {
3115 gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
3117 finalize_deferred_heap_pages(objspace);
3119 st_free_table(finalizer_table);
3120 finalizer_table = 0;
3125 rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data)
3129 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3130 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3131 short stride = page->slot_size;
3133 uintptr_t p = (uintptr_t)page->start;
3134 uintptr_t pend = p + page->total_slots * stride;
3135 for (; p < pend; p += stride) {
3138 asan_unpoisoning_object(obj) {
3154 size_t total_slots = 0;
3155 for (int i = 0; i < HEAP_COUNT; i++) {
3157 total_slots += heap->total_slots;
3165 return total_allocated_objects(objspace) - total_freed_objects(objspace) - total_final_slots_count(objspace);
3171 return objspace_available_slots(objspace) - objspace_live_slots(objspace) - total_final_slots_count(objspace);
3175 gc_setup_mark_bits(struct heap_page *page)
3178 memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
3185 enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
3191 return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
3194 enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
3195 #define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
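/* Note (added comment, not in the original source): during compaction, pages that
 * contain T_MOVED forwarding objects are protected with protect_page_body()
 * (mprotect(PROT_NONE) / VirtualProtect(PAGE_NOACCESS)), so a stray read through a
 * stale reference faults and is caught by the read barrier handler further below. */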
3201 if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
3202 rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
3205 gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
3212 if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
3213 rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
3216 gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
3223 GC_ASSERT(gc_is_moveable_obj(objspace, src));
3225 struct heap_page *src_page = GET_HEAP_PAGE(src);
3233 GC_ASSERT(RVALUE_MARKED(objspace, src));
3235 asan_unlock_freelist(free_page);
3237 asan_lock_freelist(free_page);
3238 asan_unpoison_object(dest, false);
3244 asan_unlock_freelist(free_page);
3245 free_page->freelist = ((struct free_slot *)dest)->next;
3246 asan_lock_freelist(free_page);
3250 if (src_page->slot_size > free_page->slot_size) {
3251 objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
3253 else if (free_page->slot_size > src_page->slot_size) {
3254 objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
3256 objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
3257 objspace->rcompactor.total_moved++;
3259 gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
3260 gc_pin(objspace, src);
3261 free_page->free_slots--;
3269 struct heap_page *cursor = heap->compact_cursor;
3272 unlock_page_body(objspace, cursor->body);
3273 cursor = ccan_list_next(&heap->pages, cursor, page_node);
3278 #if GC_CAN_COMPILE_COMPACTION
3282 #if defined(__MINGW32__) || defined(_WIN32)
3283 # define GC_COMPACTION_SUPPORTED 1
3287 # define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
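/* Note (added comment, not in the original source): read_barrier_handler() receives
 * the faulting address, rounds it down to a BASE_SLOT_SIZE boundary, unlocks the
 * containing page body and calls invalidate_moved_page() on it. The compaction read
 * barrier is therefore implemented with page protection plus a SIGSEGV/SIGBUS (or
 * Windows exception filter) handler rather than explicit checks on every access. */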
3290 #if GC_CAN_COMPILE_COMPACTION
3292 read_barrier_handler(uintptr_t original_address)
3298 uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
3300 obj = (VALUE)address;
3306 if (page_body == NULL) {
3307 rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
3310 int lev = rb_gc_vm_lock();
3312 unlock_page_body(objspace, page_body);
3314 objspace->profile.read_barrier_faults++;
3316 invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
3318 rb_gc_vm_unlock(lev);
3322 #if !GC_CAN_COMPILE_COMPACTION
3324 uninstall_handlers(void)
3330 install_handlers(void)
3334 #elif defined(_WIN32)
3335 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
3336 typedef void (*signal_handler)(int);
3337 static signal_handler old_sigsegv_handler;
3340 read_barrier_signal(EXCEPTION_POINTERS *info)
3343 if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
3348 read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
3349 return EXCEPTION_CONTINUE_EXECUTION;
3352 return EXCEPTION_CONTINUE_SEARCH;
3357 uninstall_handlers(void)
3359 signal(SIGSEGV, old_sigsegv_handler);
3360 SetUnhandledExceptionFilter(old_handler);
3364 install_handlers(void)
3367 old_sigsegv_handler = signal(SIGSEGV, NULL);
3370 old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
3373 static struct sigaction old_sigbus_handler;
3374 static struct sigaction old_sigsegv_handler;
3376 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3377 static exception_mask_t old_exception_masks[32];
3378 static mach_port_t old_exception_ports[32];
3379 static exception_behavior_t old_exception_behaviors[32];
3380 static thread_state_flavor_t old_exception_flavors[32];
3381 static mach_msg_type_number_t old_exception_count;
3384 disable_mach_bad_access_exc(void)
3386 old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
3387 task_swap_exception_ports(
3388 mach_task_self(), EXC_MASK_BAD_ACCESS,
3389 MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
3390 old_exception_masks, &old_exception_count,
3391 old_exception_ports, old_exception_behaviors, old_exception_flavors
3396 restore_mach_bad_access_exc(void)
3398 for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
3399 task_set_exception_ports(
3401 old_exception_masks[i], old_exception_ports[i],
3402 old_exception_behaviors[i], old_exception_flavors[i]
3409 read_barrier_signal(int sig, siginfo_t *info, void *data)
3412 struct sigaction prev_sigbus, prev_sigsegv;
3413 sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
3414 sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
3417 sigset_t set, prev_set;
3419 sigaddset(&set, SIGBUS);
3420 sigaddset(&set, SIGSEGV);
3421 sigprocmask(SIG_UNBLOCK, &set, &prev_set);
3422 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3423 disable_mach_bad_access_exc();
3426 read_barrier_handler((uintptr_t)info->si_addr);
3429 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3430 restore_mach_bad_access_exc();
3432 sigaction(SIGBUS, &prev_sigbus, NULL);
3433 sigaction(SIGSEGV, &prev_sigsegv, NULL);
3434 sigprocmask(SIG_SETMASK, &prev_set, NULL);
3438 uninstall_handlers(void)
3440 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3441 restore_mach_bad_access_exc();
3443 sigaction(SIGBUS, &old_sigbus_handler, NULL);
3444 sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
3448 install_handlers(void)
3450 struct sigaction action;
3451 memset(&action, 0, sizeof(struct sigaction));
3452 sigemptyset(&action.sa_mask);
3453 action.sa_sigaction = read_barrier_signal;
3454 action.sa_flags = SA_SIGINFO | SA_ONSTACK;
3456 sigaction(SIGBUS, &action, &old_sigbus_handler);
3457 sigaction(SIGSEGV, &action, &old_sigsegv_handler);
3458 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3459 disable_mach_bad_access_exc();
3467 for (int i = 0; i < HEAP_COUNT; i++) {
3469 gc_unprotect_pages(objspace, heap);
3472 uninstall_handlers();
3474 gc_update_references(objspace);
3475 objspace->profile.compact_count++;
3477 for (int i = 0; i < HEAP_COUNT; i++) {
3479 heap->compact_cursor = NULL;
3480 heap->free_pages = NULL;
3481 heap->compact_cursor_index = 0;
3484 if (gc_prof_enabled(objspace)) {
3486 record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
3488 objspace->flags.during_compacting = FALSE;
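/* Note (added comment, not in the original source): gc_sweep_plane()/gc_sweep_page()
 * walk one word of the mark bitmap at a time; each unmarked slot that is not
 * T_ZOMBIE/T_MOVED is freed with rb_gc_obj_free() and pushed onto the page freelist,
 * while bits past the last slot of the page are pre-set so they read as "marked". */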
3501 struct heap_page *sweep_page = ctx->page;
3502 short slot_size = sweep_page->slot_size;
3503 short slot_bits = slot_size / BASE_SLOT_SIZE;
3504 GC_ASSERT(slot_bits > 0);
3508 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
3510 asan_unpoison_object(vp, false);
3514 gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
3515 #if RGENGC_CHECK_MODE
3516 if (!is_full_marking(objspace)) {
3517 if (RVALUE_OLD_P(objspace, vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
3518 if (RVALUE_REMEMBERED(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
3522 if (RVALUE_WB_UNPROTECTED(objspace, vp)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(vp), vp);
3524 #if RGENGC_CHECK_MODE
3525 #define CHECK(x) if (x(objspace, vp) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", rb_obj_info(vp))
3526 CHECK(RVALUE_WB_UNPROTECTED);
3527 CHECK(RVALUE_MARKED);
3528 CHECK(RVALUE_MARKING);
3529 CHECK(RVALUE_UNCOLLECTIBLE);
3536 if (rb_gc_obj_free(objspace, vp)) {
3537 if (has_object_id) {
3538 obj_free_object_id(objspace, vp);
3542 (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
3543 heap_page_add_freeobj(objspace, sweep_page, vp);
3544 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3553 if (objspace->flags.during_compacting) {
3559 rb_bug("T_MOVED shouldn't be seen until compaction is finished");
3561 gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3563 heap_page_add_freeobj(objspace, sweep_page, vp);
3574 bitset >>= slot_bits;
3581 struct heap_page *sweep_page = ctx->page;
3582 GC_ASSERT(sweep_page->heap == heap);
3585 bits_t *bits, bitset;
3587 gc_report(2, objspace, "page_sweep: start.\n");
3589 #if RGENGC_CHECK_MODE
3590 if (!objspace->flags.immediate_sweep) {
3591 GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
3594 sweep_page->flags.before_sweep = FALSE;
3595 sweep_page->free_slots = 0;
3597 p = (uintptr_t)sweep_page->start;
3598 bits = sweep_page->mark_bits;
3600 int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
3601 int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
3602 if (out_of_range_bits != 0) {
3603 bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
3609 int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
3610 GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
3611 bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
3615 bitset >>= NUM_IN_PAGE(p);
3617 gc_sweep_plane(objspace, heap, p, bitset, ctx);
3619 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
3621 for (int i = 1; i < bitmap_plane_count; i++) {
3624 gc_sweep_plane(objspace, heap, p, bitset, ctx);
3626 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
3629 if (!heap->compact_cursor) {
3630 gc_setup_mark_bits(sweep_page);
3633 #if GC_PROFILE_MORE_DETAIL
3634 if (gc_prof_enabled(objspace)) {
3636 record->removing_objects += ctx->final_slots + ctx->freed_slots;
3637 record->empty_objects += ctx->empty_slots;
3640 if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3642 sweep_page->total_slots,
3643 ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
3645 sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
3646 sweep_page->heap->total_freed_objects += ctx->freed_slots;
3648 if (heap_pages_deferred_final && !finalizing) {
3649 gc_finalize_deferred_register(objspace);
3652 #if RGENGC_CHECK_MODE
3653 short freelist_len = 0;
3654 asan_unlock_freelist(sweep_page);
3660 asan_lock_freelist(sweep_page);
3661 if (freelist_len != sweep_page->free_slots) {
3662 rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
3666 gc_report(2, objspace, "page_sweep: end.\n");
3670 gc_mode_name(enum gc_mode mode)
3673 case gc_mode_none: return "none";
3674 case gc_mode_marking: return "marking";
3675 case gc_mode_sweeping: return "sweeping";
3676 case gc_mode_compacting: return "compacting";
3677 default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
3682 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
3684 #if RGENGC_CHECK_MODE
3685 enum gc_mode prev_mode = gc_mode(objspace);
3686 switch (prev_mode) {
3687 case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
3688 case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
3689 case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
3690 case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
3693 if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
3694 gc_mode_set(objspace, mode);
3701 asan_unlock_freelist(page);
3702 if (page->freelist) {
3704 asan_unpoison_object((VALUE)p, false);
3708 asan_poison_object((VALUE)prev);
3709 asan_unpoison_object((VALUE)p, false);
3712 asan_poison_object((VALUE)p);
3715 page->freelist = freelist;
3717 asan_lock_freelist(page);
3724 heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
3725 heap->free_pages = NULL;
3726 heap->pooled_pages = NULL;
3727 if (!objspace->flags.immediate_sweep) {
3730 ccan_list_for_each(&heap->pages, page, page_node) {
3731 page->flags.before_sweep = TRUE;
3736 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
3740 #if GC_CAN_COMPILE_COMPACTION
3741 static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
3742 static int compare_pinned_slots(const void *left, const void *right, void *d);
3746 gc_ractor_newobj_cache_clear(void *c, void *data)
3750 newobj_cache->incremental_mark_step_allocated_slots = 0;
3752 for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
3755 struct heap_page *page = cache->using_page;
3756 struct free_slot *freelist = cache->freelist;
3757 RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
3759 heap_page_freelist_append(page, freelist);
3761 cache->using_page = NULL;
3762 cache->freelist = NULL;
3769 gc_mode_transition(objspace, gc_mode_sweeping);
3770 objspace->rincgc.pooled_slots = 0;
3771 objspace->heap_pages.allocatable_slots = 0;
3773 #if GC_CAN_COMPILE_COMPACTION
3774 if (objspace->flags.during_compacting) {
3775 gc_sort_heap_by_compare_func(
3777 objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
3782 for (int i = 0; i < HEAP_COUNT; i++) {
3784 gc_sweep_start_heap(objspace, heap);
3787 if (heap->sweeping_page == NULL) {
3788 GC_ASSERT(heap->total_pages == 0);
3789 GC_ASSERT(heap->total_slots == 0);
3790 gc_sweep_finish_heap(objspace, heap);
3794 rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
3800 size_t total_slots = heap->total_slots;
3801 size_t swept_slots = heap->freed_slots + heap->empty_slots;
3803 size_t init_slots = gc_params.heap_init_slots[heap - heaps];
3804 size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
3806 if (swept_slots < min_free_slots &&
3808 (heap->empty_slots == 0 || heap->freed_slots > heap->empty_slots)) {
3814 while (swept_slots < min_free_slots &&
3815 (resurrected_page = heap_page_resurrect(objspace))) {
3816 heap_add_page(objspace, heap, resurrected_page);
3817 heap_add_freepage(heap, resurrected_page);
3819 swept_slots += resurrected_page->free_slots;
3822 if (swept_slots < min_free_slots) {
3825 if (is_full_marking(objspace) ||
3826 objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
3827 heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
3830 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
3831 heap->force_major_gc_count++;
3840 gc_report(1, objspace, "gc_sweep_finish\n");
3842 gc_prof_set_heap_info(objspace);
3843 heap_pages_free_unused_pages(objspace);
3845 for (int i = 0; i < HEAP_COUNT; i++) {
3848 heap->freed_slots = 0;
3849 heap->empty_slots = 0;
3851 if (!will_be_incremental_marking(objspace)) {
3852 struct heap_page *end_page = heap->free_pages;
3854 while (end_page->free_next) end_page = end_page->free_next;
3855 end_page->free_next = heap->pooled_pages;
3858 heap->free_pages = heap->pooled_pages;
3860 heap->pooled_pages = NULL;
3861 objspace->rincgc.pooled_slots = 0;
3866 gc_mode_transition(objspace, gc_mode_none);
3868 #if RGENGC_CHECK_MODE >= 2
3869 gc_verify_internal_consistency(objspace);
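/* Note (added comment, not in the original source): gc_sweep_step() below sweeps
 * pages lazily: completely empty pages may be unlinked and stashed on
 * objspace->empty_pages, pages with free slots are handed to the heap's free or
 * pooled page lists, and the loop stops once GC_INCREMENTAL_SWEEP_SLOT_COUNT slots
 * have been recovered so mutator allocation can resume. */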
3876 struct heap_page *sweep_page = heap->sweeping_page;
3877 int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
3878 int swept_slots = 0;
3879 int pooled_slots = 0;
3881 if (sweep_page == NULL) return FALSE;
3883 #if GC_ENABLE_LAZY_SWEEP
3884 gc_prof_sweep_timer_start(objspace);
3888 RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
3896 gc_sweep_page(objspace, heap, &ctx);
3897 int free_slots = ctx.freed_slots + ctx.empty_slots;
3899 heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
3901 if (free_slots == sweep_page->total_slots &&
3902 heap_pages_freeable_pages > 0 &&
3904 heap_pages_freeable_pages--;
3907 heap_unlink_page(objspace, heap, sweep_page);
3909 sweep_page->start = 0;
3910 sweep_page->total_slots = 0;
3911 sweep_page->slot_size = 0;
3912 sweep_page->heap = NULL;
3913 sweep_page->free_slots = 0;
3915 asan_unlock_freelist(sweep_page);
3916 sweep_page->freelist = NULL;
3917 asan_lock_freelist(sweep_page);
3919 asan_poison_memory_region(sweep_page->body, HEAP_PAGE_SIZE);
3921 objspace->empty_pages_count++;
3922 sweep_page->free_next = objspace->empty_pages;
3923 objspace->empty_pages = sweep_page;
3925 else if (free_slots > 0) {
3926 heap->freed_slots += ctx.freed_slots;
3927 heap->empty_slots += ctx.empty_slots;
3929 if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
3930 heap_add_poolpage(objspace, heap, sweep_page);
3931 pooled_slots += free_slots;
3934 heap_add_freepage(heap, sweep_page);
3935 swept_slots += free_slots;
3936 if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
3942 sweep_page->free_next = NULL;
3944 } while ((sweep_page = heap->sweeping_page));
3946 if (!heap->sweeping_page) {
3947 gc_sweep_finish_heap(objspace, heap);
3949 if (!has_sweeping_pages(objspace)) {
3950 gc_sweep_finish(objspace);
3954 #if GC_ENABLE_LAZY_SWEEP
3955 gc_prof_sweep_timer_stop(objspace);
3958 return heap->free_pages != NULL;
3964 for (int i = 0; i < HEAP_COUNT; i++) {
3967 while (heap->sweeping_page) {
3968 gc_sweep_step(objspace, heap);
3976 GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
3977 if (!GC_ENABLE_LAZY_SWEEP) return;
3979 gc_sweeping_enter(objspace);
3981 for (int i = 0; i < HEAP_COUNT; i++) {
3983 if (!gc_sweep_step(objspace, heap)) {
3986 if (heap == sweep_heap && objspace->heap_pages.allocatable_slots == 0) {
3988 gc_sweep_rest(objspace);
3994 gc_sweeping_exit(objspace);
3998 rb_gc_impl_location(void *objspace_ptr, VALUE value)
4003 asan_unpoisoning_object(value) {
4005 destination = (VALUE)RMOVED(value)->destination;
4009 destination = value;
4014 destination = value;
4020 #if GC_CAN_COMPILE_COMPACTION
4031 GC_ASSERT(RVALUE_PINNED(objspace, forwarding_object));
4032 GC_ASSERT(!RVALUE_MARKED(objspace, forwarding_object));
4034 CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
4036 object = rb_gc_impl_location(objspace, forwarding_object);
4038 uint32_t original_shape_id = 0;
4040 original_shape_id = RMOVED(forwarding_object)->original_shape_id;
4043 gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
4047 if (original_shape_id) {
4048 rb_gc_set_shape(forwarding_object, original_shape_id);
4051 struct heap_page *orig_page = GET_HEAP_PAGE(object);
4052 orig_page->free_slots++;
4053 heap_page_add_freeobj(objspace, orig_page, object);
4055 GC_ASSERT(RVALUE_MARKED(objspace, forwarding_object));
4060 p += BASE_SLOT_SIZE;
4070 bits_t *mark_bits, *pin_bits;
4073 mark_bits = page->mark_bits;
4074 pin_bits = page->pinned_bits;
4076 uintptr_t p = page->start;
4079 bitset = pin_bits[0] & ~mark_bits[0];
4080 bitset >>= NUM_IN_PAGE(p);
4081 invalidate_moved_plane(objspace, page, p, bitset);
4082 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
4084 for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4087 bitset = pin_bits[i] & ~mark_bits[i];
4089 invalidate_moved_plane(objspace, page, p, bitset);
4090 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
4099 gc_mode_transition(objspace, gc_mode_compacting);
4101 for (int i = 0; i < HEAP_COUNT; i++) {
4103 ccan_list_for_each(&heap->pages, page, page_node) {
4104 page->flags.before_sweep = TRUE;
4107 heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
4108 heap->compact_cursor_index = 0;
4111 if (gc_prof_enabled(objspace)) {
4113 record->moved_objects = objspace->rcompactor.total_moved;
4116 memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
4117 memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
4118 memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
4119 memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
4130 gc_sweeping_enter(objspace);
4132 const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4134 gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4136 gc_sweep_start(objspace);
4137 if (objspace->flags.during_compacting) {
4138 gc_sweep_compact(objspace);
4141 if (immediate_sweep) {
4142 #if !GC_ENABLE_LAZY_SWEEP
4143 gc_prof_sweep_timer_start(objspace);
4145 gc_sweep_rest(objspace);
4146 #if !GC_ENABLE_LAZY_SWEEP
4147 gc_prof_sweep_timer_stop(objspace);
4153 for (int i = 0; i < HEAP_COUNT; i++) {
4155 gc_sweep_step(objspace, heap);
4159 gc_sweeping_exit(objspace);
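/* Note (added comment, not in the original source): the mark stack below is a linked
 * list of fixed-size chunks (STACK_CHUNK_SIZE VALUEs each); exhausted chunks are kept
 * on a per-stack cache and reused, and shrink_stack_chunk_cache() trims the cache
 * when most of it went unused during a marking phase. */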
4165 stack_chunk_alloc(void)
4179 return stack->chunk == NULL;
4185 size_t size = stack->index;
4186 stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4189 size += stack->limit;
4190 chunk = chunk->next;
4198 chunk->next = stack->cache;
4199 stack->cache = chunk;
4200 stack->cache_size++;
4208 if (stack->unused_cache_size > (stack->cache_size/2)) {
4209 chunk = stack->cache;
4210 stack->cache = stack->cache->next;
4211 stack->cache_size--;
4214 stack->unused_cache_size = stack->cache_size;
4222 GC_ASSERT(stack->index == stack->limit);
4224 if (stack->cache_size > 0) {
4225 next = stack->cache;
4226 stack->cache = stack->cache->next;
4227 stack->cache_size--;
4228 if (stack->unused_cache_size > stack->cache_size)
4229 stack->unused_cache_size = stack->cache_size;
4232 next = stack_chunk_alloc();
4234 next->next = stack->chunk;
4235 stack->chunk = next;
4244 prev = stack->chunk->next;
4245 GC_ASSERT(stack->index == 0);
4246 add_stack_chunk_cache(stack, stack->chunk);
4247 stack->chunk = prev;
4248 stack->index = stack->limit;
4256 while (chunk != NULL) {
4266 mark_stack_chunk_list_free(stack->chunk);
4272 mark_stack_chunk_list_free(stack->cache);
4273 stack->cache_size = 0;
4274 stack->unused_cache_size = 0;
4301 if (stack->index == stack->limit) {
4302 push_mark_stack_chunk(stack);
4304 stack->chunk->data[stack->index++] = obj;
4314 rb_bug("push_mark_stack() called for broken object");
4318 rb_bug("push_mark_stack: unexpected T_NODE object");
4322 rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
4324 is_pointer_to_heap((rb_objspace_t *)rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
4330 if (is_mark_stack_empty(stack)) {
4333 if (stack->index == 1) {
4334 *data = stack->chunk->data[--stack->index];
4335 pop_mark_stack_chunk(stack);
4338 *data = stack->chunk->data[--stack->index];
4349 stack->index = stack->limit = STACK_CHUNK_SIZE;
4351 for (i=0; i < 4; i++) {
4352 add_stack_chunk_cache(stack, stack_chunk_alloc());
4354 stack->unused_cache_size = stack->cache_size;
4362 const VALUE old_parent = objspace->rgengc.parent_object;
4365 if (RVALUE_WB_UNPROTECTED(objspace, obj) || !RVALUE_OLD_P(objspace, obj)) {
4366 rgengc_remember(objspace, old_parent);
4370 GC_ASSERT(old_parent == objspace->rgengc.parent_object);
4376 if (RVALUE_MARKED(objspace, obj)) return 0;
4377 MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
4391 if(!gc_config_full_mark_val)
4394 struct heap_page *page = GET_HEAP_PAGE(obj);
4396 GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
4397 check_rvalue_consistency(objspace, obj);
4399 if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
4400 if (!RVALUE_OLD_P(objspace, obj)) {
4401 gc_report(3, objspace,
"gc_aging: YOUNG: %s\n", rb_obj_info(obj));
4402 RVALUE_AGE_INC(objspace, obj);
4404 else if (is_full_marking(objspace)) {
4405 GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
4406 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
4409 check_rvalue_consistency(objspace, obj);
4411 objspace->marked_slots++;
4417 #if RGENGC_CHECK_MODE
4418 if (RVALUE_MARKED(objspace, obj) == FALSE) rb_bug("gc_grey: %s is not marked.", rb_obj_info(obj));
4419 if (RVALUE_MARKING(objspace, obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", rb_obj_info(obj));
4422 if (is_incremental_marking(objspace)) {
4423 MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4426 push_mark_stack(&objspace->mark_stack, obj);
4432 GC_ASSERT(during_gc);
4434 rgengc_check_relation(objspace, obj);
4435 if (!gc_mark_set(objspace, obj)) return;
4438 if (objspace->rgengc.parent_object) {
4439 RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
4440 (void *)obj, obj_type_name(obj),
4441 (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
4444 RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
4449 rb_obj_info_dump(obj);
4450 rb_bug("try to mark T_NONE object");
4453 gc_aging(objspace, obj);
4454 gc_grey(objspace, obj);
4461 if (RB_UNLIKELY(objspace->flags.during_compacting)) {
4463 if (!RVALUE_PINNED(objspace, obj)) {
4464 GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
4465 GET_HEAP_PAGE(obj)->pinned_slots++;
4466 MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
4475 gc_pin(objspace, obj);
4476 gc_mark(objspace, obj);
4480 rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
4484 if (RB_UNLIKELY(objspace->flags.during_reference_updating)) {
4485 GC_ASSERT(objspace->flags.during_compacting);
4486 GC_ASSERT(during_gc);
4488 *ptr = rb_gc_impl_location(objspace, *ptr);
4491 gc_mark(objspace, *ptr);
4496 rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
4500 gc_mark(objspace, obj);
4504 rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
4508 gc_mark_and_pin(objspace, obj);
4512 rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
4516 (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
4518 if (is_pointer_to_heap(objspace, (void *)obj)) {
4519 asan_unpoisoning_object(obj) {
4526 gc_mark_and_pin(objspace, obj);
4534 rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
4538 GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
4543 rb_obj_info_dump(obj);
4544 rb_bug("try to mark T_NONE object");
4550 if (!is_full_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
4551 GC_ASSERT(RVALUE_MARKED(objspace, obj));
4552 GC_ASSERT(!objspace->flags.during_compacting);
4557 rgengc_check_relation(objspace, obj);
4559 DURING_GC_COULD_MALLOC_REGION_START();
4561 rb_darray_append(&objspace->weak_references, ptr);
4563 DURING_GC_COULD_MALLOC_REGION_END();
4565 objspace->profile.weak_references_count++;
4569 rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
4575 if (!is_incremental_marking(objspace)) return;
4578 if (!RVALUE_MARKED(objspace, parent_obj)) return;
4581 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
4582 if (*ptr_ptr == ptr) {
4590 pin_value(st_data_t key, st_data_t value, st_data_t data)
4592 rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
4600 #define MARK_CHECKPOINT(category) do { \
4601 if (categoryp) *categoryp = category; \
4604 MARK_CHECKPOINT("objspace");
4605 objspace->rgengc.parent_object = Qfalse;
4607 if (finalizer_table != NULL) {
4608 st_foreach(finalizer_table, pin_value, (st_data_t)objspace);
4611 st_foreach(objspace->obj_to_id_tbl, gc_mark_tbl_no_pin_i, (st_data_t)objspace);
4613 if (stress_to_class) rb_gc_mark(stress_to_class);
4615 rb_gc_mark_roots(objspace, categoryp);
4621 if (RVALUE_OLD_P(objspace, obj)) {
4622 objspace->rgengc.parent_object = obj;
4625 objspace->rgengc.parent_object = Qfalse;
4632 gc_mark_set_parent(objspace, obj);
4633 rb_gc_mark_children(objspace, obj);
4641 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
4645 size_t marked_slots_at_the_beginning = objspace->marked_slots;
4646 size_t popped_count = 0;
4648 while (pop_mark_stack(mstack, &obj)) {
4649 if (obj == Qundef) continue;
4651 if (RGENGC_CHECK_MODE && !RVALUE_MARKED(objspace, obj)) {
4652 rb_bug("gc_mark_stacked_objects: %s is not marked.", rb_obj_info(obj));
4654 gc_mark_children(objspace, obj);
4657 if (RGENGC_CHECK_MODE && !RVALUE_MARKING(objspace, obj)) {
4658 rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
4660 CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4663 if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
4672 if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
4674 if (is_mark_stack_empty(mstack)) {
4675 shrink_stack_chunk_cache(mstack);
4684 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
4686 return gc_mark_stacked_objects(objspace, TRUE, count);
4692 return gc_mark_stacked_objects(objspace, FALSE, 0);
4695 #if RGENGC_CHECK_MODE >= 4
4697 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
4698 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
4699 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
4707 static struct reflist *
4708 reflist_create(VALUE obj)
4710 struct reflist *refs = xmalloc(sizeof(struct reflist));
4713 refs->list[0] = obj;
4719 reflist_destruct(struct reflist *refs)
4726 reflist_add(struct reflist *refs, VALUE obj)
4728 if (refs->pos == refs->size) {
4730 SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
4733 refs->list[refs->pos++] = obj;
4737 reflist_dump(struct reflist *refs)
4740 for (i=0; i<refs->pos; i++) {
4741 VALUE obj = refs->list[i];
4742 if (IS_ROOTSIG(obj)) {
4743 fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
4746 fprintf(stderr, "<%s>", rb_obj_info(obj));
4748 if (i+1 < refs->pos) fprintf(stderr, ", ");
4753 reflist_referred_from_machine_context(struct reflist *refs)
4756 for (i=0; i<refs->pos; i++) {
4757 VALUE obj = refs->list[i];
4758 if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
4773 const char *category;
4779 allrefs_add(struct allrefs *data, VALUE obj)
4781 struct reflist *refs;
4784 if (st_lookup(data->references, obj, &r)) {
4785 refs = (struct reflist *)r;
4786 reflist_add(refs, data->root_obj);
4790 refs = reflist_create(data->root_obj);
4791 st_insert(data->references, obj, (st_data_t)refs);
4799 struct allrefs *data = (struct allrefs *)ptr;
4801 if (allrefs_add(data, obj)) {
4802 push_mark_stack(&data->mark_stack, obj);
4807 allrefs_roots_i(VALUE obj, void *ptr)
4809 struct allrefs *data = (struct allrefs *)ptr;
4810 if (strlen(data->category) == 0) rb_bug("!!!");
4811 data->root_obj = MAKE_ROOTSIG(data->category);
4813 if (allrefs_add(data, obj)) {
4814 push_mark_stack(&data->mark_stack, obj);
4817 #define PUSH_MARK_FUNC_DATA(v) do { \
4818 struct gc_mark_func_data_struct *prev_mark_func_data = GET_VM()->gc.mark_func_data; \
4819 GET_VM()->gc.mark_func_data = (v);
4821 #define POP_MARK_FUNC_DATA() GET_VM()->gc.mark_func_data = prev_mark_func_data;} while (0)
4826 struct allrefs data;
4827 struct gc_mark_func_data_struct mfd;
4829 int prev_dont_gc = dont_gc_val();
4832 data.objspace = objspace;
4833 data.references = st_init_numtable();
4834 init_mark_stack(&data.mark_stack);
4836 mfd.mark_func = allrefs_roots_i;
4840 PUSH_MARK_FUNC_DATA(&mfd);
4841 GET_VM()->gc.mark_func_data = &mfd;
4842 mark_roots(objspace, &data.category);
4843 POP_MARK_FUNC_DATA();
4846 while (pop_mark_stack(&data.mark_stack, &obj)) {
4847 rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
4849 free_stack_chunks(&data.mark_stack);
4851 dont_gc_set(prev_dont_gc);
4852 return data.references;
4856 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
4858 struct reflist *refs = (struct reflist *)value;
4859 reflist_destruct(refs);
4864 objspace_allrefs_destruct(struct st_table *refs)
4866 st_foreach(refs, objspace_allrefs_destruct_i, 0);
4867 st_free_table(refs);
4870 #if RGENGC_CHECK_MODE >= 5
4872 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
4875 struct reflist *refs = (struct reflist *)v;
4876 fprintf(stderr, "[allrefs_dump_i] %s <- ", rb_obj_info(obj));
4878 fprintf(stderr, "\n");
4885 VALUE size = objspace->rgengc.allrefs_table->num_entries;
4886 fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
4887 st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
4892 gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
4895 struct reflist *refs = (struct reflist *)v;
4899 if (!RVALUE_MARKED(objspace, obj)) {
4900 fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", rb_obj_info(obj));
4901 fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
4904 if (reflist_referred_from_machine_context(refs)) {
4905 fprintf(stderr, " (marked from machine stack).\n");
4909 objspace->rgengc.error_count++;
4910 fprintf(stderr, "\n");
4917 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
4919 size_t saved_malloc_increase = objspace->malloc_params.increase;
4920 #if RGENGC_ESTIMATE_OLDMALLOC
4921 size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
4923 VALUE already_disabled = rb_objspace_gc_disable(objspace);
4925 objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
4928 st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
4931 if (objspace->rgengc.error_count > 0) {
4932 #if RGENGC_CHECK_MODE >= 5
4933 allrefs_dump(objspace);
4935 if (checker_name) rb_bug("%s: GC has problem.", checker_name);
4938 objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
4939 objspace->rgengc.allrefs_table = 0;
4941 if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4942 objspace->malloc_params.increase = saved_malloc_increase;
4943 #if RGENGC_ESTIMATE_OLDMALLOC
4944 objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
4952 size_t live_object_count;
4953 size_t zombie_object_count;
4956 size_t old_object_count;
4957 size_t remembered_shady_count;
4961 check_generation_i(const VALUE child, void *ptr)
4964 const VALUE parent = data->parent;
4966 if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(data->objspace, parent));
4968 if (!RVALUE_OLD_P(data->objspace, child)) {
4969 if (!RVALUE_REMEMBERED(data->objspace, parent) &&
4970 !RVALUE_REMEMBERED(data->objspace, child) &&
4971 !RVALUE_UNCOLLECTIBLE(data->objspace, child)) {
4972 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", rb_obj_info(parent), rb_obj_info(child));
4979 check_color_i(const VALUE child, void *ptr)
4982 const VALUE parent = data->parent;
4984 if (!RVALUE_WB_UNPROTECTED(data->objspace, parent) && RVALUE_WHITE_P(data->objspace, child)) {
4985 fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
4986 rb_obj_info(parent), rb_obj_info(child));
4992 check_children_i(const VALUE child, void *ptr)
4995 if (check_rvalue_consistency_force(data->objspace, child, FALSE) != 0) {
4996 fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
4997 rb_obj_info(child), rb_obj_info(data->parent));
5004 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
5010 for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5011 asan_unpoisoning_object(obj) {
5012 if (!rb_gc_impl_garbage_object_p(objspace, obj)) {
5014 data->live_object_count++;
5019 if (!gc_object_moved_p(objspace, obj)) {
5021 rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5025 if (RVALUE_OLD_P(objspace, obj)) data->old_object_count++;
5026 if (RVALUE_WB_UNPROTECTED(objspace, obj) && RVALUE_UNCOLLECTIBLE(objspace, obj)) data->remembered_shady_count++;
5028 if (!is_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
5031 rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5034 if (is_incremental_marking(objspace)) {
5035 if (RVALUE_BLACK_P(objspace, obj)) {
5038 rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
5044 data->zombie_object_count++;
5047 fprintf(stderr, "verify_internal_consistency_i: T_ZOMBIE has extra flags set: %s\n",
5053 fprintf(stderr, "verify_internal_consistency_i: FL_FINALIZE %s but %s finalizer_table: %s\n",
5054 FL_TEST(obj, FL_FINALIZE) ? "set" : "not set", st_is_member(finalizer_table, obj) ? "in" : "not in",
5069 unsigned int has_remembered_shady = FALSE;
5070 unsigned int has_remembered_old = FALSE;
5071 int remembered_old_objects = 0;
5072 int free_objects = 0;
5073 int zombie_objects = 0;
5075 short slot_size = page->slot_size;
5076 uintptr_t start = (uintptr_t)page->start;
5077 uintptr_t end = start + page->total_slots * slot_size;
5079 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5081 asan_unpoisoning_object(val) {
5086 if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
5087 has_remembered_shady = TRUE;
5089 if (RVALUE_PAGE_MARKING(page, val)) {
5090 has_remembered_old = TRUE;
5091 remembered_old_objects++;
5096 if (!is_incremental_marking(objspace) &&
5097 page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
5099 for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5101 if (RVALUE_PAGE_MARKING(page, val)) {
5102 fprintf(stderr, "marking -> %s\n", rb_obj_info(val));
5105 rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5106 (void *)page, remembered_old_objects, obj ? rb_obj_info(obj) : "");
5109 if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
5110 rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5111 (void *)page, obj ? rb_obj_info(obj) : "");
5116 if (page->free_slots != free_objects) {
5117 rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
5120 if (page->final_slots != zombie_objects) {
5121 rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
5124 return remembered_old_objects;
5128 gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
5130 int remembered_old_objects = 0;
5133 ccan_list_for_each(head, page, page_node) {
5134 asan_unlock_freelist(page);
5139 asan_unpoison_object(vp, false);
5141 fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", rb_obj_info(vp));
5144 asan_poison_object(prev);
5146 asan_lock_freelist(page);
5148 if (page->flags.has_remembered_objects == FALSE) {
5149 remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
5153 return remembered_old_objects;
5159 int remembered_old_objects = 0;
5160 for (int i = 0; i < HEAP_COUNT; i++) {
5161 remembered_old_objects += gc_verify_heap_pages_(objspace, &((&heaps[i])->pages));
5163 return remembered_old_objects;
5171 data.objspace = objspace;
5172 gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
5175 for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
5176 struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
5177 short slot_size = page->slot_size;
5179 uintptr_t start = (uintptr_t)page->start;
5180 uintptr_t end = start + page->total_slots * slot_size;
5182 verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
5185 if (data.err_count != 0) {
5186 #if RGENGC_CHECK_MODE >= 5
5187 objspace->rgengc.error_count = data.err_count;
5188 gc_marks_check(objspace, NULL, NULL);
5189 allrefs_dump(objspace);
5191 rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
5195 gc_verify_heap_pages(objspace);
5199 if (!is_lazy_sweeping(objspace) &&
5201 !rb_gc_multi_ractor_p()) {
5202 if (objspace_live_slots(objspace) != data.live_object_count) {
5203 fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
5204 total_final_slots_count(objspace), total_freed_objects(objspace));
5205 rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5206 objspace_live_slots(objspace), data.live_object_count);
5210 if (!is_marking(objspace)) {
5211 if (objspace->rgengc.old_objects != data.old_object_count) {
5212 rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5213 objspace->rgengc.old_objects, data.old_object_count);
5215 if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
5216 rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
5217 objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
5222 size_t list_count = 0;
5225 VALUE z = heap_pages_deferred_final;
5228 z = RZOMBIE(z)->next;
5232 if (total_final_slots_count(objspace) != data.zombie_object_count ||
5233 total_final_slots_count(objspace) != list_count) {
5235 rb_bug("inconsistent finalizing object count:\n"
5236 " expect %"PRIuSIZE"\n"
5237 " but %"PRIuSIZE" zombies\n"
5238 " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5239 total_final_slots_count(objspace),
5240 data.zombie_object_count,
5245 gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
5249 gc_verify_internal_consistency(void *objspace_ptr)
5253 unsigned int lev = rb_gc_vm_lock();
5257 unsigned int prev_during_gc = during_gc;
5260 gc_verify_internal_consistency_(objspace);
5262 during_gc = prev_during_gc;
5264 rb_gc_vm_unlock(lev);
5268 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
5270 if (heap->pooled_pages) {
5271 if (heap->free_pages) {
5272 struct heap_page *free_pages_tail = heap->free_pages;
5273 while (free_pages_tail->free_next) {
5274 free_pages_tail = free_pages_tail->free_next;
5276 free_pages_tail->free_next = heap->pooled_pages;
5279 heap->free_pages = heap->pooled_pages;
5282 heap->pooled_pages = NULL;
5289 struct heap_page *page = GET_HEAP_PAGE(obj);
5290 bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5292 if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5293 page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
5294 MARK_IN_BITMAP(uncollectible_bits, obj);
5295 objspace->rgengc.uncollectible_wb_unprotected_objects++;
5297 #if RGENGC_PROFILE > 0
5298 objspace->profile.total_remembered_shady_object_count++;
5299 #if RGENGC_PROFILE >= 2
5300 objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5311 gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
5316 gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", rb_obj_info((VALUE)p));
5317 GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, (VALUE)p));
5318 GC_ASSERT(RVALUE_MARKED(objspace, (VALUE)p));
5319 gc_mark_children(objspace, (VALUE)p);
5321 p += BASE_SLOT_SIZE;
5332 ccan_list_for_each(&heap->pages, page, page_node) {
5333 bits_t *mark_bits = page->mark_bits;
5334 bits_t *wbun_bits = page->wb_unprotected_bits;
5335 uintptr_t p = page->start;
5338 bits_t bits = mark_bits[0] & wbun_bits[0];
5339 bits >>= NUM_IN_PAGE(p);
5340 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
5341 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5343 for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5344 bits_t bits = mark_bits[j] & wbun_bits[j];
5346 gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
5347 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5351 gc_mark_stacked_objects_all(objspace);
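/* Note (added comment, not in the original source): weak references registered via
 * rb_gc_impl_mark_weak() are collected in objspace->weak_references during marking;
 * gc_update_weak_references() below clears entries whose referent was not marked,
 * counts the ones that survived, and shrinks the array back down afterwards. */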
5357 size_t retained_weak_references_count = 0;
5359 rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
5360 if (!*ptr_ptr) continue;
5362 VALUE obj = **ptr_ptr;
5366 if (!RVALUE_MARKED(objspace, obj)) {
5370 retained_weak_references_count++;
5374 objspace->profile.retained_weak_references_count = retained_weak_references_count;
5376 rb_darray_clear(objspace->weak_references);
5377 DURING_GC_COULD_MALLOC_REGION_START();
5379 rb_darray_resize_capa(&objspace->weak_references, retained_weak_references_count);
5381 DURING_GC_COULD_MALLOC_REGION_END();
5388 if (is_incremental_marking(objspace)) {
5389 if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
5390 rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
5391 mark_stack_size(&objspace->mark_stack));
5394 mark_roots(objspace, NULL);
5395 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
5397 #if RGENGC_CHECK_MODE >= 2
5398 if (gc_verify_heap_pages(objspace) != 0) {
5399 rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
5403 objspace->flags.during_incremental_marking = FALSE;
5405 for (int i = 0; i < HEAP_COUNT; i++) {
5406 gc_marks_wb_unprotected_objects(objspace, &heaps[i]);
5410 gc_update_weak_references(objspace);
5412 #if RGENGC_CHECK_MODE >= 2
5413 gc_verify_internal_consistency(objspace);
5416 #if RGENGC_CHECK_MODE >= 4
5418 gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
5423 const unsigned long r_mul = objspace->live_ractor_cache_count > 8 ? 8 : objspace->live_ractor_cache_count;
5425 size_t total_slots = objspace_available_slots(objspace);
5426 size_t sweep_slots = total_slots - objspace->marked_slots;
5427 size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
5428 size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5429 if (min_free_slots < gc_params.heap_free_slots * r_mul) {
5430 min_free_slots = gc_params.heap_free_slots * r_mul;
5433 int full_marking = is_full_marking(objspace);
5435 GC_ASSERT(objspace_available_slots(objspace) >= objspace->marked_slots);
5438 size_t total_init_slots = 0;
5439 for (int i = 0; i < HEAP_COUNT; i++) {
5440 total_init_slots += gc_params.heap_init_slots[i] * r_mul;
5443 if (max_free_slots < total_init_slots) {
5444 max_free_slots = total_init_slots;
5447 if (sweep_slots > max_free_slots) {
5448 heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
5451 heap_pages_freeable_pages = 0;
5454 if (objspace->heap_pages.allocatable_slots == 0 && sweep_slots < min_free_slots) {
5455 if (!full_marking) {
5456 if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5457 full_marking = TRUE;
5460 gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
5461 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
5468 const double r = gc_params.oldobject_limit_factor;
5469 objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
5470 (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
5471 (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
5473 objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
5476 if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
5477 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
5479 if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
5480 gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
5482 if (RGENGC_FORCE_MAJOR_GC) {
5483 gc_needs_major_flags = GPR_FLAG_MAJOR_BY_FORCE;
5486 gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
5487 "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
5488 "sweep %"PRIdSIZE" slots, allocatable %"PRIdSIZE" slots, next GC: %s)\n",
5489 objspace->marked_slots, objspace->rgengc.old_objects, objspace_available_slots(objspace), sweep_slots, objspace->heap_pages.allocatable_slots,
5490 gc_needs_major_flags ? "major" : "minor");
5494 rb_ractor_finish_marking();
5500 gc_compact_heap_cursors_met_p(rb_heap_t *heap)
5502 return heap->sweeping_page == heap->compact_cursor;
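/* Note (added comment, not in the original source): compaction walks each heap from
 * both ends: free slots are taken from the head of the page list while
 * heap->compact_cursor scans from the tail. A marked, unpinned, movable object is
 * copied into a free slot (possibly in a different size pool chosen from
 * rb_gc_obj_optimal_size()), a T_MOVED forwarding object is left behind, and the
 * heap is done once the two cursors meet. */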
5509 size_t obj_size = rb_gc_obj_optimal_size(obj);
5510 if (obj_size == 0) {
5515 if (rb_gc_impl_size_allocatable_p(obj_size)) {
5516 idx = heap_idx_for_size(obj_size);
5526 GC_ASSERT(gc_is_moveable_obj(objspace, src));
5528 rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, heap, src);
5529 uint32_t orig_shape = 0;
5530 uint32_t new_shape = 0;
5532 if (gc_compact_heap_cursors_met_p(dest_pool)) {
5533 return dest_pool != heap;
5537 orig_shape = rb_gc_get_shape(src);
5539 if (dest_pool != heap) {
5540 new_shape = rb_gc_rebuild_shape(src, dest_pool - heaps);
5542 if (new_shape == 0) {
5548 while (!try_move(objspace, dest_pool, dest_pool->free_pages, src)) {
5550 .page = dest_pool->sweeping_page,
5559 lock_page_body(objspace, GET_PAGE_BODY(src));
5560 gc_sweep_page(objspace, dest_pool, &ctx);
5561 unlock_page_body(objspace, GET_PAGE_BODY(src));
5563 if (dest_pool->sweeping_page->free_slots > 0) {
5564 heap_add_freepage(dest_pool, dest_pool->sweeping_page);
5567 dest_pool->sweeping_page = ccan_list_next(&dest_pool->pages, dest_pool->sweeping_page, page_node);
5568 if (gc_compact_heap_cursors_met_p(dest_pool)) {
5569 return dest_pool != heap;
5573 if (orig_shape != 0) {
5574 if (new_shape != 0) {
5575 VALUE dest = rb_gc_impl_location(objspace, src);
5576 rb_gc_set_shape(dest, new_shape);
5578 RMOVED(src)->original_shape_id = orig_shape;
5587 short slot_size = page->slot_size;
5588 short slot_bits = slot_size / BASE_SLOT_SIZE;
5589 GC_ASSERT(slot_bits > 0);
5593 GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5596 objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
5598 if (gc_is_moveable_obj(objspace, vp)) {
5599 if (!gc_compact_move(objspace, heap, vp)) {
5606 bitset >>= slot_bits;
5616 GC_ASSERT(page == heap->compact_cursor);
5618 bits_t *mark_bits, *pin_bits;
5620 uintptr_t p = page->start;
5622 mark_bits = page->mark_bits;
5623 pin_bits = page->pinned_bits;
5626 bitset = (mark_bits[0] & ~pin_bits[0]);
5627 bitset >>= NUM_IN_PAGE(p);
5629 if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5632 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5634 for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
5635 bitset = (mark_bits[j] & ~pin_bits[j]);
5637 if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5640 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5649 for (int i = 0; i < HEAP_COUNT; i++) {
5652 if (heap->total_pages > 0 &&
5653 !gc_compact_heap_cursors_met_p(heap)) {
5664 gc_compact_start(objspace);
5665 #if RGENGC_CHECK_MODE >= 2
5666 gc_verify_internal_consistency(objspace);
5669 while (!gc_compact_all_compacted_p(objspace)) {
5670 for (int i = 0; i < HEAP_COUNT; i++) {
5673 if (gc_compact_heap_cursors_met_p(heap)) {
5677 struct heap_page *start_page = heap->compact_cursor;
5679 if (!gc_compact_page(objspace, heap, start_page)) {
5680 lock_page_body(objspace, start_page->body);
5687 lock_page_body(objspace, start_page->body);
5688 heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
5692 gc_compact_finish(objspace);
5694 #if RGENGC_CHECK_MODE >= 2
5695 gc_verify_internal_consistency(objspace);
5702 gc_report(1, objspace, "gc_marks_rest\n");
5704 for (int i = 0; i < HEAP_COUNT; i++) {
5705 (&heaps[i])->pooled_pages = NULL;
5708 if (is_incremental_marking(objspace)) {
5709 while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
5712 gc_mark_stacked_objects_all(objspace);
5715 gc_marks_finish(objspace);
5721 bool marking_finished = false;
5723 GC_ASSERT(is_marking(objspace));
5724 if (gc_mark_stacked_objects_incremental(objspace, slots)) {
5725 gc_marks_finish(objspace);
5727 marking_finished = true;
5730 return marking_finished;
5736 GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
5737 bool marking_finished = true;
5739 gc_marking_enter(objspace);
5741 if (heap->free_pages) {
5742 gc_report(2, objspace, "gc_marks_continue: has pooled pages");
5744 marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
5747 gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
5748 mark_stack_size(&objspace->mark_stack));
5749 heap->force_incremental_marking_finish_count++;
5750 gc_marks_rest(objspace);
5753 gc_marking_exit(objspace);
5755 return marking_finished;
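/* Note (added comment, not in the original source): gc_marks_start() below prepares
 * either a full mark (reset old/uncollectible counters, clear the mark and
 * remembered-set bitmaps, size the incremental step from the pooled slots) or a
 * minor mark (seed marked_slots from the old generation and re-mark the remembered
 * set), and then marks the roots. */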
5762 gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
5763 gc_mode_transition(objspace, gc_mode_marking);
5766 size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
5767 objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
5769 if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
5770 "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
5771 "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
5772 objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
5773 objspace->flags.during_minor_gc = FALSE;
5774 if (ruby_enable_autocompact) {
5775 objspace->flags.during_compacting |= TRUE;
5777 objspace->profile.major_gc_count++;
5778 objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
5779 objspace->rgengc.old_objects = 0;
5780 objspace->rgengc.last_major_gc = objspace->profile.count;
5781 objspace->marked_slots = 0;
5783 for (int i = 0; i < HEAP_COUNT; i++) {
5785 rgengc_mark_and_rememberset_clear(objspace, heap);
5786 heap_move_pooled_pages_to_free_pages(heap);
5788 if (objspace->flags.during_compacting) {
5791 ccan_list_for_each(&heap->pages, page, page_node) {
5792 page->pinned_slots = 0;
5798 objspace->flags.during_minor_gc = TRUE;
5799 objspace->marked_slots =
5800 objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects;
5801 objspace->profile.minor_gc_count++;
5803 for (int i = 0; i < HEAP_COUNT; i++) {
5804 rgengc_rememberset_mark(objspace, &heaps[i]);
5808 mark_roots(objspace, NULL);
5810 gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
5811 full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
5817 gc_prof_mark_timer_start(objspace);
5818 gc_marking_enter(objspace);
5820 bool marking_finished = false;
5824 gc_marks_start(objspace, full_mark);
5825 if (!is_incremental_marking(objspace)) {
5826 gc_marks_rest(objspace);
5827 marking_finished = true;
5830 #if RGENGC_PROFILE > 0
5831 if (gc_prof_record(objspace)) {
5833 record->old_objects = objspace->rgengc.old_objects;
5837 gc_marking_exit(objspace);
5838 gc_prof_mark_timer_stop(objspace);
5840 return marking_finished;
5846 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
5848 if (level <= RGENGC_DEBUG) {
5852 const char *status = " ";
5855 status = is_full_marking(objspace) ? "+" : "-";
5858 if (is_lazy_sweeping(objspace)) {
5861 if (is_incremental_marking(objspace)) {
5866 va_start(args, fmt);
5867 vsnprintf(buf, 1024, fmt, args);
5870 fprintf(out, "%s|", status);
5880 struct heap_page *page = GET_HEAP_PAGE(obj);
5881 bits_t *bits = &page->remembered_bits[0];
5883 if (MARKED_IN_BITMAP(bits, obj)) {
5887 page->flags.has_remembered_objects = TRUE;
5888 MARK_IN_BITMAP(bits, obj);
5899 gc_report(6, objspace, "rgengc_remember: %s %s\n", rb_obj_info(obj),
5900 RVALUE_REMEMBERED(objspace, obj) ? "was already remembered" : "is remembered now");
5902 check_rvalue_consistency(objspace, obj);
5904 if (RGENGC_CHECK_MODE) {
5905 if (RVALUE_WB_UNPROTECTED(objspace, obj)) rb_bug("rgengc_remember: %s is not wb protected.", rb_obj_info(obj));
5908 #if RGENGC_PROFILE > 0
5909 if (!RVALUE_REMEMBERED(objspace, obj)) {
5910 if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0) {
5911 objspace->profile.total_remembered_normal_object_count++;
5912 #if RGENGC_PROFILE >= 2
5913 objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
5919 return rgengc_remembersetbits_set(objspace, obj);
5922 #ifndef PROFILE_REMEMBERSET_MARK
5923 #define PROFILE_REMEMBERSET_MARK 0
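/* Note (added comment, not in the original source): for a minor GC,
 * rgengc_rememberset_mark() below visits only pages that have remembered objects or
 * uncollectible WB-unprotected objects, ORs the relevant per-page bitmaps together,
 * re-marks the children of each selected slot, and clears the remembered bits as it
 * consumes them. */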
5927 rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
5933 gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", rb_obj_info(obj));
5934 GC_ASSERT(RVALUE_UNCOLLECTIBLE(objspace, obj));
5935 GC_ASSERT(RVALUE_OLD_P(objspace, obj) || RVALUE_WB_UNPROTECTED(objspace, obj));
5937 gc_mark_children(objspace, obj);
5939 p += BASE_SLOT_SIZE;
5950 #if PROFILE_REMEMBERSET_MARK
5951 int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5953 gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
5955 ccan_list_for_each(&heap->pages, page, page_node) {
5956 if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
5957 uintptr_t p = page->start;
5958 bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
5959 bits_t *remembered_bits = page->remembered_bits;
5960 bits_t *uncollectible_bits = page->uncollectible_bits;
5961 bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
5962 #if PROFILE_REMEMBERSET_MARK
5963 if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
5964 else if (page->flags.has_remembered_objects) has_old++;
5965 else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
5967 for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5968 bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
5969 remembered_bits[j] = 0;
5971 page->flags.has_remembered_objects = FALSE;
5974 bitset >>= NUM_IN_PAGE(p);
5975 rgengc_rememberset_mark_plane(objspace, p, bitset);
5976 p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5978 for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
5980 rgengc_rememberset_mark_plane(objspace, p, bitset);
5981 p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5984 #if PROFILE_REMEMBERSET_MARK
5991 #if PROFILE_REMEMBERSET_MARK
5992 fprintf(stderr,
"%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
5994 gc_report(1, objspace,
"rgengc_rememberset_mark: finished\n");
6002 ccan_list_for_each(&heap->pages, page, page_node) {
6003 memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6004 memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6005 memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6006 memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6007 memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6008 page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
6009 page->flags.has_remembered_objects = FALSE;
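
/*
 * Generational write barrier: when an old object `a` gains a reference to a
 * young object `b`, gc_writebarrier_generational() records `a` in the
 * remembered set so the next minor GC rescans it. The RGENGC_CHECK_MODE
 * block at its top only asserts those age invariants.
 */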
    if (RGENGC_CHECK_MODE) {
        if (!RVALUE_OLD_P(objspace, a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", rb_obj_info(a));
        if ( RVALUE_OLD_P(objspace, b)) rb_bug("gc_writebarrier_generational: %s is an old object.", rb_obj_info(b));
        if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", rb_obj_info(a), rb_obj_info(b));

    if (!RVALUE_REMEMBERED(objspace, a)) {
        int lev = rb_gc_vm_lock_no_barrier();

        rgengc_remember(objspace, a);

        rb_gc_vm_unlock_no_barrier(lev);

        gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", rb_obj_info(a), rb_obj_info(b));

    check_rvalue_consistency(objspace, a);
    check_rvalue_consistency(objspace, b);

    gc_mark_set_parent(objspace, parent);
    rgengc_check_relation(objspace, obj);
    if (gc_mark_set(objspace, obj) == FALSE) return;
    gc_aging(objspace, obj);
    gc_grey(objspace, obj);

    gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, rb_obj_info(b));

    if (RVALUE_BLACK_P(objspace, a)) {
        if (RVALUE_WHITE_P(objspace, b)) {
            if (!RVALUE_WB_UNPROTECTED(objspace, a)) {
                gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, rb_obj_info(b));
                gc_mark_from(objspace, b, a);
        else if (RVALUE_OLD_P(objspace, a) && !RVALUE_OLD_P(objspace, b)) {
            rgengc_remember(objspace, a);

        if (RB_UNLIKELY(objspace->flags.during_compacting)) {
            MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
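
/*
 * rb_gc_impl_writebarrier() below is the barrier entry point: outside of
 * incremental marking it falls through to the generational barrier above
 * (old parent, young child); during incremental marking it takes the VM lock
 * and runs gc_writebarrier_incremental() instead, retrying if the marking
 * state changed underneath it.
 */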
rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)

    if (RGENGC_CHECK_MODE) {

    if (!is_incremental_marking(objspace)) {
        if (!RVALUE_OLD_P(objspace, a) || RVALUE_OLD_P(objspace, b)) {

        gc_writebarrier_generational(a, b, objspace);

        int lev = rb_gc_vm_lock_no_barrier();

        if (is_incremental_marking(objspace)) {
            gc_writebarrier_incremental(a, b, objspace);

        rb_gc_vm_unlock_no_barrier(lev);

        if (retry) goto retry;

rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)

    if (RVALUE_WB_UNPROTECTED(objspace, obj)) {

        gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", rb_obj_info(obj),
                  RVALUE_REMEMBERED(objspace, obj) ? " (already remembered)" : "");

        unsigned int lev = rb_gc_vm_lock_no_barrier();

        if (RVALUE_OLD_P(objspace, obj)) {
            gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", rb_obj_info(obj));
            RVALUE_DEMOTE(objspace, obj);
            gc_mark_set(objspace, obj);
            gc_remember_unprotected(objspace, obj);

            objspace->profile.total_shade_operation_count++;
#if RGENGC_PROFILE >= 2
            objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;

            RVALUE_AGE_RESET(obj);

        RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);

        rb_gc_vm_unlock_no_barrier(lev);

rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)

    if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
        rb_gc_impl_writebarrier_unprotect(objspace, dest);

    rb_gc_impl_copy_finalizer(objspace, dest, obj);

rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)

    gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", rb_obj_info(obj));

    if (is_incremental_marking(objspace)) {
        if (RVALUE_BLACK_P(objspace, obj)) {
            gc_grey(objspace, obj);

        if (RVALUE_OLD_P(objspace, obj)) {
            rgengc_remember(objspace, obj);

rb_gc_impl_obj_flags(void *objspace_ptr, VALUE obj, ID* flags, size_t max)

    static ID ID_marked;
    static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;

#define I(s) ID_##s = rb_intern(#s);

    if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0 && n < max) flags[n++] = ID_wb_protected;
    if (RVALUE_OLD_P(objspace, obj) && n < max) flags[n++] = ID_old;
    if (RVALUE_UNCOLLECTIBLE(objspace, obj) && n < max) flags[n++] = ID_uncollectible;
    if (RVALUE_MARKING(objspace, obj) && n < max) flags[n++] = ID_marking;
    if (RVALUE_MARKED(objspace, obj) && n < max) flags[n++] = ID_marked;
    if (RVALUE_PINNED(objspace, obj) && n < max) flags[n++] = ID_pinned;

rb_gc_impl_ractor_cache_alloc(void *objspace_ptr)

    objspace->live_ractor_cache_count++;

rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)

    objspace->live_ractor_cache_count--;

    gc_ractor_newobj_cache_clear(cache, NULL);

    if (!heap->free_pages) {
        if (!heap_page_allocate_and_initialize(objspace, heap)) {
            objspace->heap_pages.allocatable_slots = 1;
            heap_page_allocate_and_initialize(objspace, heap);

    if (dont_gc_val() || during_gc || ruby_disable_gc) {

    for (int i = 0; i < HEAP_COUNT; i++) {

        heap_ready_to_gc(objspace, heap);

gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)

    gc_prof_set_malloc_info(objspace);

        size_t old_limit = malloc_limit;

        if (inc > malloc_limit) {
            malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
            if (malloc_limit > gc_params.malloc_limit_max) {
                malloc_limit = gc_params.malloc_limit_max;

            malloc_limit = (size_t)(malloc_limit * 0.98);
            if (malloc_limit < gc_params.malloc_limit_min) {
                malloc_limit = gc_params.malloc_limit_min;

            if (old_limit != malloc_limit) {
                fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
            fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",

#if RGENGC_ESTIMATE_OLDMALLOC

        if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
            gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
            objspace->rgengc.oldmalloc_increase_limit =
                (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);

            if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;

        if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
                       gc_needs_major_flags,
                       objspace->rgengc.oldmalloc_increase,
                       objspace->rgengc.oldmalloc_increase_limit,
                       gc_params.oldmalloc_limit_max);

        objspace->rgengc.oldmalloc_increase = 0;

        if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
            objspace->rgengc.oldmalloc_increase_limit =
                (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
            if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
                objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
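
/*
 * Worked example of the adjustment above (illustrative numbers): with
 * malloc_limit at 16 MiB and gc_params.malloc_limit_growth_factor = 1.4,
 * allocating 20 MiB between GCs raises the limit to 20 MiB * 1.4 = 28 MiB,
 * capped at gc_params.malloc_limit_max. When allocations stay under the
 * limit it instead decays by 2% per GC (the 0.98 factor), never dropping
 * below gc_params.malloc_limit_min.
 */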
garbage_collect(rb_objspace_t *objspace, unsigned int reason)

    int lev = rb_gc_vm_lock();

#if GC_PROFILE_MORE_DETAIL
        objspace->profile.prepare_time = getrusage_time();

#if GC_PROFILE_MORE_DETAIL
        objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;

        ret = gc_start(objspace, reason);

    rb_gc_vm_unlock(lev);

    unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);

    objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);

    if (!rb_darray_size(objspace->heap_pages.sorted)) return TRUE;
    if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE;

    GC_ASSERT(gc_mode(objspace) == gc_mode_none);
    GC_ASSERT(!is_lazy_sweeping(objspace));
    GC_ASSERT(!is_incremental_marking(objspace));

    unsigned int lock_lev;
    gc_enter(objspace, gc_enter_event_start, &lock_lev);

#if RGENGC_CHECK_MODE >= 2
    gc_verify_internal_consistency(objspace);

    if (ruby_gc_stressful) {
        int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;

        if ((flag & (1 << gc_stress_no_major)) == 0) {
            do_full_mark = TRUE;

        objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));

    if (gc_needs_major_flags) {
        reason |= gc_needs_major_flags;
        do_full_mark = TRUE;
    else if (RGENGC_FORCE_MAJOR_GC) {
        reason = GPR_FLAG_MAJOR_BY_FORCE;
        do_full_mark = TRUE;

    if (!gc_config_full_mark_val) {
        do_full_mark = FALSE;

    gc_needs_major_flags = GPR_FLAG_NONE;

    if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
        reason |= GPR_FLAG_MAJOR_BY_FORCE;

    if (objspace->flags.dont_incremental ||
        reason & GPR_FLAG_IMMEDIATE_MARK ||
        ruby_gc_stressful) {
        objspace->flags.during_incremental_marking = FALSE;

        objspace->flags.during_incremental_marking = do_full_mark;

    if (do_full_mark && ruby_enable_autocompact) {
        objspace->flags.during_compacting = TRUE;
#if RGENGC_CHECK_MODE
        objspace->rcompactor.compare_func = ruby_autocompact_compare_func;

        objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);

    if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
        objspace->flags.immediate_sweep = TRUE;

    if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;

    gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
              do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);

#if USE_DEBUG_COUNTER
    RB_DEBUG_COUNTER_INC(gc_count);

    if (reason & GPR_FLAG_MAJOR_MASK) {
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
#if RGENGC_ESTIMATE_OLDMALLOC
        (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);

        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
        (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);

    objspace->profile.count++;
    objspace->profile.latest_gc_info = reason;
    objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
    objspace->profile.heap_used_at_gc_start = rb_darray_size(objspace->heap_pages.sorted);
    objspace->profile.weak_references_count = 0;
    objspace->profile.retained_weak_references_count = 0;
    gc_prof_setup_new_record(objspace, reason);
    gc_reset_malloc_info(objspace, do_full_mark);

    GC_ASSERT(during_gc);

    gc_prof_timer_start(objspace);

    if (gc_marks(objspace, do_full_mark)) {

    gc_prof_timer_stop(objspace);

    gc_exit(objspace, gc_enter_event_start, &lock_lev);

    if (is_incremental_marking(objspace) || is_lazy_sweeping(objspace)) {
        unsigned int lock_lev;
        gc_enter(objspace, gc_enter_event_rest, &lock_lev);

        if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);

        if (is_incremental_marking(objspace)) {
            gc_marking_enter(objspace);
            gc_marks_rest(objspace);
            gc_marking_exit(objspace);

        if (is_lazy_sweeping(objspace)) {
            gc_sweeping_enter(objspace);
            gc_sweep_rest(objspace);
            gc_sweeping_exit(objspace);

        gc_exit(objspace, gc_enter_event_rest, &lock_lev);

    unsigned int reason;
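
/*
 * gc_current_status_fill() encodes the collector's phase into a small status
 * string: 'F' and 'I' are appended while marking (full / incremental) and
 * 'L' while lazily sweeping; gc_record() prints that string around each
 * enter/exit tick when PRINT_ENTER_EXIT_TICK is enabled.
 */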
    if (is_marking(objspace)) {
        if (is_full_marking(objspace)) buff[i++] = 'F';
        if (is_incremental_marking(objspace)) buff[i++] = 'I';
    else if (is_sweeping(objspace)) {
        if (is_lazy_sweeping(objspace)) buff[i++] = 'L';

    static char buff[0x10];
    gc_current_status_fill(objspace, buff);

#if PRINT_ENTER_EXIT_TICK

static tick_t last_exit_tick;
static tick_t enter_tick;
static int enter_count = 0;
static char last_gc_status[0x10];

gc_record(rb_objspace_t *objspace, int direction, const char *event)

    if (direction == 0) {
        enter_tick = tick();
        gc_current_status_fill(objspace, last_gc_status);

        tick_t exit_tick = tick();
        char current_gc_status[0x10];
        gc_current_status_fill(objspace, current_gc_status);

            fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                    enter_tick - last_exit_tick,
                    exit_tick - enter_tick,
                    last_gc_status, current_gc_status,
                    (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
            last_exit_tick = exit_tick;

            fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
                    exit_tick - enter_tick,
                    last_gc_status, current_gc_status,
                    (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');

gc_record(rb_objspace_t *objspace, int direction, const char *event)

gc_enter_event_cstr(enum gc_enter_event event)

      case gc_enter_event_start: return "start";
      case gc_enter_event_continue: return "continue";
      case gc_enter_event_rest: return "rest";
      case gc_enter_event_finalizer: return "finalizer";

gc_enter_count(enum gc_enter_event event)

      case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
      case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
      case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
      case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;

static bool current_process_time(struct timespec *ts);

gc_clock_start(struct timespec *ts)

    if (!current_process_time(ts)) {

static unsigned long long

    if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
        current_process_time(&end_time) &&
        end_time.tv_sec >= ts->tv_sec) {
        return (unsigned long long)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
               (end_time.tv_nsec - ts->tv_nsec);

gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)

    *lock_lev = rb_gc_vm_lock();

      case gc_enter_event_rest:
        if (!is_marking(objspace)) break;
      case gc_enter_event_start:
      case gc_enter_event_continue:

    gc_enter_count(event);

    if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);

    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_record(objspace, 0, gc_enter_event_cstr(event));

gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)

    GC_ASSERT(during_gc != 0);

    gc_record(objspace, 1, gc_enter_event_cstr(event));
    RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
    gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));

    rb_gc_vm_unlock(*lock_lev);

#define MEASURE_GC (objspace->flags.measure_gc)
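
/*
 * The gc_marking_enter/exit and gc_sweeping_enter/exit helpers below use
 * gc_clock_start()/gc_clock_end() to accumulate profile.marking_time_ns and
 * profile.sweeping_time_ns, which later surface through GC.stat as
 * :marking_time and :sweeping_time (see rb_gc_impl_stat()).
 */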
    GC_ASSERT(during_gc != 0);

        gc_clock_start(&objspace->profile.marking_start_time);

    GC_ASSERT(during_gc != 0);

        objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);

    GC_ASSERT(during_gc != 0);

        gc_clock_start(&objspace->profile.sweeping_start_time);

    GC_ASSERT(during_gc != 0);

        objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);

gc_with_gvl(void *ptr)

    return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);

int ruby_thread_has_gvl_p(void);

garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)

    if (dont_gc_val()) return TRUE;
    if (ruby_thread_has_gvl_p()) {
        return garbage_collect(objspace, reason);

        oar.objspace = objspace;
        oar.reason = reason;

            fprintf(stderr, "[FATAL] failed to allocate memory\n");

gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)

    for (; v != (VALUE)vend; v += stride) {
        asan_unpoisoning_object(v) {

                rb_gc_prepare_heap_process_object(v);
                if (!RVALUE_OLD_P(objspace, v) && !RVALUE_WB_UNPROTECTED(objspace, v)) {
                    RVALUE_AGE_SET_CANDIDATE(objspace, v);

rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)

    unsigned int reason = (GPR_FLAG_FULL_MARK |
                           GPR_FLAG_IMMEDIATE_MARK |
                           GPR_FLAG_IMMEDIATE_SWEEP |

    int full_marking_p = gc_config_full_mark_val;
    gc_config_full_mark_set(TRUE);

        GC_ASSERT(GC_COMPACTION_SUPPORTED);

        reason |= GPR_FLAG_COMPACT;

    if (!full_mark) reason &= ~GPR_FLAG_FULL_MARK;
    if (!immediate_mark) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
    if (!immediate_sweep) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;

    garbage_collect(objspace, reason);
    gc_finalize_deferred(objspace);

    gc_config_full_mark_set(full_marking_p);

rb_gc_impl_prepare_heap(void *objspace_ptr)

    size_t orig_total_slots = objspace_available_slots(objspace);
    size_t orig_allocatable_slots = objspace->heap_pages.allocatable_slots;

    rb_gc_impl_each_objects(objspace, gc_set_candidate_object_i, objspace_ptr);

    double orig_max_free_slots = gc_params.heap_free_slots_max_ratio;

    gc_params.heap_free_slots_max_ratio = 0.0;
    rb_gc_impl_start(objspace, true, true, true, true);
    gc_params.heap_free_slots_max_ratio = orig_max_free_slots;

    objspace->heap_pages.allocatable_slots = 0;
    heap_pages_free_unused_pages(objspace_ptr);
    GC_ASSERT(objspace->empty_pages_count == 0);
    objspace->heap_pages.allocatable_slots = orig_allocatable_slots;

    size_t total_slots = objspace_available_slots(objspace);
    if (orig_total_slots > total_slots) {
        objspace->heap_pages.allocatable_slots += orig_total_slots - total_slots;

#if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)

        GC_ASSERT(st_is_member(finalizer_table, obj));

    GC_ASSERT(RVALUE_MARKED(objspace, obj));
    GC_ASSERT(!RVALUE_PINNED(objspace, obj));

void rb_mv_generic_ivar(VALUE src, VALUE dst);

    gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)src, (void *)dest);

    GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));

    GC_ASSERT(!RVALUE_MARKING(objspace, src));

    marked = RVALUE_MARKED(objspace, src);
    wb_unprotected = RVALUE_WB_UNPROTECTED(objspace, src);
    uncollectible = RVALUE_UNCOLLECTIBLE(objspace, src);
    bool remembered = RVALUE_REMEMBERED(objspace, src);
    age = RVALUE_AGE_GET(src);

    CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(src), src);
    CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(src), src);
    CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(src), src);
    CLEAR_IN_BITMAP(GET_HEAP_PAGE(src)->remembered_bits, src);

        DURING_GC_COULD_MALLOC_REGION_START();

        rb_mv_generic_ivar(src, dest);

        DURING_GC_COULD_MALLOC_REGION_END();

        st_data_t srcid = (st_data_t)src, id;

        gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);

        DURING_GC_COULD_MALLOC_REGION_START();

        if (!st_delete(objspace->obj_to_id_tbl, &srcid, &id)) {
            rb_bug("gc_move: object ID seen, but not in mapping table: %s", rb_obj_info((VALUE)src));

        st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);

        DURING_GC_COULD_MALLOC_REGION_END();

        GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)src, NULL));

    memcpy((void *)dest, (void *)src, MIN(src_slot_size, slot_size));

    if (RVALUE_OVERHEAD > 0) {
        void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
        void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);

        memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);

    memset((void *)src, 0, src_slot_size);
    RVALUE_AGE_RESET(src);

        MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);

        CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);

        MARK_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);

        CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);

    if (wb_unprotected) {
        MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);

        CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);

    if (uncollectible) {
        MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);

        CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);

    RVALUE_AGE_SET(dest, age);

    RMOVED(src)->dummy = Qundef;
    RMOVED(src)->destination = dest;

    GET_HEAP_PAGE(src)->heap->total_freed_objects++;
    GET_HEAP_PAGE(dest)->heap->total_allocated_objects++;
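
/*
 * Page ordering for compaction: compare_pinned_slots() and
 * compare_free_slots() below are the comparators that
 * gc_sort_heap_by_compare_func() uses to rebuild each heap's page list in
 * the order given by the configured gc_compact_compare_func;
 * gc_verify_compaction_references() later installs compare_free_slots as
 * objspace->rcompactor.compare_func.
 */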
#if GC_CAN_COMPILE_COMPACTION

compare_pinned_slots(const void *left, const void *right, void *dummy)

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->pinned_slots - right_page->pinned_slots;

compare_free_slots(const void *left, const void *right, void *dummy)

    left_page = *(struct heap_page * const *)left;
    right_page = *(struct heap_page * const *)right;

    return left_page->free_slots - right_page->free_slots;

gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)

    for (int j = 0; j < HEAP_COUNT; j++) {

        size_t total_pages = heap->total_pages;

        struct heap_page *page = 0, **page_list = malloc(size);

        heap->free_pages = NULL;
        ccan_list_for_each(&heap->pages, page, page_node) {
            page_list[i++] = page;

        GC_ASSERT((size_t)i == total_pages);

        ccan_list_head_init(&heap->pages);

        for (i = 0; i < total_pages; i++) {
            ccan_list_add(&heap->pages, &page_list[i]->page_node);
            if (page_list[i]->free_slots != 0) {
                heap_add_freepage(heap, page_list[i]);

rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)

    return gc_object_moved_p(objspace_ptr, obj);

    page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
    page->flags.has_remembered_objects = FALSE;

    for (; v != (VALUE)vend; v += stride) {
        asan_unpoisoning_object(v) {

                if (RVALUE_WB_UNPROTECTED(objspace, v)) {
                    page->flags.has_uncollectible_wb_unprotected_objects = TRUE;

                if (RVALUE_REMEMBERED(objspace, v)) {
                    page->flags.has_remembered_objects = TRUE;

                if (page->flags.before_sweep) {
                    if (RVALUE_MARKED(objspace, v)) {
                        rb_gc_update_object_references(objspace, v);

                    rb_gc_update_object_references(objspace, v);

    objspace->flags.during_reference_updating = true;

    for (int i = 0; i < HEAP_COUNT; i++) {
        bool should_set_mark_bits = TRUE;

        ccan_list_for_each(&heap->pages, page, page_node) {
            uintptr_t start = (uintptr_t)page->start;
            uintptr_t end = start + (page->total_slots * heap->slot_size);

            gc_ref_update((void *)start, (void *)end, heap->slot_size, objspace, page);
            if (page == heap->sweeping_page) {
                should_set_mark_bits = FALSE;

            if (should_set_mark_bits) {
                gc_setup_mark_bits(page);

    gc_ref_update_table_values_only(objspace->obj_to_id_tbl);
    gc_update_table_refs(objspace->id_to_obj_tbl);
    gc_update_table_refs(finalizer_table);

    rb_gc_update_vm_references((void *)objspace);

    objspace->flags.during_reference_updating = false;

#if GC_CAN_COMPILE_COMPACTION

root_obj_check_moved_i(const char *category, VALUE obj, void *data)

    if (gc_object_moved_p(objspace, obj)) {
        rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, rb_obj_info(rb_gc_impl_location(objspace, obj)));

reachable_object_check_moved_i(VALUE ref, void *data)

    if (gc_object_moved_p(rb_gc_get_objspace(), ref)) {
        rb_bug("Object %s points to MOVED: %p -> %s", rb_obj_info(parent), (void *)ref, rb_obj_info(rb_gc_impl_location(rb_gc_get_objspace(), ref)));

heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)

    for (; v != (VALUE)vend; v += stride) {
        if (gc_object_moved_p(objspace, v)) {

            asan_unpoisoning_object(v) {

                    if (!rb_gc_impl_garbage_object_p(objspace, v)) {
                        rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);

rb_gc_impl_during_gc_p(void *objspace_ptr)

#if RGENGC_PROFILE >= 2

      default: return "unknown";

gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)

    for (i=0; i<T_MASK; i++) {
        const char *type = type_name(i, 0);

rb_gc_impl_gc_count(void *objspace_ptr)

    return objspace->profile.count;

gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)

    static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
    static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
#if RGENGC_ESTIMATE_OLDMALLOC
    static VALUE sym_oldmalloc;

    static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
    static VALUE sym_none, sym_marking, sym_sweeping;
    static VALUE sym_weak_references_count, sym_retained_weak_references_count;

    VALUE major_by, need_major_by;
    unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;

        rb_bug("gc_info_decode: non-hash or symbol given");

    if (NIL_P(sym_major_by)) {
#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))

#if RGENGC_ESTIMATE_OLDMALLOC

        S(weak_references_count);
        S(retained_weak_references_count);

#define SET(name, attr) \
    if (key == sym_##name) \
    else if (hash != Qnil) \
        rb_hash_aset(hash, sym_##name, (attr));
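
/*
 * SET() supports both calling conventions of this decoder: when the caller
 * passed a Symbol key, the matching attribute is handed back for that key
 * alone; when the caller passed a Hash, every attribute is stored into it
 * under its symbol key.
 */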
        (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
        (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
        (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
        (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
        (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :

    SET(major_by, major_by);

    if (orig_flags == 0) {
        unsigned int need_major_flags = gc_needs_major_flags;

            (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
            (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
            (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
            (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
#if RGENGC_ESTIMATE_OLDMALLOC
            (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :

        SET(need_major_by, need_major_by);

        (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
        (flags & GPR_FLAG_MALLOC) ? sym_malloc :
        (flags & GPR_FLAG_METHOD) ? sym_method :
        (flags & GPR_FLAG_CAPI) ? sym_capi :
        (flags & GPR_FLAG_STRESS) ? sym_stress :

    SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
    SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);

    if (orig_flags == 0) {
        SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
                   gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);

    SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
    SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));

rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE key)

    return gc_info_decode(objspace, key, 0);

    gc_stat_sym_marking_time,
    gc_stat_sym_sweeping_time,
    gc_stat_sym_heap_allocated_pages,
    gc_stat_sym_heap_empty_pages,
    gc_stat_sym_heap_allocatable_slots,
    gc_stat_sym_heap_available_slots,
    gc_stat_sym_heap_live_slots,
    gc_stat_sym_heap_free_slots,
    gc_stat_sym_heap_final_slots,
    gc_stat_sym_heap_marked_slots,
    gc_stat_sym_heap_eden_pages,
    gc_stat_sym_total_allocated_pages,
    gc_stat_sym_total_freed_pages,
    gc_stat_sym_total_allocated_objects,
    gc_stat_sym_total_freed_objects,
    gc_stat_sym_malloc_increase_bytes,
    gc_stat_sym_malloc_increase_bytes_limit,
    gc_stat_sym_minor_gc_count,
    gc_stat_sym_major_gc_count,
    gc_stat_sym_compact_count,
    gc_stat_sym_read_barrier_faults,
    gc_stat_sym_total_moved_objects,
    gc_stat_sym_remembered_wb_unprotected_objects,
    gc_stat_sym_remembered_wb_unprotected_objects_limit,
    gc_stat_sym_old_objects,
    gc_stat_sym_old_objects_limit,
#if RGENGC_ESTIMATE_OLDMALLOC
    gc_stat_sym_oldmalloc_increase_bytes,
    gc_stat_sym_oldmalloc_increase_bytes_limit,

    gc_stat_sym_weak_references_count,

    gc_stat_sym_total_generated_normal_object_count,
    gc_stat_sym_total_generated_shady_object_count,
    gc_stat_sym_total_shade_operation_count,
    gc_stat_sym_total_promoted_count,
    gc_stat_sym_total_remembered_normal_object_count,
    gc_stat_sym_total_remembered_shady_object_count,

static VALUE gc_stat_symbols[gc_stat_sym_last];

setup_gc_stat_symbols(void)

    if (gc_stat_symbols[0] == 0) {
#define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))

        S(heap_allocated_pages);
        S(heap_empty_pages);
        S(heap_allocatable_slots);
        S(heap_available_slots);

        S(heap_final_slots);
        S(heap_marked_slots);

        S(total_allocated_pages);
        S(total_freed_pages);
        S(total_allocated_objects);
        S(total_freed_objects);
        S(malloc_increase_bytes);
        S(malloc_increase_bytes_limit);

        S(read_barrier_faults);
        S(total_moved_objects);
        S(remembered_wb_unprotected_objects);
        S(remembered_wb_unprotected_objects_limit);

        S(old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
        S(oldmalloc_increase_bytes);
        S(oldmalloc_increase_bytes_limit);

        S(weak_references_count);

        S(total_generated_normal_object_count);
        S(total_generated_shady_object_count);
        S(total_shade_operation_count);
        S(total_promoted_count);
        S(total_remembered_normal_object_count);
        S(total_remembered_shady_object_count);

ns_to_ms(uint64_t ns)

    return ns / (1000 * 1000);

rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)

    setup_gc_stat_symbols();

        rb_bug("non-hash or symbol given");

#define SET(name, attr) \
    if (key == gc_stat_symbols[gc_stat_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
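
/*
 * Same protocol as gc_info_decode(): rb_gc_impl_stat() either fills the
 * given Hash with every statistic below or, when a single Symbol is passed
 * (e.g. the Ruby-level GC.stat(:heap_live_slots)), the SET() chain returns
 * just that one value converted with SIZET2NUM().
 */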
    SET(count, objspace->profile.count);
    SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns));
    SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
    SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));

    SET(heap_allocated_pages, rb_darray_size(objspace->heap_pages.sorted));
    SET(heap_empty_pages, objspace->empty_pages_count)
    SET(heap_allocatable_slots, objspace->heap_pages.allocatable_slots);
    SET(heap_available_slots, objspace_available_slots(objspace));
    SET(heap_live_slots, objspace_live_slots(objspace));
    SET(heap_free_slots, objspace_free_slots(objspace));
    SET(heap_final_slots, total_final_slots_count(objspace));
    SET(heap_marked_slots, objspace->marked_slots);
    SET(heap_eden_pages, heap_eden_total_pages(objspace));
    SET(total_allocated_pages, objspace->heap_pages.allocated_pages);
    SET(total_freed_pages, objspace->heap_pages.freed_pages);
    SET(total_allocated_objects, total_allocated_objects(objspace));
    SET(total_freed_objects, total_freed_objects(objspace));
    SET(malloc_increase_bytes, malloc_increase);
    SET(malloc_increase_bytes_limit, malloc_limit);
    SET(minor_gc_count, objspace->profile.minor_gc_count);
    SET(major_gc_count, objspace->profile.major_gc_count);
    SET(compact_count, objspace->profile.compact_count);
    SET(read_barrier_faults, objspace->profile.read_barrier_faults);
    SET(total_moved_objects, objspace->rcompactor.total_moved);
    SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
    SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
    SET(old_objects, objspace->rgengc.old_objects);
    SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
    SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
    SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);

    SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
    SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
    SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
    SET(total_promoted_count, objspace->profile.total_promoted_count);
    SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
    SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);

#if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2

        gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
        gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
        gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
        gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
        gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
        gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);

enum gc_stat_heap_sym {
    gc_stat_heap_sym_slot_size,
    gc_stat_heap_sym_heap_eden_pages,
    gc_stat_heap_sym_heap_eden_slots,
    gc_stat_heap_sym_total_allocated_pages,
    gc_stat_heap_sym_force_major_gc_count,
    gc_stat_heap_sym_force_incremental_marking_finish_count,
    gc_stat_heap_sym_total_allocated_objects,
    gc_stat_heap_sym_total_freed_objects,
    gc_stat_heap_sym_last

static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];

setup_gc_stat_heap_symbols(void)

    if (gc_stat_heap_symbols[0] == 0) {
#define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))

        S(total_allocated_pages);
        S(force_major_gc_count);
        S(force_incremental_marking_finish_count);
        S(total_allocated_objects);
        S(total_freed_objects);

#define SET(name, attr) \
    if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
        return SIZET2NUM(attr); \
    else if (hash != Qnil) \
        rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));

    SET(slot_size, heap->slot_size);
    SET(heap_eden_pages, heap->total_pages);
    SET(heap_eden_slots, heap->total_slots);
    SET(total_allocated_pages, heap->total_allocated_pages);
    SET(force_major_gc_count, heap->force_major_gc_count);
    SET(force_incremental_marking_finish_count, heap->force_incremental_marking_finish_count);
    SET(total_allocated_objects, heap->total_allocated_objects);
    SET(total_freed_objects, heap->total_freed_objects);

rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)

    setup_gc_stat_heap_symbols();

    if (NIL_P(heap_name)) {

            rb_bug("non-hash given");

        for (int i = 0; i < HEAP_COUNT; i++) {

            stat_one_heap(&heaps[i], hash, Qnil);

        int heap_idx = FIX2INT(heap_name);

        if (heap_idx < 0 || heap_idx >= HEAP_COUNT) {

            return stat_one_heap(&heaps[heap_idx], Qnil, hash_or_sym);

            return stat_one_heap(&heaps[heap_idx], hash_or_sym, Qnil);

            rb_bug("non-hash or symbol given");

        rb_bug("heap_name must be nil or an Integer");

#define RBOOL(v) (v ? Qtrue : Qfalse)

rb_gc_impl_config_get(void *objspace_ptr)

#define sym(name) ID2SYM(rb_intern_const(name))

    rb_hash_aset(hash, sym("rgengc_allow_full_mark"), RBOOL(gc_config_full_mark_val));

gc_config_set_key(st_data_t key, st_data_t value, st_data_t data)

        gc_config_full_mark_set(RTEST(value));

rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)

    rb_hash_stlike_foreach(hash, gc_config_set_key, (st_data_t)objspace);

rb_gc_impl_stress_get(void *objspace_ptr)

    return ruby_gc_stress_mode;

rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)

    objspace->flags.gc_stressful = RTEST(flag);
    objspace->gc_stress_mode = flag;

get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)

    const char *ptr = getenv(name);

    if (ptr != NULL && *ptr) {

#if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
        val = strtoll(ptr, &end, 0);

        val = strtol(ptr, &end, 0);

            unit = 1024*1024*1024;

        while (*end && isspace((unsigned char)*end)) end++;

            if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
                if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);

        if (val > 0 && (size_t)val > lower_bound) {
                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);

            *default_value = (size_t)val;

                fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
                        name, val, *default_value, lower_bound);

get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)

    const char *ptr = getenv(name);

    if (ptr != NULL && *ptr) {

        if (!*ptr || *end) {

        if (accept_zero && val == 0.0) {

        else if (val <= lower_bound) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
                        name, val, *default_value, lower_bound);

        else if (upper_bound != 0.0 &&
                 val > upper_bound) {
                fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
                        name, val, *default_value, upper_bound);

    if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
    *default_value = val;
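
/*
 * rb_gc_impl_set_params() below wires these two parsers to the RUBY_GC_*
 * environment variables, for example:
 *
 *     RUBY_GC_HEAP_GROWTH_FACTOR=1.2 RUBY_GC_MALLOC_LIMIT=33554432 ruby app.rb
 *
 * Sizes go through get_envparam_size() (the 1024*1024*1024 unit above
 * corresponds to a gigabyte suffix) and ratios/factors through
 * get_envparam_double(); out-of-range values are reported and the
 * compiled-in default is kept.
 */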
rb_gc_impl_set_params(void *objspace_ptr)

    if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {

    for (int i = 0; i < HEAP_COUNT; i++) {
        char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
        snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);

        get_envparam_size(env_key, &gc_params.heap_init_slots[i], 0);

    get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
    get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
                        gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
    get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
                        gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
    get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
    get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);

    if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
        malloc_limit = gc_params.malloc_limit_min;

    get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
    if (!gc_params.malloc_limit_max) {
        gc_params.malloc_limit_max = SIZE_MAX;

    get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);

#if RGENGC_ESTIMATE_OLDMALLOC
    if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
        objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;

    get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
    get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);

static inline size_t

#ifdef HAVE_MALLOC_USABLE_SIZE
    return malloc_usable_size(ptr);

    MEMOP_TYPE_MALLOC = 0,

atomic_sub_nounderflow(size_t *var, size_t sub)

    if (sub == 0) return;

        if (val < sub) sub = val;

#define gc_stress_full_mark_after_malloc_p() \
    (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))

    unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
                           GPR_FLAG_STRESS | GPR_FLAG_MALLOC);

    if (gc_stress_full_mark_after_malloc_p()) {
        reason |= GPR_FLAG_FULL_MARK;

    garbage_collect_with_gvl(objspace, reason);

objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)

    if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
                   type == MEMOP_TYPE_MALLOC ? "malloc" :
                   type == MEMOP_TYPE_FREE ? "free " :
                   type == MEMOP_TYPE_REALLOC ? "realloc": "error",
                   new_size, old_size);

objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)

    if (new_size > old_size) {

#if RGENGC_ESTIMATE_OLDMALLOC

        atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
        atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);

    if (type == MEMOP_TYPE_MALLOC) {

        if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {

            garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);

#if MALLOC_ALLOCATED_SIZE
    if (new_size >= old_size) {

        size_t dec_size = old_size - new_size;
        size_t allocated_size = objspace->malloc_params.allocated_size;

#if MALLOC_ALLOCATED_SIZE_CHECK
        if (allocated_size < dec_size) {
            rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");

        atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);

      case MEMOP_TYPE_MALLOC:

      case MEMOP_TYPE_FREE:

            size_t allocations = objspace->malloc_params.allocations;
            if (allocations > 0) {
                atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);

#if MALLOC_ALLOCATED_SIZE_CHECK

                GC_ASSERT(objspace->malloc_params.allocations > 0);

      case MEMOP_TYPE_REALLOC: break;

#define objspace_malloc_increase(...) \
    for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
         !malloc_increase_done; \
         malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
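
/*
 * objspace_malloc_increase() expands to a for-loop header: the "report"
 * half runs in the initializer, the caller's attached block (possibly empty)
 * is the loop body, and the "body" half runs once in the increment clause.
 * Minimal use, as seen later in this file:
 *
 *     objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}
 */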
static inline size_t
objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)

    if (size == 0) size = 1;

#if CALC_EXACT_MALLOC_SIZE

    return during_gc && !dont_gc_val() && !rb_gc_multi_ractor_p() && ruby_thread_has_gvl_p();

static inline void *
objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)

    size = objspace_malloc_size(objspace, mem, size);
    objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}

#if CALC_EXACT_MALLOC_SIZE

#if defined(__GNUC__) && RUBY_DEBUG
#define RB_BUG_INSTEAD_OF_RB_MEMERROR 1

#ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
# define RB_BUG_INSTEAD_OF_RB_MEMERROR 0

#define GC_MEMERROR(...) \
    ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())

#define TRY_WITH_GC(siz, expr) do { \
        const gc_profile_record_flag gpr = \
            GPR_FLAG_FULL_MARK | \
            GPR_FLAG_IMMEDIATE_MARK | \
            GPR_FLAG_IMMEDIATE_SWEEP | \
        objspace_malloc_gc_stress(objspace); \
        if (RB_LIKELY((expr))) { \
        else if (!garbage_collect_with_gvl(objspace, gpr)) { \
            GC_MEMERROR("TRY_WITH_GC: could not GC"); \
        else if ((expr)) { \
            GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
                        "%"PRIdSIZE" bytes for %s", \
check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)

        rb_bug("Cannot %s during GC", msg);

rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)

#if CALC_EXACT_MALLOC_SIZE

        old_size = info->size;

    old_size = objspace_malloc_size(objspace, ptr, old_size);

    objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {

        RB_DEBUG_COUNTER_INC(heap_xfree);

rb_gc_impl_malloc(void *objspace_ptr, size_t size)

    check_malloc_not_in_gc(objspace, "malloc");

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = malloc(size));
    RB_DEBUG_COUNTER_INC(heap_xmalloc);
    return objspace_malloc_fixup(objspace, mem, size);

rb_gc_impl_calloc(void *objspace_ptr, size_t size)

        rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
#if RGENGC_CHECK_MODE || RUBY_DEBUG
        rb_bug("Cannot calloc during GC");

    size = objspace_malloc_prepare(objspace, size);
    TRY_WITH_GC(size, mem = calloc1(size));
    return objspace_malloc_fixup(objspace, mem, size);

rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)

    check_malloc_not_in_gc(objspace, "realloc");

    if (!ptr) return rb_gc_impl_malloc(objspace, new_size);

    if (new_size == 0) {
        if ((mem = rb_gc_impl_malloc(objspace, 0)) != NULL) {

            rb_gc_impl_free(objspace, ptr, old_size);

#if CALC_EXACT_MALLOC_SIZE

        old_size = info->size;

    old_size = objspace_malloc_size(objspace, ptr, old_size);

    new_size = objspace_malloc_size(objspace, mem, new_size);

#if CALC_EXACT_MALLOC_SIZE

        info->size = new_size;

    objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);

    RB_DEBUG_COUNTER_INC(heap_xrealloc);

rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)

        objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);

    else if (diff < 0) {
        objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);

#define GC_PROFILE_RECORD_DEFAULT_SIZE 100
current_process_time(struct timespec *ts)

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)

        static int try_clock_gettime = 1;
        if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {

            try_clock_gettime = 0;

        struct rusage usage;

        if (getrusage(RUSAGE_SELF, &usage) == 0) {
            time = usage.ru_utime;
            ts->tv_sec = time.tv_sec;
            ts->tv_nsec = (int32_t)time.tv_usec * 1000;

        FILETIME creation_time, exit_time, kernel_time, user_time;

        if (GetProcessTimes(GetCurrentProcess(),
                            &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
            memcpy(&ui, &user_time, sizeof(FILETIME));
#define PER100NSEC (uint64_t)(1000 * 1000 * 10)
            ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
            ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);

getrusage_time(void)

    if (current_process_time(&ts)) {
        return ts.tv_sec + ts.tv_nsec * 1e-9;

gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)

    if (objspace->profile.run) {
        size_t index = objspace->profile.next_index;

        objspace->profile.next_index++;

        if (!objspace->profile.records) {
            objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
            objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));

        if (index >= objspace->profile.size) {

            objspace->profile.size += 1000;
            ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));

            objspace->profile.records = ptr;

        if (!objspace->profile.records) {
            rb_bug("gc_profile malloc or realloc miss");

        record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];

        record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
#if MALLOC_ALLOCATED_SIZE
        record->allocated_size = malloc_allocated_size;

#if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY

            struct rusage usage;
            if (getrusage(RUSAGE_SELF, &usage) == 0) {
                record->maxrss = usage.ru_maxrss;
                record->minflt = usage.ru_minflt;
                record->majflt = usage.ru_majflt;

    if (gc_prof_enabled(objspace)) {

#if GC_PROFILE_MORE_DETAIL
        record->prepare_time = objspace->profile.prepare_time;

        record->gc_time = 0;
        record->gc_invoke_time = getrusage_time();

elapsed_time_from(double time)

    double now = getrusage_time();

    if (gc_prof_enabled(objspace)) {

        record->gc_time = elapsed_time_from(record->gc_invoke_time);
        record->gc_invoke_time -= objspace->profile.invoke_time;

#ifdef BUILDING_SHARED_GC
# define RUBY_DTRACE_GC_HOOK(name)

# define RUBY_DTRACE_GC_HOOK(name) \
    do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)

    RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {
        gc_prof_record(objspace)->gc_mark_time = getrusage_time();

    RUBY_DTRACE_GC_HOOK(MARK_END);
#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {

        record->gc_mark_time = elapsed_time_from(record->gc_mark_time);

    RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
    if (gc_prof_enabled(objspace)) {

        if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {

            objspace->profile.gc_sweep_start_time = getrusage_time();

    RUBY_DTRACE_GC_HOOK(SWEEP_END);

    if (gc_prof_enabled(objspace)) {

        if (record->gc_time > 0) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);

            record->gc_time += sweep_time;

        else if (GC_PROFILE_MORE_DETAIL) {
            sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);

#if GC_PROFILE_MORE_DETAIL
        record->gc_sweep_time += sweep_time;
        if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;

        if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;

#if GC_PROFILE_MORE_DETAIL
    if (gc_prof_enabled(objspace)) {

        record->allocate_increase = malloc_increase;
        record->allocate_limit = malloc_limit;

    if (gc_prof_enabled(objspace)) {

        size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
        size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;

#if GC_PROFILE_MORE_DETAIL
        record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
        record->heap_live_objects = live;
        record->heap_free_objects = total - live;

        record->heap_total_objects = total;
        record->heap_use_size = live * BASE_SLOT_SIZE;
        record->heap_total_size = total * BASE_SLOT_SIZE;
gc_profile_clear(VALUE _)

    void *p = objspace->profile.records;
    objspace->profile.records = NULL;
    objspace->profile.size = 0;
    objspace->profile.next_index = 0;
    objspace->profile.current_record = 0;

gc_profile_record_get(VALUE _)

    if (!objspace->profile.run) {

    for (i = 0; i < objspace->profile.next_index; i++) {

#if GC_PROFILE_MORE_DETAIL

#if RGENGC_PROFILE > 0

#if GC_PROFILE_MORE_DETAIL
#define MAJOR_REASON_MAX 0x10

gc_profile_dump_major_reason(unsigned int flags, char *buff)

    unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;

    if (reason == GPR_FLAG_NONE) {

        if (reason & GPR_FLAG_MAJOR_BY_##x) { \
            buff[i++] = #x[0]; \
            if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \

#if RGENGC_ESTIMATE_OLDMALLOC

    size_t count = objspace->profile.next_index;
#ifdef MAJOR_REASON_MAX
    char reason_str[MAJOR_REASON_MAX];

    if (objspace->profile.run && count ) {

        append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
        append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
                                   i+1, record->gc_invoke_time, record->heap_use_size,
                                   record->heap_total_size, record->heap_total_objects, record->gc_time*1000));

#if GC_PROFILE_MORE_DETAIL
        const char *str = "\n\n" \
                          "Prepare Time = Previously GC's rest sweep time\n"
                          "Index Flags Allocate Inc. Allocate Limit"
#if CALC_EXACT_MALLOC_SIZE
                          " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
                          " OldgenObj RemNormObj RemShadObj"
#if GC_PROFILE_DETAIL_MEMORY
                          " MaxRSS(KB) MinorFLT MajorFLT"

        for (i = 0; i < count; i++) {
            record = &objspace->profile.records[i];
            append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
#if CALC_EXACT_MALLOC_SIZE
                                   " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
                                   "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
#if GC_PROFILE_DETAIL_MEMORY

                                   gc_profile_dump_major_reason(record->flags, reason_str),
                                   (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
                                   (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
                                   (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
                                   (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
                                   (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
                                   (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
                                   record->allocate_increase, record->allocate_limit,
#if CALC_EXACT_MALLOC_SIZE
                                   record->allocated_size,

                                   record->heap_use_pages,
                                   record->gc_mark_time*1000,
                                   record->gc_sweep_time*1000,
                                   record->prepare_time*1000,

                                   record->heap_live_objects,
                                   record->heap_free_objects,
                                   record->removing_objects,
                                   record->empty_objects

                                   record->old_objects,
                                   record->remembered_normal_objects,
                                   record->remembered_shady_objects
#if GC_PROFILE_DETAIL_MEMORY

                                   record->maxrss / 1024,
gc_profile_result(VALUE _)

gc_profile_report(int argc, VALUE *argv, VALUE self)

gc_profile_total_time(VALUE self)
    if (objspace->profile.run && objspace->profile.next_index > 0) {
        size_t count = objspace->profile.next_index;
        for (i = 0; i < count; i++) {
            time += objspace->profile.records[i].gc_time;

gc_profile_enable_get(VALUE self)

gc_profile_enable(VALUE _)
    objspace->profile.run = TRUE;
    objspace->profile.current_record = 0;

gc_profile_disable(VALUE _)
    objspace->profile.run = FALSE;
    objspace->profile.current_record = 0;

gc_verify_internal_consistency_m(VALUE dummy)
    gc_verify_internal_consistency(rb_gc_get_objspace());
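/*
 * The profiler control methods above are thin: gc_profile_enable()/disable()
 * flip objspace->profile.run and reset current_record so a half-written record
 * from an in-flight GC is not reused, while gc_profile_total_time() simply sums
 * gc_time over the recorded entries. gc_verify_internal_consistency_m() is the
 * method-level wrapper around the internal consistency checker.
 */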
#if GC_CAN_COMPILE_COMPACTION
    GC_ASSERT(GC_COMPACTION_SUPPORTED);
    ruby_enable_autocompact = RTEST(v);
#if RGENGC_CHECK_MODE
    ruby_autocompact_compare_func = NULL;
    ruby_autocompact_compare_func = compare_free_slots;
# define gc_set_auto_compact rb_f_notimplement

#if GC_CAN_COMPILE_COMPACTION
gc_get_auto_compact(VALUE _)
# define gc_get_auto_compact rb_f_notimplement

#if GC_CAN_COMPILE_COMPACTION
gc_compact_stats(VALUE self)
    for (size_t i = 0; i < T_MASK; i++) {
        if (objspace->rcompactor.considered_count_table[i]) {
        if (objspace->rcompactor.moved_count_table[i]) {
        if (objspace->rcompactor.moved_up_count_table[i]) {
        if (objspace->rcompactor.moved_down_count_table[i]) {
# define gc_compact_stats rb_f_notimplement

#if GC_CAN_COMPILE_COMPACTION
gc_compact(VALUE self)
    int full_marking_p = gc_config_full_mark_val;
    gc_config_full_mark_set(TRUE);
    rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
    gc_config_full_mark_set(full_marking_p);
    return gc_compact_stats(self);
# define gc_compact rb_f_notimplement
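/*
 * gc_compact() above is essentially "force one compacting major GC": it saves
 * the full-mark configuration, forces it on, calls rb_gc_impl_start() with all
 * four boolean flags true (presumably full mark, immediate mark, immediate
 * sweep and compact, per the rb_gc_impl_start() declaration in gc_impl.h),
 * restores the configuration, and reports the per-type considered/moved
 * counters via gc_compact_stats(). When GC_CAN_COMPILE_COMPACTION is 0 these
 * entry points are defined to rb_f_notimplement instead.
 */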
#if GC_CAN_COMPILE_COMPACTION
    size_t required_slots[HEAP_COUNT];

desired_compaction_pages_i(struct heap_page *page, void *data)
    VALUE vend = vstart + (VALUE)(page->total_slots * page->heap->slot_size);
    for (VALUE v = vstart; v != vend; v += page->heap->slot_size) {
        asan_unpoisoning_object(v) {
            rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, page->heap, v);
            size_t dest_pool_idx = dest_pool - heaps;
            tdata->required_slots[dest_pool_idx]++;
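/*
 * desired_compaction_pages_i() walks every slot of a page, asks
 * gc_compact_destination_pool() where a live object in that slot would end up,
 * and tallies the answer in required_slots[], indexed by destination heap. The
 * totals are used below to pre-size each heap before a verification compaction.
 */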
gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
    static ID keywords[3] = {0};
    int kwarg_count = rb_get_kwargs(options, keywords, 0, 3, arguments);
    bool expand_heap = (kwarg_count > 1 && RTEST(arguments[1])) || (kwarg_count > 2 && RTEST(arguments[2]));

    rb_gc_impl_start(objspace, true, true, true, false);

    unsigned int lev = rb_gc_vm_lock();
        .objspace = objspace,
        .required_slots = {0},
    objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);

    size_t max_existing_pages = 0;
    for (int i = 0; i < HEAP_COUNT; i++) {
        max_existing_pages = MAX(max_existing_pages, heap->total_pages);

    for (int i = 0; i < HEAP_COUNT; i++) {
        size_t pages_to_add = 0;
        pages_to_add += max_existing_pages - heap->total_pages;
        objspace->heap_pages.allocatable_slots = desired_compaction.required_slots[i];
        while (objspace->heap_pages.allocatable_slots > 0) {
            heap_page_allocate_and_initialize(objspace, heap);
        for (; pages_to_add > 0; pages_to_add--) {
            heap_page_allocate_and_initialize_force(objspace, heap);

    objspace->rcompactor.compare_func = compare_free_slots;
    rb_gc_vm_unlock(lev);

    rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);

    rb_objspace_reachable_objects_from_root(root_obj_check_moved_i, objspace);
    objspace_each_objects(objspace, heap_check_moved_i, objspace, TRUE);

    objspace->rcompactor.compare_func = NULL;

    return gc_compact_stats(self);
# define gc_verify_compaction_references rb_f_notimplement
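/*
 * Rough flow of gc_verify_compaction_references(), as reconstructed from the
 * fragment above: parse up to three keyword arguments, run a full GC first
 * (the rb_gc_impl_start() call with the last flag false), then, under the VM
 * lock, compute the desired slot counts per destination heap and grow every
 * heap (optionally up to the largest existing page count) so that every object
 * has somewhere to move. With rcompactor.compare_func set to
 * compare_free_slots, a compacting full GC is triggered, after which root and
 * heap references are scanned (root_obj_check_moved_i / heap_check_moved_i)
 * for pointers still aimed at T_MOVED slots. It returns the same stats hash as
 * gc_compact_stats().
 */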
rb_gc_impl_objspace_free(void *objspace_ptr)
    if (is_lazy_sweeping(objspace))
        rb_bug("lazy sweeping underway when freeing object space");

    free(objspace->profile.records);
    objspace->profile.records = NULL;

    for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
        heap_page_free(objspace, rb_darray_get(objspace->heap_pages.sorted, i));
    rb_darray_free(objspace->heap_pages.sorted);
    heap_pages_lomem = 0;
    heap_pages_himem = 0;

    for (int i = 0; i < HEAP_COUNT; i++) {
        heap->total_pages = 0;
        heap->total_slots = 0;

    st_free_table(objspace->id_to_obj_tbl);
    st_free_table(objspace->obj_to_id_tbl);

    free_stack_chunks(&objspace->mark_stack);
    mark_stack_free_cache(&objspace->mark_stack);

    rb_darray_free(objspace->weak_references);
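/*
 * Teardown order in rb_gc_impl_objspace_free(): refuse to free the objspace
 * while a lazy sweep is still in progress, release the profiler records, free
 * every heap page plus the sorted page darray, zero the per-heap bookkeeping,
 * then drop the object-id tables, the mark stack storage and the weak
 * reference list.
 */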
#if MALLOC_ALLOCATED_SIZE
gc_malloc_allocated_size(VALUE self)

gc_malloc_allocations(VALUE self)

rb_gc_impl_objspace_alloc(void)

rb_gc_impl_objspace_init(void *objspace_ptr)
    gc_config_full_mark_set(TRUE);

    objspace->flags.measure_gc = true;
    malloc_limit = gc_params.malloc_limit_min;
    if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
        rb_bug("Could not preregister postponed job for GC");

    for (int i = 0; i < HEAP_COUNT; i++) {
        heap->slot_size = (1 << i) * BASE_SLOT_SIZE;
        ccan_list_head_init(&heap->pages);

    rb_darray_make(&objspace->heap_pages.sorted, 0);
    rb_darray_make(&objspace->weak_references, 0);

#if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
    heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;

    objspace->next_object_id = OBJ_ID_INITIAL;
    objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
    objspace->obj_to_id_tbl = st_init_numtable();
#if RGENGC_ESTIMATE_OLDMALLOC
    objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;

    for (int i = 0; i < HEAP_COUNT; i++) {
        gc_params.heap_init_slots[i] = GC_HEAP_INIT_SLOTS;

    init_mark_stack(&objspace->mark_stack);

    objspace->profile.invoke_time = getrusage_time();
    finalizer_table = st_init_numtable();
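/*
 * rb_gc_impl_objspace_init() seeds the fresh objspace: full-mark config and GC
 * time measurement on, malloc_limit taken from gc_params.malloc_limit_min, the
 * deferred-finalizer postponed job preregistered (rb_bug if that fails), the
 * HEAP_COUNT size pools sized as (1 << i) * BASE_SLOT_SIZE, the page and weak
 * reference darrays, the object-id tables, the oldmalloc limit when
 * RGENGC_ESTIMATE_OLDMALLOC is enabled, default heap_init_slots, the mark
 * stack, the profiler start time, and the finalizer table.
 */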
rb_gc_impl_init(void)
    if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
    if (GC_COMPACTION_SUPPORTED) {
#if MALLOC_ALLOCATED_SIZE

#define OPT(o) if (o) rb_ary_push(opts, rb_interned_str(#o, sizeof(#o) - 1))
    OPT(RGENGC_CHECK_MODE);
    OPT(RGENGC_PROFILE);
    OPT(RGENGC_ESTIMATE_OLDMALLOC);
    OPT(GC_PROFILE_MORE_DETAIL);
    OPT(GC_ENABLE_LAZY_SWEEP);
    OPT(CALC_EXACT_MALLOC_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE);
    OPT(MALLOC_ALLOCATED_SIZE_CHECK);
    OPT(GC_PROFILE_DETAIL_MEMORY);
    OPT(GC_COMPACTION_SUPPORTED);
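/*
 * The OPT() macro pushes the name of each compile-time option that is enabled
 * onto the opts array as an interned string; upstream CRuby exposes that array
 * to Ruby as GC::OPTS, so this list is what GC::OPTS reflects for this build.
 */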