Ruby 3.4.0dev (2024-11-05 revision 348a53415339076afc4a02fcd09f3ae36e9c4c61)
default.c
1 #include "ruby/internal/config.h"
2 
3 #include <signal.h>
4 
5 #ifndef _WIN32
6 # include <sys/mman.h>
7 # include <unistd.h>
8 #endif
9 
10 #if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
11 /* LIST_HEAD conflicts with sys/queue.h on macOS */
12 # include <sys/user.h>
13 #endif
14 
15 #include "internal/hash.h"
16 
17 #include "ruby/ruby.h"
18 #include "ruby/atomic.h"
19 #include "ruby/debug.h"
20 #include "ruby/thread.h"
21 #include "ruby/util.h"
22 #include "ruby/vm.h"
23 
24 #include "ccan/list/list.h"
25 #include "darray.h"
26 #include "gc/gc.h"
27 #include "gc/gc_impl.h"
28 
29 #ifndef BUILDING_SHARED_GC
30 # include "probes.h"
31 #endif
32 
33 #include "debug_counter.h"
34 #include "internal/sanitizers.h"
35 
36 /* MALLOC_HEADERS_BEGIN */
37 #ifndef HAVE_MALLOC_USABLE_SIZE
38 # ifdef _WIN32
39 # define HAVE_MALLOC_USABLE_SIZE
40 # define malloc_usable_size(a) _msize(a)
41 # elif defined HAVE_MALLOC_SIZE
42 # define HAVE_MALLOC_USABLE_SIZE
43 # define malloc_usable_size(a) malloc_size(a)
44 # endif
45 #endif
46 
47 #ifdef HAVE_MALLOC_USABLE_SIZE
48 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
49 /* Alternative malloc header is included in ruby/missing.h */
50 # elif defined(HAVE_MALLOC_H)
51 # include <malloc.h>
52 # elif defined(HAVE_MALLOC_NP_H)
53 # include <malloc_np.h>
54 # elif defined(HAVE_MALLOC_MALLOC_H)
55 # include <malloc/malloc.h>
56 # endif
57 #endif
58 
59 #ifdef HAVE_MALLOC_TRIM
60 # include <malloc.h>
61 
62 # ifdef __EMSCRIPTEN__
63 /* malloc_trim is defined in emscripten/emmalloc.h on emscripten. */
64 # include <emscripten/emmalloc.h>
65 # endif
66 #endif
67 
68 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
69 # include <mach/task.h>
70 # include <mach/mach_init.h>
71 # include <mach/mach_port.h>
72 #endif
73 
74 #ifndef VM_CHECK_MODE
75 # define VM_CHECK_MODE RUBY_DEBUG
76 #endif
77 
78 // From ractor_core.h
79 #ifndef RACTOR_CHECK_MODE
80 # define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)
81 #endif
82 
83 #ifndef RUBY_DEBUG_LOG
84 # define RUBY_DEBUG_LOG(...)
85 #endif
86 
87 #ifndef GC_HEAP_INIT_SLOTS
88 #define GC_HEAP_INIT_SLOTS 10000
89 #endif
90 #ifndef GC_HEAP_FREE_SLOTS
91 #define GC_HEAP_FREE_SLOTS 4096
92 #endif
93 #ifndef GC_HEAP_GROWTH_FACTOR
94 #define GC_HEAP_GROWTH_FACTOR 1.8
95 #endif
96 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
97 #define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 means disabled */
98 #endif
99 #ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
100 # define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
101 #endif
102 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
103 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
104 #endif
105 
106 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
107 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
108 #endif
109 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
110 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
111 #endif
112 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
113 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
114 #endif
115 
116 #ifndef GC_MALLOC_LIMIT_MIN
117 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
118 #endif
119 #ifndef GC_MALLOC_LIMIT_MAX
120 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
121 #endif
122 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
123 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
124 #endif
125 
126 #ifndef GC_OLDMALLOC_LIMIT_MIN
127 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
128 #endif
129 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
130 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
131 #endif
132 #ifndef GC_OLDMALLOC_LIMIT_MAX
133 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
134 #endif
135 
136 #ifndef GC_CAN_COMPILE_COMPACTION
137 #if defined(__wasi__) /* WebAssembly doesn't support signals */
138 # define GC_CAN_COMPILE_COMPACTION 0
139 #else
140 # define GC_CAN_COMPILE_COMPACTION 1
141 #endif
142 #endif
143 
144 #ifndef PRINT_ENTER_EXIT_TICK
145 # define PRINT_ENTER_EXIT_TICK 0
146 #endif
147 #ifndef PRINT_ROOT_TICKS
148 #define PRINT_ROOT_TICKS 0
149 #endif
150 
151 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_ROOT_TICKS)
152 
153 #ifndef HEAP_COUNT
154 # define HEAP_COUNT 5
155 #endif
156 
157 typedef struct ractor_newobj_heap_cache {
158  struct free_slot *freelist;
159  struct heap_page *using_page;
160 } rb_ractor_newobj_heap_cache_t;
161 
162 typedef struct ractor_newobj_cache {
163  size_t incremental_mark_step_allocated_slots;
164  rb_ractor_newobj_heap_cache_t heap_caches[HEAP_COUNT];
165 } rb_ractor_newobj_cache_t;
166 
167 typedef struct {
168  size_t heap_init_slots[HEAP_COUNT];
169  size_t heap_free_slots;
170  double growth_factor;
171  size_t growth_max_slots;
172 
173  double heap_free_slots_min_ratio;
174  double heap_free_slots_goal_ratio;
175  double heap_free_slots_max_ratio;
176  double uncollectible_wb_unprotected_objects_limit_ratio;
177  double oldobject_limit_factor;
178 
179  size_t malloc_limit_min;
180  size_t malloc_limit_max;
181  double malloc_limit_growth_factor;
182 
183  size_t oldmalloc_limit_min;
184  size_t oldmalloc_limit_max;
185  double oldmalloc_limit_growth_factor;
186 } ruby_gc_params_t;
187 
188 static ruby_gc_params_t gc_params = {
189  { GC_HEAP_INIT_SLOTS },
190  GC_HEAP_FREE_SLOTS,
191  GC_HEAP_GROWTH_FACTOR,
192  GC_HEAP_GROWTH_MAX_SLOTS,
193 
194  GC_HEAP_FREE_SLOTS_MIN_RATIO,
195  GC_HEAP_FREE_SLOTS_GOAL_RATIO,
196  GC_HEAP_FREE_SLOTS_MAX_RATIO,
197  GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
198  GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
199 
200  GC_MALLOC_LIMIT_MIN,
201  GC_MALLOC_LIMIT_MAX,
202  GC_MALLOC_LIMIT_GROWTH_FACTOR,
203 
204  GC_OLDMALLOC_LIMIT_MIN,
205  GC_OLDMALLOC_LIMIT_MAX,
206  GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
207 };
208 
209 /* GC_DEBUG:
210  * enable to embed GC debugging information.
211  */
212 #ifndef GC_DEBUG
213 #define GC_DEBUG 0
214 #endif
215 
216 /* RGENGC_DEBUG:
217  * 1: basic information
218  * 2: remember set operation
219  * 3: mark
220  * 4:
221  * 5: sweep
222  */
223 #ifndef RGENGC_DEBUG
224 #ifdef RUBY_DEVEL
225 #define RGENGC_DEBUG -1
226 #else
227 #define RGENGC_DEBUG 0
228 #endif
229 #endif
230 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
231 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
232 #elif defined(HAVE_VA_ARGS_MACRO)
233 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
234 #else
235 # define RGENGC_DEBUG_ENABLED(level) 0
236 #endif
237 int ruby_rgengc_debug;
238 
239 /* RGENGC_PROFILE
240  * 0: disable RGenGC profiling
241  * 1: enable profiling for basic information
242  * 2: enable profiling for each types
243  */
244 #ifndef RGENGC_PROFILE
245 # define RGENGC_PROFILE 0
246 #endif
247 
248 /* RGENGC_ESTIMATE_OLDMALLOC
249  * Enable/disable estimation of how much malloc'ed memory is held by old objects.
250  * If the estimate exceeds its threshold, a full GC is invoked.
251  * 0: disable estimation.
252  * 1: enable estimation.
253  */
254 #ifndef RGENGC_ESTIMATE_OLDMALLOC
255 # define RGENGC_ESTIMATE_OLDMALLOC 1
256 #endif
257 
258 /* RGENGC_FORCE_MAJOR_GC
259  * Force major/full GC if this macro is not 0.
260  */
261 #ifndef RGENGC_FORCE_MAJOR_GC
262 # define RGENGC_FORCE_MAJOR_GC 0
263 #endif
264 
265 #ifndef GC_PROFILE_MORE_DETAIL
266 # define GC_PROFILE_MORE_DETAIL 0
267 #endif
268 #ifndef GC_PROFILE_DETAIL_MEMORY
269 # define GC_PROFILE_DETAIL_MEMORY 0
270 #endif
271 #ifndef GC_ENABLE_LAZY_SWEEP
272 # define GC_ENABLE_LAZY_SWEEP 1
273 #endif
274 #ifndef CALC_EXACT_MALLOC_SIZE
275 # define CALC_EXACT_MALLOC_SIZE 0
276 #endif
277 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
278 # ifndef MALLOC_ALLOCATED_SIZE
279 # define MALLOC_ALLOCATED_SIZE 0
280 # endif
281 #else
282 # define MALLOC_ALLOCATED_SIZE 0
283 #endif
284 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
285 # define MALLOC_ALLOCATED_SIZE_CHECK 0
286 #endif
287 
288 #ifndef GC_DEBUG_STRESS_TO_CLASS
289 # define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
290 #endif
291 
292 typedef enum {
293  GPR_FLAG_NONE = 0x000,
294  /* major reason */
295  GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
296  GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
297  GPR_FLAG_MAJOR_BY_SHADY = 0x004,
298  GPR_FLAG_MAJOR_BY_FORCE = 0x008,
299 #if RGENGC_ESTIMATE_OLDMALLOC
300  GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
301 #endif
302  GPR_FLAG_MAJOR_MASK = 0x0ff,
303 
304  /* gc reason */
305  GPR_FLAG_NEWOBJ = 0x100,
306  GPR_FLAG_MALLOC = 0x200,
307  GPR_FLAG_METHOD = 0x400,
308  GPR_FLAG_CAPI = 0x800,
309  GPR_FLAG_STRESS = 0x1000,
310 
311  /* others */
312  GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
313  GPR_FLAG_HAVE_FINALIZE = 0x4000,
314  GPR_FLAG_IMMEDIATE_MARK = 0x8000,
315  GPR_FLAG_FULL_MARK = 0x10000,
316  GPR_FLAG_COMPACT = 0x20000,
317 
318  GPR_DEFAULT_REASON =
319  (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
320  GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
321 } gc_profile_record_flag;
322 
323 typedef struct gc_profile_record {
324  unsigned int flags;
325 
326  double gc_time;
327  double gc_invoke_time;
328 
329  size_t heap_total_objects;
330  size_t heap_use_size;
331  size_t heap_total_size;
332  size_t moved_objects;
333 
334 #if GC_PROFILE_MORE_DETAIL
335  double gc_mark_time;
336  double gc_sweep_time;
337 
338  size_t heap_use_pages;
339  size_t heap_live_objects;
340  size_t heap_free_objects;
341 
342  size_t allocate_increase;
343  size_t allocate_limit;
344 
345  double prepare_time;
346  size_t removing_objects;
347  size_t empty_objects;
348 #if GC_PROFILE_DETAIL_MEMORY
349  long maxrss;
350  long minflt;
351  long majflt;
352 #endif
353 #endif
354 #if MALLOC_ALLOCATED_SIZE
355  size_t allocated_size;
356 #endif
357 
358 #if RGENGC_PROFILE > 0
359  size_t old_objects;
360  size_t remembered_normal_objects;
361  size_t remembered_shady_objects;
362 #endif
363 } gc_profile_record;
364 
365 struct RMoved {
366  VALUE flags;
367  VALUE dummy;
368  VALUE destination;
369  uint32_t original_shape_id;
370 };
371 
372 #define RMOVED(obj) ((struct RMoved *)(obj))
373 
374 typedef uintptr_t bits_t;
375 enum {
376  BITS_SIZE = sizeof(bits_t),
377  BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
378 };
379 
380 struct heap_page_header {
381  struct heap_page *page;
382 };
383 
384 struct heap_page_body {
385  struct heap_page_header header;
386  /* char gap[]; */
387  /* RVALUE values[]; */
388 };
389 
390 #define STACK_CHUNK_SIZE 500
391 
392 typedef struct stack_chunk {
393  VALUE data[STACK_CHUNK_SIZE];
394  struct stack_chunk *next;
395 } stack_chunk_t;
396 
397 typedef struct mark_stack {
398  stack_chunk_t *chunk;
399  stack_chunk_t *cache;
400  int index;
401  int limit;
402  size_t cache_size;
403  size_t unused_cache_size;
404 } mark_stack_t;
405 
406 typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
407 
408 typedef struct rb_heap_struct {
409  short slot_size;
410 
411  /* Basic statistics */
412  size_t total_allocated_pages;
413  size_t force_major_gc_count;
414  size_t force_incremental_marking_finish_count;
415  size_t total_allocated_objects;
416  size_t total_freed_objects;
417  size_t final_slots_count;
418 
419  /* Sweeping statistics */
420  size_t freed_slots;
421  size_t empty_slots;
422 
423  struct heap_page *free_pages;
424  struct ccan_list_head pages;
425  struct heap_page *sweeping_page; /* iterator for .pages */
426  struct heap_page *compact_cursor;
427  uintptr_t compact_cursor_index;
428  struct heap_page *pooled_pages;
429  size_t total_pages; /* total page count in a heap */
430  size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
431 
432 } rb_heap_t;
433 
434 enum {
435  gc_stress_no_major,
436  gc_stress_no_immediate_sweep,
437  gc_stress_full_mark_after_malloc,
438  gc_stress_max
439 };
440 
441 enum gc_mode {
442  gc_mode_none,
443  gc_mode_marking,
444  gc_mode_sweeping,
445  gc_mode_compacting,
446 };
447 
448 typedef struct rb_objspace {
449  struct {
450  size_t limit;
451  size_t increase;
452 #if MALLOC_ALLOCATED_SIZE
453  size_t allocated_size;
454  size_t allocations;
455 #endif
456  } malloc_params;
457 
458  struct rb_gc_config {
459  bool full_mark;
460  } gc_config;
461 
462  struct {
463  unsigned int mode : 2;
464  unsigned int immediate_sweep : 1;
465  unsigned int dont_gc : 1;
466  unsigned int dont_incremental : 1;
467  unsigned int during_gc : 1;
468  unsigned int during_compacting : 1;
469  unsigned int during_reference_updating : 1;
470  unsigned int gc_stressful: 1;
471  unsigned int has_newobj_hook: 1;
472  unsigned int during_minor_gc : 1;
473  unsigned int during_incremental_marking : 1;
474  unsigned int measure_gc : 1;
475  } flags;
476 
477  rb_event_flag_t hook_events;
478  unsigned long long next_object_id;
479 
480  rb_heap_t heaps[HEAP_COUNT];
481  size_t empty_pages_count;
482  struct heap_page *empty_pages;
483 
484  struct {
485  rb_atomic_t finalizing;
486  } atomic_flags;
487 
488  mark_stack_t mark_stack;
489  size_t marked_slots;
490 
491  struct {
492  rb_darray(struct heap_page *) sorted;
493 
494  size_t allocated_pages;
495  size_t freed_pages;
496  uintptr_t range[2];
497  size_t freeable_pages;
498 
499  size_t allocatable_slots;
500 
501  /* final */
502  VALUE deferred_final;
503  } heap_pages;
504 
505  st_table *finalizer_table;
506 
507  struct {
508  int run;
509  unsigned int latest_gc_info;
510  gc_profile_record *records;
511  gc_profile_record *current_record;
512  size_t next_index;
513  size_t size;
514 
515 #if GC_PROFILE_MORE_DETAIL
516  double prepare_time;
517 #endif
518  double invoke_time;
519 
520  size_t minor_gc_count;
521  size_t major_gc_count;
522  size_t compact_count;
523  size_t read_barrier_faults;
524 #if RGENGC_PROFILE > 0
525  size_t total_generated_normal_object_count;
526  size_t total_generated_shady_object_count;
527  size_t total_shade_operation_count;
528  size_t total_promoted_count;
529  size_t total_remembered_normal_object_count;
530  size_t total_remembered_shady_object_count;
531 
532 #if RGENGC_PROFILE >= 2
533  size_t generated_normal_object_count_types[RUBY_T_MASK];
534  size_t generated_shady_object_count_types[RUBY_T_MASK];
535  size_t shade_operation_count_types[RUBY_T_MASK];
536  size_t promoted_types[RUBY_T_MASK];
537  size_t remembered_normal_object_count_types[RUBY_T_MASK];
538  size_t remembered_shady_object_count_types[RUBY_T_MASK];
539 #endif
540 #endif /* RGENGC_PROFILE */
541 
542  /* temporary profiling space */
543  double gc_sweep_start_time;
544  size_t total_allocated_objects_at_gc_start;
545  size_t heap_used_at_gc_start;
546 
547  /* basic statistics */
548  size_t count;
549  unsigned long long marking_time_ns;
550  struct timespec marking_start_time;
551  unsigned long long sweeping_time_ns;
552  struct timespec sweeping_start_time;
553 
554  /* Weak references */
555  size_t weak_references_count;
556  size_t retained_weak_references_count;
557  } profile;
558 
559  VALUE gc_stress_mode;
560 
561  struct {
562  VALUE parent_object;
563  int need_major_gc;
564  size_t last_major_gc;
565  size_t uncollectible_wb_unprotected_objects;
566  size_t uncollectible_wb_unprotected_objects_limit;
567  size_t old_objects;
568  size_t old_objects_limit;
569 
570 #if RGENGC_ESTIMATE_OLDMALLOC
571  size_t oldmalloc_increase;
572  size_t oldmalloc_increase_limit;
573 #endif
574 
575 #if RGENGC_CHECK_MODE >= 2
576  struct st_table *allrefs_table;
577  size_t error_count;
578 #endif
579  } rgengc;
580 
581  struct {
582  size_t considered_count_table[T_MASK];
583  size_t moved_count_table[T_MASK];
584  size_t moved_up_count_table[T_MASK];
585  size_t moved_down_count_table[T_MASK];
586  size_t total_moved;
587 
588  /* This function will be used, if set, to sort the heap prior to compaction */
589  gc_compact_compare_func compare_func;
590  } rcompactor;
591 
592  struct {
593  size_t pooled_slots;
594  size_t step_slots;
595  } rincgc;
596 
597  st_table *id_to_obj_tbl;
598  st_table *obj_to_id_tbl;
599 
600 #if GC_DEBUG_STRESS_TO_CLASS
601  VALUE stress_to_class;
602 #endif
603 
604  rb_darray(VALUE *) weak_references;
605  rb_postponed_job_handle_t finalize_deferred_pjob;
606 
607  unsigned long live_ractor_cache_count;
608 } rb_objspace_t;
609 
610 #ifndef HEAP_PAGE_ALIGN_LOG
611 /* default tiny heap size: 64KiB */
612 #define HEAP_PAGE_ALIGN_LOG 16
613 #endif
614 
615 #if RACTOR_CHECK_MODE || GC_DEBUG
616 struct rvalue_overhead {
617 # if RACTOR_CHECK_MODE
618  uint32_t _ractor_belonging_id;
619 # endif
620 # if GC_DEBUG
621  const char *file;
622  int line;
623 # endif
624 };
625 
626 // Make sure that RVALUE_OVERHEAD aligns to sizeof(VALUE)
627 # define RVALUE_OVERHEAD (sizeof(struct { \
628  union { \
629  struct rvalue_overhead overhead; \
630  VALUE value; \
631  }; \
632 }))
633 size_t rb_gc_impl_obj_slot_size(VALUE obj);
634 # define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_impl_obj_slot_size(obj)))
635 #else
636 # define RVALUE_OVERHEAD 0
637 #endif
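/* The anonymous union with a VALUE above pads RVALUE_OVERHEAD up to VALUE size
 * and alignment; GET_RVALUE_OVERHEAD() then finds the per-object debug/ractor
 * metadata stored immediately after the object's reported slot
 * (at obj + rb_gc_impl_obj_slot_size(obj)). */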
638 
639 #define BASE_SLOT_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]) + RVALUE_OVERHEAD)
640 
641 #ifndef MAX
642 # define MAX(a, b) (((a) > (b)) ? (a) : (b))
643 #endif
644 #ifndef MIN
645 # define MIN(a, b) (((a) < (b)) ? (a) : (b))
646 #endif
647 #define roomof(x, y) (((x) + (y) - 1) / (y))
648 #define CEILDIV(i, mod) roomof(i, mod)
649 enum {
650  HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
651  HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
652  HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
653  HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
654  HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
655  HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
656 };
657 #define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
658 #define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
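/* Rough numbers for a typical 64-bit build with RVALUE_OVERHEAD == 0:
 * BASE_SLOT_SIZE = sizeof(struct RBasic) + 3 * sizeof(VALUE) = 40 bytes and
 * HEAP_PAGE_SIZE = 64 KiB, so HEAP_PAGE_OBJ_LIMIT is about
 * (65536 - sizeof(struct heap_page_header)) / 40 ~= 1638 slots and each
 * per-page bitmap needs HEAP_PAGE_BITMAP_LIMIT = 26 bits_t words.
 * Exact values depend on the platform and on debug options. */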
659 
660 #if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
661 # define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
662 #endif
663 
664 #undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
665 /* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
666  * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
667 
668 #ifndef HAVE_MMAP
669 /* We can't use mmap of course, if it is not available. */
670 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
671 
672 #elif defined(__wasm__)
673 /* wasmtime does not have proper support for mmap.
674  * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
675  */
676 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
677 
678 #elif HAVE_CONST_PAGE_SIZE
679 /* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
680 static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
681 
682 #elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
683 /* If we can use the maximum page size. */
684 static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
685 
686 #elif defined(PAGE_SIZE)
687 /* If the PAGE_SIZE macro can be used dynamically. */
688 # define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
689 
690 #elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
691 /* If we can use sysconf to determine the page size. */
692 # define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
693 
694 #else
695 /* Otherwise we can't determine the system page size, so don't use mmap. */
696 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
697 #endif
698 
699 #ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
700 /* We can determine the system page size at runtime. */
701 # define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
702 
703 static bool heap_page_alloc_use_mmap;
704 #endif
705 
706 #define RVALUE_AGE_BIT_COUNT 2
707 #define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
708 #define RVALUE_OLD_AGE 3
709 
710 struct free_slot {
711  VALUE flags; /* always 0 for freed obj */
712  struct free_slot *next;
713 };
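/* The freelist is threaded through the dead slots themselves: flags == 0 marks
 * a slot as free and `next` reuses the second word of the slot, so keeping the
 * list costs no memory beyond the slots it links. */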
714 
715 struct heap_page {
716  unsigned short slot_size;
717  unsigned short total_slots;
718  unsigned short free_slots;
719  unsigned short final_slots;
720  unsigned short pinned_slots;
721  struct {
722  unsigned int before_sweep : 1;
723  unsigned int has_remembered_objects : 1;
724  unsigned int has_uncollectible_wb_unprotected_objects : 1;
725  } flags;
726 
727  rb_heap_t *heap;
728 
729  struct heap_page *free_next;
730  struct heap_page_body *body;
731  uintptr_t start;
732  struct free_slot *freelist;
733  struct ccan_list_node page_node;
734 
735  bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
736  /* the following three bitmaps are cleared at the beginning of full GC */
737  bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
738  bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
739  bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
740 
741  bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
742 
743  /* If set, the object is not movable */
744  bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
745  bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
746 };
747 
748 /*
749  * When asan is enabled, this will prohibit writing to the freelist until it is unlocked
750  */
751 static void
752 asan_lock_freelist(struct heap_page *page)
753 {
754  asan_poison_memory_region(&page->freelist, sizeof(struct free_list *));
755 }
756 
757 /*
758  * When asan is enabled, this will enable the ability to write to the freelist
759  */
760 static void
761 asan_unlock_freelist(struct heap_page *page)
762 {
763  asan_unpoison_memory_region(&page->freelist, sizeof(struct free_list *), false);
764 }
765 
766 static inline bool
767 heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page *page)
768 {
769  if (page->total_slots == 0) {
770  GC_ASSERT(page->start == 0);
771  GC_ASSERT(page->slot_size == 0);
772  GC_ASSERT(page->heap == NULL);
773  GC_ASSERT(page->free_slots == 0);
774  asan_unpoisoning_memory_region(&page->freelist, sizeof(&page->freelist)) {
775  GC_ASSERT(page->freelist == NULL);
776  }
777 
778  return true;
779  }
780  else {
781  GC_ASSERT(page->start != 0);
782  GC_ASSERT(page->slot_size != 0);
783  GC_ASSERT(page->heap != NULL);
784 
785  return false;
786  }
787 }
788 
789 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
790 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
791 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
792 
793 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
794 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
795 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
796 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
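/* Example with the default 64 KiB alignment and 64-bit bits_t: for an object
 * at address p, GET_PAGE_BODY(p) clears the low 16 bits to find the page body,
 * NUM_IN_PAGE(p) is the slot index within that page, and the slot's bit lives
 * in word NUM_IN_PAGE(p) / 64 at bit NUM_IN_PAGE(p) % 64 of each bitmap. */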
797 
798 /* Bitmap Operations */
799 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
800 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
801 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
802 
803 /* getting bitmap */
804 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
805 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
806 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
807 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
808 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
809 
810 #define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
811 
812 #define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
813 #define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
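/* Ages are packed two bits per slot, so one 64-bit word of age_bits covers 32
 * consecutive slots. For example, the object in slot 5 of its page keeps its
 * age in bits 10..11 of age_bits[0] (assuming 64-bit bits_t). */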
814 
815 static int
816 RVALUE_AGE_GET(VALUE obj)
817 {
818  bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
819  return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
820 }
821 
822 static void
823 RVALUE_AGE_SET(VALUE obj, int age)
824 {
825  RUBY_ASSERT(age <= RVALUE_OLD_AGE);
826  bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
827  // clear the bits
828  age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
829  // shift the correct value in
830  age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
831  if (age == RVALUE_OLD_AGE) {
832  RB_FL_SET_RAW(obj, RUBY_FL_PROMOTED);
833  }
834  else {
835  RB_FL_UNSET_RAW(obj, RUBY_FL_PROMOTED);
836  }
837 }
838 
839 #define malloc_limit objspace->malloc_params.limit
840 #define malloc_increase objspace->malloc_params.increase
841 #define malloc_allocated_size objspace->malloc_params.allocated_size
842 #define heap_pages_lomem objspace->heap_pages.range[0]
843 #define heap_pages_himem objspace->heap_pages.range[1]
844 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
845 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
846 #define heaps objspace->heaps
847 #define during_gc objspace->flags.during_gc
848 #define finalizing objspace->atomic_flags.finalizing
849 #define finalizer_table objspace->finalizer_table
850 #define ruby_gc_stressful objspace->flags.gc_stressful
851 #define ruby_gc_stress_mode objspace->gc_stress_mode
852 #if GC_DEBUG_STRESS_TO_CLASS
853 #define stress_to_class objspace->stress_to_class
854 #define set_stress_to_class(c) (stress_to_class = (c))
855 #else
856 #define stress_to_class (objspace, 0)
857 #define set_stress_to_class(c) (objspace, (c))
858 #endif
859 
860 #if 0
861 #define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
862 #define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
863 #define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (int)(b))
864 #define dont_gc_val() (objspace->flags.dont_gc)
865 #else
866 #define dont_gc_on() (objspace->flags.dont_gc = 1)
867 #define dont_gc_off() (objspace->flags.dont_gc = 0)
868 #define dont_gc_set(b) (objspace->flags.dont_gc = (int)(b))
869 #define dont_gc_val() (objspace->flags.dont_gc)
870 #endif
871 
872 #define gc_config_full_mark_set(b) (objspace->gc_config.full_mark = (int)(b))
873 #define gc_config_full_mark_val (objspace->gc_config.full_mark)
874 
875 #ifndef DURING_GC_COULD_MALLOC_REGION_START
876 # define DURING_GC_COULD_MALLOC_REGION_START() \
877  assert(rb_during_gc()); \
878  bool _prev_enabled = rb_gc_impl_gc_enabled_p(objspace); \
879  rb_gc_impl_gc_disable(objspace, false)
880 #endif
881 
882 #ifndef DURING_GC_COULD_MALLOC_REGION_END
883 # define DURING_GC_COULD_MALLOC_REGION_END() \
884  if (_prev_enabled) rb_gc_impl_gc_enable(objspace)
885 #endif
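/* Sketch of the intended usage of the two macros above (hypothetical caller):
 *
 *     DURING_GC_COULD_MALLOC_REGION_START();
 *     {
 *         table = st_init_numtable(); // may malloc while the GC is running
 *     }
 *     DURING_GC_COULD_MALLOC_REGION_END();
 *
 * GC is disabled for the region and restored afterwards, so the allocation
 * cannot re-enter the collector. */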
886 
887 static inline enum gc_mode
888 gc_mode_verify(enum gc_mode mode)
889 {
890 #if RGENGC_CHECK_MODE > 0
891  switch (mode) {
892  case gc_mode_none:
893  case gc_mode_marking:
894  case gc_mode_sweeping:
895  case gc_mode_compacting:
896  break;
897  default:
898  rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
899  }
900 #endif
901  return mode;
902 }
903 
904 static inline bool
905 has_sweeping_pages(rb_objspace_t *objspace)
906 {
907  for (int i = 0; i < HEAP_COUNT; i++) {
908  if ((&heaps[i])->sweeping_page) {
909  return TRUE;
910  }
911  }
912  return FALSE;
913 }
914 
915 static inline size_t
916 heap_eden_total_pages(rb_objspace_t *objspace)
917 {
918  size_t count = 0;
919  for (int i = 0; i < HEAP_COUNT; i++) {
920  count += (&heaps[i])->total_pages;
921  }
922  return count;
923 }
924 
925 static inline size_t
926 total_allocated_objects(rb_objspace_t *objspace)
927 {
928  size_t count = 0;
929  for (int i = 0; i < HEAP_COUNT; i++) {
930  rb_heap_t *heap = &heaps[i];
931  count += heap->total_allocated_objects;
932  }
933  return count;
934 }
935 
936 static inline size_t
937 total_freed_objects(rb_objspace_t *objspace)
938 {
939  size_t count = 0;
940  for (int i = 0; i < HEAP_COUNT; i++) {
941  rb_heap_t *heap = &heaps[i];
942  count += heap->total_freed_objects;
943  }
944  return count;
945 }
946 
947 static inline size_t
948 total_final_slots_count(rb_objspace_t *objspace)
949 {
950  size_t count = 0;
951  for (int i = 0; i < HEAP_COUNT; i++) {
952  rb_heap_t *heap = &heaps[i];
953  count += heap->final_slots_count;
954  }
955  return count;
956 }
957 
958 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
959 #define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
960 #define gc_needs_major_flags objspace->rgengc.need_major_gc
961 
962 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
963 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
964 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
965 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
966 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
967 #define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
968 #define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
969 #define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
970 
971 #if SIZEOF_LONG == SIZEOF_VOIDP
972 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
973 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
974 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
975  ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
976 #else
977 # error not supported
978 #endif
979 
980 struct RZombie {
981  struct RBasic basic;
982  VALUE next;
983  void (*dfree)(void *);
984  void *data;
985 };
986 
987 #define RZOMBIE(o) ((struct RZombie *)(o))
988 
989 int ruby_disable_gc = 0;
990 int ruby_enable_autocompact = 0;
991 #if RGENGC_CHECK_MODE
992 gc_compact_compare_func ruby_autocompact_compare_func;
993 #endif
994 
995 static void init_mark_stack(mark_stack_t *stack);
996 static int garbage_collect(rb_objspace_t *, unsigned int reason);
997 
998 static int gc_start(rb_objspace_t *objspace, unsigned int reason);
999 static void gc_rest(rb_objspace_t *objspace);
1000 
1001 enum gc_enter_event {
1002  gc_enter_event_start,
1003  gc_enter_event_continue,
1004  gc_enter_event_rest,
1005  gc_enter_event_finalizer,
1006 };
1007 
1008 static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1009 static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1010 static void gc_marking_enter(rb_objspace_t *objspace);
1011 static void gc_marking_exit(rb_objspace_t *objspace);
1012 static void gc_sweeping_enter(rb_objspace_t *objspace);
1013 static void gc_sweeping_exit(rb_objspace_t *objspace);
1014 static bool gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1015 
1016 static void gc_sweep(rb_objspace_t *objspace);
1017 static void gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap);
1018 static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1019 
1020 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1021 static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1022 static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1023 
1024 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1025 NO_SANITIZE("memory", static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr));
1026 
1027 static void gc_verify_internal_consistency(void *objspace_ptr);
1028 
1029 static double getrusage_time(void);
1030 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1031 static inline void gc_prof_timer_start(rb_objspace_t *);
1032 static inline void gc_prof_timer_stop(rb_objspace_t *);
1033 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1034 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1035 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1036 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1037 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1038 static inline void gc_prof_set_heap_info(rb_objspace_t *);
1039 
1040 #define gc_prof_record(objspace) (objspace)->profile.current_record
1041 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1042 
1043 #ifdef HAVE_VA_ARGS_MACRO
1044 # define gc_report(level, objspace, ...) \
1045  if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1046 #else
1047 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1048 #endif
1049 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1050 
1051 static void gc_finalize_deferred(void *dmy);
1052 
1053 #if USE_TICK_T
1054 
1055 /* the following code is only for internal tuning. */
1056 
1057 /* Source code to use RDTSC is quoted and modified from
1058  * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1059  * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1060  */
1061 
1062 #if defined(__GNUC__) && defined(__i386__)
1063 typedef unsigned long long tick_t;
1064 #define PRItick "llu"
1065 static inline tick_t
1066 tick(void)
1067 {
1068  unsigned long long int x;
1069  __asm__ __volatile__ ("rdtsc" : "=A" (x));
1070  return x;
1071 }
1072 
1073 #elif defined(__GNUC__) && defined(__x86_64__)
1074 typedef unsigned long long tick_t;
1075 #define PRItick "llu"
1076 
1077 static __inline__ tick_t
1078 tick(void)
1079 {
1080  unsigned long hi, lo;
1081  __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1082  return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1083 }
1084 
1085 #elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1086 typedef unsigned long long tick_t;
1087 #define PRItick "llu"
1088 
1089 static __inline__ tick_t
1090 tick(void)
1091 {
1092  unsigned long long val = __builtin_ppc_get_timebase();
1093  return val;
1094 }
1095 
1096 /* Implementation for macOS PPC by @nobu
1097  * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1098  */
1099 #elif defined(__POWERPC__) && defined(__APPLE__)
1100 typedef unsigned long long tick_t;
1101 #define PRItick "llu"
1102 
1103 static __inline__ tick_t
1104 tick(void)
1105 {
1106  unsigned long int upper, lower, tmp;
1107  # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1108  # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1109  do {
1110  mftbu(upper);
1111  mftb(lower);
1112  mftbu(tmp);
1113  } while (tmp != upper);
1114  return ((tick_t)upper << 32) | lower;
1115 }
1116 
1117 #elif defined(__aarch64__) && defined(__GNUC__)
1118 typedef unsigned long tick_t;
1119 #define PRItick "lu"
1120 
1121 static __inline__ tick_t
1122 tick(void)
1123 {
1124  unsigned long val;
1125  __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1126  return val;
1127 }
1128 
1129 
1130 #elif defined(_WIN32) && defined(_MSC_VER)
1131 #include <intrin.h>
1132 typedef unsigned __int64 tick_t;
1133 #define PRItick "llu"
1134 
1135 static inline tick_t
1136 tick(void)
1137 {
1138  return __rdtsc();
1139 }
1140 
1141 #else /* use clock */
1142 typedef clock_t tick_t;
1143 #define PRItick "llu"
1144 
1145 static inline tick_t
1146 tick(void)
1147 {
1148  return clock();
1149 }
1150 #endif /* TSC */
1151 #else /* USE_TICK_T */
1152 #define MEASURE_LINE(expr) expr
1153 #endif /* USE_TICK_T */
1154 
1155 static inline VALUE check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj);
1156 
1157 #define RVALUE_MARKED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1158 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1159 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1160 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1161 #define RVALUE_PINNED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1162 
1163 static inline int
1164 RVALUE_MARKED(rb_objspace_t *objspace, VALUE obj)
1165 {
1166  check_rvalue_consistency(objspace, obj);
1167  return RVALUE_MARKED_BITMAP(obj) != 0;
1168 }
1169 
1170 static inline int
1171 RVALUE_PINNED(rb_objspace_t *objspace, VALUE obj)
1172 {
1173  check_rvalue_consistency(objspace, obj);
1174  return RVALUE_PINNED_BITMAP(obj) != 0;
1175 }
1176 
1177 static inline int
1178 RVALUE_WB_UNPROTECTED(rb_objspace_t *objspace, VALUE obj)
1179 {
1180  check_rvalue_consistency(objspace, obj);
1181  return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1182 }
1183 
1184 static inline int
1185 RVALUE_MARKING(rb_objspace_t *objspace, VALUE obj)
1186 {
1187  check_rvalue_consistency(objspace, obj);
1188  return RVALUE_MARKING_BITMAP(obj) != 0;
1189 }
1190 
1191 static inline int
1192 RVALUE_REMEMBERED(rb_objspace_t *objspace, VALUE obj)
1193 {
1194  check_rvalue_consistency(objspace, obj);
1195  return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1196 }
1197 
1198 static inline int
1199 RVALUE_UNCOLLECTIBLE(rb_objspace_t *objspace, VALUE obj)
1200 {
1201  check_rvalue_consistency(objspace, obj);
1202  return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1203 }
1204 
1205 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1206 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1207 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1208 
1209 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1210 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1211 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1212 
1213 static int
1214 check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int terminate)
1215 {
1216  int err = 0;
1217 
1218  int lev = rb_gc_vm_lock_no_barrier();
1219  {
1220  if (SPECIAL_CONST_P(obj)) {
1221  fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1222  err++;
1223  }
1224  else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1225  struct heap_page *empty_page = objspace->empty_pages;
1226  while (empty_page) {
1227  if ((uintptr_t)empty_page->body <= (uintptr_t)obj &&
1228  (uintptr_t)obj < (uintptr_t)empty_page->body + HEAP_PAGE_SIZE) {
1229  GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, empty_page));
1230  fprintf(stderr, "check_rvalue_consistency: %p is in an empty page (%p).\n",
1231  (void *)obj, (void *)empty_page);
1232  err++;
1233  goto skip;
1234  }
1235  }
1236  fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1237  err++;
1238  skip:
1239  ;
1240  }
1241  else {
1242  const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1243  const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1244  const int mark_bit = RVALUE_MARKED_BITMAP(obj) != 0;
1245  const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
1246  const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1247  const int age = RVALUE_AGE_GET((VALUE)obj);
1248 
1249  if (heap_page_in_global_empty_pages_pool(objspace, GET_HEAP_PAGE(obj))) {
1250  fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", rb_obj_info(obj));
1251  err++;
1252  }
1253  if (BUILTIN_TYPE(obj) == T_NONE) {
1254  fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", rb_obj_info(obj));
1255  err++;
1256  }
1257  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1258  fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", rb_obj_info(obj));
1259  err++;
1260  }
1261 
1262  if (BUILTIN_TYPE(obj) != T_DATA) {
1263  rb_obj_memsize_of((VALUE)obj);
1264  }
1265 
1266  /* check generation
1267  *
1268  * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1269  */
1270  if (age > 0 && wb_unprotected_bit) {
1271  fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", rb_obj_info(obj), age);
1272  err++;
1273  }
1274 
1275  if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1276  fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", rb_obj_info(obj));
1277  err++;
1278  }
1279 
1280  if (!is_full_marking(objspace)) {
1281  if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1282  fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1283  rb_obj_info(obj), age);
1284  err++;
1285  }
1286  if (remembered_bit && age != RVALUE_OLD_AGE) {
1287  fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1288  rb_obj_info(obj), age);
1289  err++;
1290  }
1291  }
1292 
1293  /*
1294  * check coloring
1295  *
1296  * marking:false marking:true
1297  * marked:false white *invalid*
1298  * marked:true black grey
1299  */
1300  if (is_incremental_marking(objspace) && marking_bit) {
1301  if (!is_marking(objspace) && !mark_bit) {
1302  fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", rb_obj_info(obj));
1303  err++;
1304  }
1305  }
1306  }
1307  }
1308  rb_gc_vm_unlock_no_barrier(lev);
1309 
1310  if (err > 0 && terminate) {
1311  rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1312  }
1313  return err;
1314 }
1315 
1316 #if RGENGC_CHECK_MODE == 0
1317 static inline VALUE
1318 check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj)
1319 {
1320  return obj;
1321 }
1322 #else
1323 static VALUE
1324 check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj)
1325 {
1326  check_rvalue_consistency_force(objspace, obj, TRUE);
1327  return obj;
1328 }
1329 #endif
1330 
1331 static inline bool
1332 gc_object_moved_p(rb_objspace_t *objspace, VALUE obj)
1333 {
1334  if (RB_SPECIAL_CONST_P(obj)) {
1335  return FALSE;
1336  }
1337  else {
1338  int ret;
1339  asan_unpoisoning_object(obj) {
1340  ret = BUILTIN_TYPE(obj) == T_MOVED;
1341  }
1342  return ret;
1343  }
1344 }
1345 
1346 static inline int
1347 RVALUE_OLD_P(rb_objspace_t *objspace, VALUE obj)
1348 {
1349  GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
1350  check_rvalue_consistency(objspace, obj);
1351  // Because this will only ever be called on GC controlled objects,
1352  // we can use the faster _RAW function here
1353  return RB_OBJ_PROMOTED_RAW(obj);
1354 }
1355 
1356 static inline void
1357 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1358 {
1359  MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1360  objspace->rgengc.old_objects++;
1361 
1362 #if RGENGC_PROFILE >= 2
1363  objspace->profile.total_promoted_count++;
1364  objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1365 #endif
1366 }
1367 
1368 static inline void
1369 RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1370 {
1371  RB_DEBUG_COUNTER_INC(obj_promote);
1372  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1373 }
1374 
1375 /* set age to age+1 */
1376 static inline void
1377 RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1378 {
1379  int age = RVALUE_AGE_GET((VALUE)obj);
1380 
1381  if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1382  rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", rb_obj_info(obj));
1383  }
1384 
1385  age++;
1386  RVALUE_AGE_SET(obj, age);
1387 
1388  if (age == RVALUE_OLD_AGE) {
1389  RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1390  }
1391 
1392  check_rvalue_consistency(objspace, obj);
1393 }
1394 
1395 static inline void
1396 RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1397 {
1398  check_rvalue_consistency(objspace, obj);
1399  GC_ASSERT(!RVALUE_OLD_P(objspace, obj));
1400  RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
1401  check_rvalue_consistency(objspace, obj);
1402 }
1403 
1404 static inline void
1405 RVALUE_AGE_RESET(VALUE obj)
1406 {
1407  RVALUE_AGE_SET(obj, 0);
1408 }
1409 
1410 static inline void
1411 RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1412 {
1413  check_rvalue_consistency(objspace, obj);
1414  GC_ASSERT(RVALUE_OLD_P(objspace, obj));
1415 
1416  if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(objspace, obj)) {
1417  CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
1418  }
1419 
1420  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1421  RVALUE_AGE_RESET(obj);
1422 
1423  if (RVALUE_MARKED(objspace, obj)) {
1424  objspace->rgengc.old_objects--;
1425  }
1426 
1427  check_rvalue_consistency(objspace, obj);
1428 }
1429 
1430 static inline int
1431 RVALUE_BLACK_P(rb_objspace_t *objspace, VALUE obj)
1432 {
1433  return RVALUE_MARKED(objspace, obj) && !RVALUE_MARKING(objspace, obj);
1434 }
1435 
1436 static inline int
1437 RVALUE_WHITE_P(rb_objspace_t *objspace, VALUE obj)
1438 {
1439  return !RVALUE_MARKED(objspace, obj);
1440 }
1441 
1442 bool
1443 rb_gc_impl_gc_enabled_p(void *objspace_ptr)
1444 {
1445  rb_objspace_t *objspace = objspace_ptr;
1446  return !dont_gc_val();
1447 }
1448 
1449 void
1450 rb_gc_impl_gc_enable(void *objspace_ptr)
1451 {
1452  rb_objspace_t *objspace = objspace_ptr;
1453 
1454  dont_gc_off();
1455 }
1456 
1457 void
1458 rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
1459 {
1460  rb_objspace_t *objspace = objspace_ptr;
1461 
1462  if (finish_current_gc) {
1463  gc_rest(objspace);
1464  }
1465 
1466  dont_gc_on();
1467 }
1468 
1469 /*
1470  --------------------------- ObjectSpace -----------------------------
1471 */
1472 
1473 static inline void *
1474 calloc1(size_t n)
1475 {
1476  return calloc(1, n);
1477 }
1478 
1479 void
1480 rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event)
1481 {
1482  rb_objspace_t *objspace = objspace_ptr;
1483  objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
1484  objspace->flags.has_newobj_hook = !!(objspace->hook_events & RUBY_INTERNAL_EVENT_NEWOBJ);
1485 }
1486 
1487 unsigned long long
1488 rb_gc_impl_get_total_time(void *objspace_ptr)
1489 {
1490  rb_objspace_t *objspace = objspace_ptr;
1491 
1492  unsigned long long marking_time = objspace->profile.marking_time_ns;
1493  unsigned long long sweeping_time = objspace->profile.sweeping_time_ns;
1494 
1495  return marking_time + sweeping_time;
1496 }
1497 
1498 void
1499 rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1500 {
1501  rb_objspace_t *objspace = objspace_ptr;
1502 
1503  objspace->flags.measure_gc = RTEST(flag) ? TRUE : FALSE;
1504 }
1505 
1506 bool
1507 rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1508 {
1509  rb_objspace_t *objspace = objspace_ptr;
1510 
1511  return objspace->flags.measure_gc;
1512 }
1513 
1514 static size_t
1515 minimum_slots_for_heap(rb_objspace_t *objspace, rb_heap_t *heap)
1516 {
1517  size_t heap_idx = heap - heaps;
1518  return gc_params.heap_init_slots[heap_idx];
1519 }
1520 
1521 static int
1522 object_id_cmp(st_data_t x, st_data_t y)
1523 {
1524  if (RB_TYPE_P(x, T_BIGNUM)) {
1525  return !rb_big_eql(x, y);
1526  }
1527  else {
1528  return x != y;
1529  }
1530 }
1531 
1532 static st_index_t
1533 object_id_hash(st_data_t n)
1534 {
1535  return FIX2LONG(rb_hash((VALUE)n));
1536 }
1537 
1538 #define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
1539 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
1540 
1541 static const struct st_hash_type object_id_hash_type = {
1542  object_id_cmp,
1543  object_id_hash,
1544 };
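/* Object ids are handed out in steps of OBJ_ID_INCREMENT starting from
 * OBJ_ID_INITIAL (see rb_gc_impl_object_id below) and remembered in
 * obj_to_id_tbl / id_to_obj_tbl rather than being derived from addresses,
 * which lets compaction move an object without changing its id. */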
1545 
1546 /* garbage objects will be collected soon. */
1547 bool
1548 rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
1549 {
1550  rb_objspace_t *objspace = objspace_ptr;
1551 
1552  bool dead = false;
1553 
1554  asan_unpoisoning_object(ptr) {
1555  switch (BUILTIN_TYPE(ptr)) {
1556  case T_NONE:
1557  case T_MOVED:
1558  case T_ZOMBIE:
1559  dead = true;
1560  break;
1561  default:
1562  break;
1563  }
1564  }
1565 
1566  if (dead) return true;
1567  return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&
1568  !RVALUE_MARKED(objspace, ptr);
1569 }
1570 
1571 VALUE
1572 rb_gc_impl_object_id_to_ref(void *objspace_ptr, VALUE object_id)
1573 {
1574  rb_objspace_t *objspace = objspace_ptr;
1575 
1576  VALUE obj;
1577  if (st_lookup(objspace->id_to_obj_tbl, object_id, &obj) &&
1578  !rb_gc_impl_garbage_object_p(objspace, obj)) {
1579  return obj;
1580  }
1581 
1582  if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(objspace->next_object_id))) {
1583  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
1584  }
1585  else {
1586  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
1587  }
1588 }
1589 
1590 VALUE
1591 rb_gc_impl_object_id(void *objspace_ptr, VALUE obj)
1592 {
1593  VALUE id;
1594  rb_objspace_t *objspace = objspace_ptr;
1595 
1596  unsigned int lev = rb_gc_vm_lock();
1597  if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
1598  st_data_t val;
1599  if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &val)) {
1600  id = (VALUE)val;
1601  }
1602  else {
1603  rb_bug("rb_gc_impl_object_id: FL_SEEN_OBJ_ID flag set but not found in table");
1604  }
1605  }
1606  else {
1607  GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, NULL));
1608 
1609  id = ULL2NUM(objspace->next_object_id);
1610  objspace->next_object_id += OBJ_ID_INCREMENT;
1611 
1612  st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
1613  st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
1614  FL_SET(obj, FL_SEEN_OBJ_ID);
1615  }
1616  rb_gc_vm_unlock(lev);
1617 
1618  return id;
1619 }
1620 
1621 static void free_stack_chunks(mark_stack_t *);
1622 static void mark_stack_free_cache(mark_stack_t *);
1623 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1624 
1625 static inline void
1626 heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1627 {
1628  asan_unpoison_object(obj, false);
1629 
1630  asan_unlock_freelist(page);
1631 
1632  struct free_slot *slot = (struct free_slot *)obj;
1633  slot->flags = 0;
1634  slot->next = page->freelist;
1635  page->freelist = slot;
1636  asan_lock_freelist(page);
1637 
1638  RVALUE_AGE_RESET(obj);
1639 
1640  if (RGENGC_CHECK_MODE &&
1641  /* obj should belong to page */
1642  !(page->start <= (uintptr_t)obj &&
1643  (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1644  obj % BASE_SLOT_SIZE == 0)) {
1645  rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)obj);
1646  }
1647 
1648  asan_poison_object(obj);
1649  gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1650 }
1651 
1652 static void
1653 heap_allocatable_slots_expand(rb_objspace_t *objspace,
1654  rb_heap_t *heap, size_t free_slots, size_t total_slots)
1655 {
1656  double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1657  size_t target_total_slots;
1658 
1659  if (goal_ratio == 0.0) {
1660  target_total_slots = (size_t)(total_slots * gc_params.growth_factor);
1661  }
1662  else if (total_slots == 0) {
1663  target_total_slots = minimum_slots_for_heap(objspace, heap);
1664  }
1665  else {
1666  /* Find `f' where free_slots = f * total_slots * goal_ratio
1667  * => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
1668  */
1669  double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1670 
1671  if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1672  if (f < 1.0) f = 1.1;
1673 
1674  target_total_slots = (size_t)(f * total_slots);
1675 
1676  if (0) {
1677  fprintf(stderr,
1678  "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1679  " G(%1.2f), f(%1.2f),"
1680  " total_slots(%8"PRIuSIZE") => target_total_slots(%8"PRIuSIZE")\n",
1681  free_slots, total_slots, free_slots/(double)total_slots,
1682  goal_ratio, f, total_slots, target_total_slots);
1683  }
1684  }
1685 
1686  if (gc_params.growth_max_slots > 0) {
1687  size_t max_total_slots = (size_t)(total_slots + gc_params.growth_max_slots);
1688  if (target_total_slots > max_total_slots) target_total_slots = max_total_slots;
1689  }
1690 
1691  size_t extend_slot_count = target_total_slots - total_slots;
1692  /* Extend by at least 1 page. */
1693  if (extend_slot_count == 0) extend_slot_count = 1;
1694 
1695  objspace->heap_pages.allocatable_slots += extend_slot_count;
1696 }
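/* Worked example for heap_allocatable_slots_expand() with hypothetical inputs:
 * total_slots = 10000, free_slots = 2000, heap_free_slots_goal_ratio = 0.40
 * gives f = (10000 - 2000) / ((1 - 0.40) * 10000) = 1.33, so the heap is asked
 * to grow to about 13333 slots (subject to growth_factor and growth_max_slots
 * clamping). */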
1697 
1698 static inline void
1699 heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1700 {
1701  asan_unlock_freelist(page);
1702  GC_ASSERT(page->free_slots != 0);
1703  GC_ASSERT(page->freelist != NULL);
1704 
1705  page->free_next = heap->free_pages;
1706  heap->free_pages = page;
1707 
1708  RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1709 
1710  asan_lock_freelist(page);
1711 }
1712 
1713 static inline void
1714 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1715 {
1716  asan_unlock_freelist(page);
1717  GC_ASSERT(page->free_slots != 0);
1718  GC_ASSERT(page->freelist != NULL);
1719 
1720  page->free_next = heap->pooled_pages;
1721  heap->pooled_pages = page;
1722  objspace->rincgc.pooled_slots += page->free_slots;
1723 
1724  asan_lock_freelist(page);
1725 }
1726 
1727 static void
1728 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1729 {
1730  ccan_list_del(&page->page_node);
1731  heap->total_pages--;
1732  heap->total_slots -= page->total_slots;
1733 }
1734 
1735 static void
1736 gc_aligned_free(void *ptr, size_t size)
1737 {
1738 #if defined __MINGW32__
1739  __mingw_aligned_free(ptr);
1740 #elif defined _WIN32
1741  _aligned_free(ptr);
1742 #elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
1743  free(ptr);
1744 #else
1745  free(((void**)ptr)[-1]);
1746 #endif
1747 }
1748 
1749 static void
1750 heap_page_body_free(struct heap_page_body *page_body)
1751 {
1752  GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1753 
1754  if (HEAP_PAGE_ALLOC_USE_MMAP) {
1755 #ifdef HAVE_MMAP
1756  GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
1757  if (munmap(page_body, HEAP_PAGE_SIZE)) {
1758  rb_bug("heap_page_body_free: munmap failed");
1759  }
1760 #endif
1761  }
1762  else {
1763  gc_aligned_free(page_body, HEAP_PAGE_SIZE);
1764  }
1765 }
1766 
1767 static void
1768 heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1769 {
1770  objspace->heap_pages.freed_pages++;
1771  heap_page_body_free(page->body);
1772  free(page);
1773 }
1774 
1775 static void
1776 heap_pages_free_unused_pages(rb_objspace_t *objspace)
1777 {
1778  size_t pages_to_keep_count =
1779  // Get number of pages estimated for the smallest size pool
1780  CEILDIV(objspace->heap_pages.allocatable_slots, HEAP_PAGE_OBJ_LIMIT) *
1781  // Estimate the average slot size multiple
1782  (1 << (HEAP_COUNT / 2));
1783 
1784  if (objspace->empty_pages != NULL && objspace->empty_pages_count > pages_to_keep_count) {
1785  GC_ASSERT(objspace->empty_pages_count > 0);
1786  objspace->empty_pages = NULL;
1787  objspace->empty_pages_count = 0;
1788 
1789  size_t i, j;
1790  for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
1791  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
1792 
1793  if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count == 0) {
1794  heap_page_free(objspace, page);
1795  }
1796  else {
1797  if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count > 0) {
1798  page->free_next = objspace->empty_pages;
1799  objspace->empty_pages = page;
1800  objspace->empty_pages_count++;
1801  pages_to_keep_count--;
1802  }
1803 
1804  if (i != j) {
1805  rb_darray_set(objspace->heap_pages.sorted, j, page);
1806  }
1807  j++;
1808  }
1809  }
1810 
1811  rb_darray_pop(objspace->heap_pages.sorted, i - j);
1812  GC_ASSERT(rb_darray_size(objspace->heap_pages.sorted) == j);
1813 
1814  struct heap_page *hipage = rb_darray_get(objspace->heap_pages.sorted, rb_darray_size(objspace->heap_pages.sorted) - 1);
1815  uintptr_t himem = (uintptr_t)hipage->body + HEAP_PAGE_SIZE;
1816  GC_ASSERT(himem <= heap_pages_himem);
1817  heap_pages_himem = himem;
1818 
1819  struct heap_page *lopage = rb_darray_get(objspace->heap_pages.sorted, 0);
1820  uintptr_t lomem = (uintptr_t)lopage->body + sizeof(struct heap_page_header);
1821  GC_ASSERT(lomem >= heap_pages_lomem);
1822  heap_pages_lomem = lomem;
1823  }
1824 }
1825 
1826 static void *
1827 gc_aligned_malloc(size_t alignment, size_t size)
1828 {
1829  /* alignment must be a power of 2 */
1830  GC_ASSERT(((alignment - 1) & alignment) == 0);
1831  GC_ASSERT(alignment % sizeof(void*) == 0);
1832 
1833  void *res;
1834 
1835 #if defined __MINGW32__
1836  res = __mingw_aligned_malloc(size, alignment);
1837 #elif defined _WIN32
1838  void *_aligned_malloc(size_t, size_t);
1839  res = _aligned_malloc(size, alignment);
1840 #elif defined(HAVE_POSIX_MEMALIGN)
1841  if (posix_memalign(&res, alignment, size) != 0) {
1842  return NULL;
1843  }
1844 #elif defined(HAVE_MEMALIGN)
1845  res = memalign(alignment, size);
1846 #else
1847  char* aligned;
1848  res = malloc(alignment + size + sizeof(void*));
1849  aligned = (char*)res + alignment + sizeof(void*);
1850  aligned -= ((VALUE)aligned & (alignment - 1));
1851  ((void**)aligned)[-1] = res;
1852  res = (void*)aligned;
1853 #endif
1854 
1855  GC_ASSERT((uintptr_t)res % alignment == 0);
1856 
1857  return res;
1858 }
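/* The fallback branch of gc_aligned_malloc() above implements aligned
 * allocation by hand: it over-allocates alignment + size + sizeof(void*)
 * bytes, rounds up to the next alignment boundary, and stashes the original
 * malloc() result one word before the aligned address so that
 * gc_aligned_free() can recover it via ((void**)ptr)[-1].
 *
 * Sketch with made-up numbers (alignment 0x1000, 8-byte pointers):
 *   malloc() returns                        res     = 0x1004
 *   res + alignment + sizeof(void*)                 = 0x200c
 *   rounded down to an alignment boundary:  aligned = 0x2000
 *   ((void**)0x2000)[-1] (stored at 0x1ff8)         = 0x1004
 */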
1859 
1860 static struct heap_page_body *
1861 heap_page_body_allocate(void)
1862 {
1863  struct heap_page_body *page_body;
1864 
1865  if (HEAP_PAGE_ALLOC_USE_MMAP) {
1866 #ifdef HAVE_MMAP
1867  GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
1868 
1869  char *ptr = mmap(NULL, HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE,
1870  PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1871  if (ptr == MAP_FAILED) {
1872  return NULL;
1873  }
1874 
1875  char *aligned = ptr + HEAP_PAGE_ALIGN;
1876  aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
1877  GC_ASSERT(aligned > ptr);
1878  GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
1879 
1880  size_t start_out_of_range_size = aligned - ptr;
1881  GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1882  if (start_out_of_range_size > 0) {
1883  if (munmap(ptr, start_out_of_range_size)) {
1884  rb_bug("heap_page_body_allocate: munmap failed for start");
1885  }
1886  }
1887 
1888  size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
1889  GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1890  if (end_out_of_range_size > 0) {
1891  if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
1892  rb_bug("heap_page_body_allocate: munmap failed for end");
1893  }
1894  }
1895 
1896  page_body = (struct heap_page_body *)aligned;
1897 #endif
1898  }
1899  else {
1900  page_body = gc_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1901  }
1902 
1903  GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1904 
1905  return page_body;
1906 }
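/* Sketch of the mmap path above: HEAP_PAGE_ALIGN extra bytes are mapped so an
 * aligned region of HEAP_PAGE_SIZE is guaranteed to exist somewhere inside
 * the mapping; the unaligned head and tail are then unmapped again:
 *
 *   ptr           aligned                   aligned + HEAP_PAGE_SIZE
 *    |-- munmap -->|<------ kept page ------>|---- munmap ---->|
 *
 * Both trimmed pieces are multiples of the OS page size (asserted above), so
 * they can be released without touching the retained, aligned page body. */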
1907 
1908 static struct heap_page *
1909 heap_page_resurrect(rb_objspace_t *objspace)
1910 {
1911  struct heap_page *page = NULL;
1912  if (objspace->empty_pages != NULL) {
1913  GC_ASSERT(objspace->empty_pages_count > 0);
1914  objspace->empty_pages_count--;
1915  page = objspace->empty_pages;
1916  objspace->empty_pages = page->free_next;
1917  }
1918 
1919  return page;
1920 }
1921 
1922 static struct heap_page *
1923 heap_page_allocate(rb_objspace_t *objspace)
1924 {
1925  struct heap_page_body *page_body = heap_page_body_allocate();
1926  if (page_body == 0) {
1927  rb_memerror();
1928  }
1929 
1930  struct heap_page *page = calloc1(sizeof(struct heap_page));
1931  if (page == 0) {
1932  heap_page_body_free(page_body);
1933  rb_memerror();
1934  }
1935 
1936  uintptr_t start = (uintptr_t)page_body + sizeof(struct heap_page_header);
1937  uintptr_t end = (uintptr_t)page_body + HEAP_PAGE_SIZE;
1938 
1939  size_t lo = 0;
1940  size_t hi = rb_darray_size(objspace->heap_pages.sorted);
1941  while (lo < hi) {
1942  struct heap_page *mid_page;
1943 
1944  size_t mid = (lo + hi) / 2;
1945  mid_page = rb_darray_get(objspace->heap_pages.sorted, mid);
1946  if ((uintptr_t)mid_page->start < start) {
1947  lo = mid + 1;
1948  }
1949  else if ((uintptr_t)mid_page->start > start) {
1950  hi = mid;
1951  }
1952  else {
1953  rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1954  }
1955  }
1956 
1957  rb_darray_insert(&objspace->heap_pages.sorted, hi, page);
1958 
1959  if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1960  if (heap_pages_himem < end) heap_pages_himem = end;
1961 
1962  page->body = page_body;
1963  page_body->header.page = page;
1964 
1965  objspace->heap_pages.allocated_pages++;
1966 
1967  return page;
1968 }
1969 
1970 static void
1971 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1972 {
1973  /* Adding to eden heap during incremental sweeping is forbidden */
1974  GC_ASSERT(!heap->sweeping_page);
1975  GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, page));
1976 
1977  /* adjust obj_limit (object number available in this page) */
1978  uintptr_t start = (uintptr_t)page->body + sizeof(struct heap_page_header);
1979  if (start % BASE_SLOT_SIZE != 0) {
1980  int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
1981  start = start + delta;
1982  GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1983 
1984  /* Find a num in page that is evenly divisible by `stride`.
1985  * This is to ensure that objects are aligned with bit planes.
1986  * In other words, ensure there are an even number of objects
1987  * per bit plane. */
1988  if (NUM_IN_PAGE(start) == 1) {
1989  start += heap->slot_size - BASE_SLOT_SIZE;
1990  }
1991 
1992  GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % heap->slot_size == 0);
1993  }
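    /* Example of the adjustment above, assuming BASE_SLOT_SIZE == 40 (the
     * typical 64-bit value): for a heap with slot_size == 160 each object
     * spans 4 bitmap bits. If the page header pushes `start` to slot index 1,
     * it is bumped by slot_size - BASE_SLOT_SIZE == 120 bytes to slot index
     * 4, a multiple of the 4-slot stride, which keeps objects aligned with
     * the bit planes as asserted above. */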
1994 
1995  int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/heap->slot_size);
1996 
1997  page->start = start;
1998  page->total_slots = slot_count;
1999  page->slot_size = heap->slot_size;
2000  page->heap = heap;
2001 
2002  asan_unlock_freelist(page);
2003  page->freelist = NULL;
2004  asan_unpoison_memory_region(page->body, HEAP_PAGE_SIZE, false);
2005  for (VALUE p = (VALUE)start; p < start + (slot_count * heap->slot_size); p += heap->slot_size) {
2006  heap_page_add_freeobj(objspace, page, p);
2007  }
2008  asan_lock_freelist(page);
2009 
2010  page->free_slots = slot_count;
2011 
2012  heap->total_allocated_pages++;
2013 
2014  ccan_list_add_tail(&heap->pages, &page->page_node);
2015  heap->total_pages++;
2016  heap->total_slots += page->total_slots;
2017 }
2018 
2019 static int
2020 heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_heap_t *heap)
2021 {
2022  if (objspace->heap_pages.allocatable_slots > 0) {
2023  gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
2024  "allocatable_slots: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2025  rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, heap->total_pages);
2026 
2027  struct heap_page *page = heap_page_resurrect(objspace);
2028  if (page == NULL) {
2029  page = heap_page_allocate(objspace);
2030  }
2031  heap_add_page(objspace, heap, page);
2032  heap_add_freepage(heap, page);
2033 
2034  if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
2035  objspace->heap_pages.allocatable_slots -= page->total_slots;
2036  }
2037  else {
2038  objspace->heap_pages.allocatable_slots = 0;
2039  }
2040 
2041  return true;
2042  }
2043 
2044  return false;
2045 }
2046 
2047 static void
2048 heap_page_allocate_and_initialize_force(rb_objspace_t *objspace, rb_heap_t *heap)
2049 {
2050  size_t prev_allocatable_slots = objspace->heap_pages.allocatable_slots;
2051  // Set allocatable slots to 1 to force a page to be created.
2052  objspace->heap_pages.allocatable_slots = 1;
2053  heap_page_allocate_and_initialize(objspace, heap);
2054  GC_ASSERT(heap->free_pages != NULL);
2055  objspace->heap_pages.allocatable_slots = prev_allocatable_slots;
2056 }
2057 
2058 static void
2059 gc_continue(rb_objspace_t *objspace, rb_heap_t *heap)
2060 {
2061  unsigned int lock_lev;
2062  gc_enter(objspace, gc_enter_event_continue, &lock_lev);
2063 
2064  /* Continue marking if in incremental marking. */
2065  if (is_incremental_marking(objspace)) {
2066  if (gc_marks_continue(objspace, heap)) {
2067  gc_sweep(objspace);
2068  }
2069  }
2070 
2071  /* Continue sweeping if in lazy sweeping or the previous incremental
2072  * marking finished and did not yield a free page. */
2073  if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2074  gc_sweep_continue(objspace, heap);
2075  }
2076 
2077  gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2078 }
2079 
2080 static void
2081 heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
2082 {
2083  GC_ASSERT(heap->free_pages == NULL);
2084 
2085  if (heap->total_slots < gc_params.heap_init_slots[heap - heaps] &&
2086  heap->sweeping_page == NULL) {
2087  heap_page_allocate_and_initialize_force(objspace, heap);
2088  GC_ASSERT(heap->free_pages != NULL);
2089  return;
2090  }
2091 
2092  /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2093  gc_continue(objspace, heap);
2094 
2095  if (heap->free_pages == NULL) {
2096  heap_page_allocate_and_initialize(objspace, heap);
2097  }
2098 
2099  /* If we still don't have a free page and not allowed to create a new page,
2100  * we should start a new GC cycle. */
2101  if (heap->free_pages == NULL) {
2102  if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2103  rb_memerror();
2104  }
2105  else {
2106  if (objspace->heap_pages.allocatable_slots == 0 && !gc_config_full_mark_val) {
2107  heap_allocatable_slots_expand(objspace, heap,
2108  heap->freed_slots + heap->empty_slots,
2109  heap->total_slots);
2110  GC_ASSERT(objspace->heap_pages.allocatable_slots > 0);
2111  }
2112  /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2113  gc_continue(objspace, heap);
2114 
2115  /* If we're not doing incremental marking (e.g. a minor GC) or we have
2116  * finished sweeping and still don't have a free page, then
2117  * gc_sweep_finish_heap should allow us to create a new page. */
2118  if (heap->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, heap)) {
2119  if (gc_needs_major_flags == GPR_FLAG_NONE) {
2120  rb_bug("cannot create a new page after GC");
2121  }
2122  else { // Major GC is required, which will allow us to create new page
2123  if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2124  rb_memerror();
2125  }
2126  else {
2127  /* Do steps of incremental marking or lazy sweeping. */
2128  gc_continue(objspace, heap);
2129 
2130  if (heap->free_pages == NULL &&
2131  !heap_page_allocate_and_initialize(objspace, heap)) {
2132  rb_bug("cannot create a new page after major GC");
2133  }
2134  }
2135  }
2136  }
2137  }
2138  }
2139 
2140  GC_ASSERT(heap->free_pages != NULL);
2141 }
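/* heap_prepare() above works through an escalating fallback ladder until
 * heap->free_pages is non-NULL: grow a heap that is still below its
 * configured initial size, continue any in-flight incremental marking or
 * lazy sweeping, allocate a new page while allocatable_slots permits, run a
 * GC, and finally demand a major GC; only when all of that fails does it
 * fall back to rb_memerror() or rb_bug(). */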
2142 
2143 static inline VALUE
2144 newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2145 {
2146  VALUE *p = (VALUE *)obj;
2147  p[2] = v1;
2148  p[3] = v2;
2149  p[4] = v3;
2150  return obj;
2151 }
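/* newobj_fill() writes the payload words of a freshly allocated object. On a
 * typical 64-bit build the smallest slot is 5 VALUEs wide: p[0] and p[1] are
 * the RBasic flags and klass, and v1..v3 land in p[2]..p[4]. */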
2152 
2153 #if GC_DEBUG
2154 static inline const char*
2155 rb_gc_impl_source_location_cstr(int *ptr)
2156 {
2157  /* We could refer to `rb_source_location_cstr()` directly before, but not
2158  * any longer. We have to do the heavy lifting through our debugging API. */
2159  if (! ptr) {
2160  return NULL;
2161  }
2162  else if (! (*ptr = rb_sourceline())) {
2163  return NULL;
2164  }
2165  else {
2166  return rb_sourcefile();
2167  }
2168 }
2169 #endif
2170 
2171 static inline VALUE
2172 newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2173 {
2174 #if !__has_feature(memory_sanitizer)
2175  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2176  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2177 #endif
2178  RBASIC(obj)->flags = flags;
2179  *((VALUE *)&RBASIC(obj)->klass) = klass;
2180 
2181  int t = flags & RUBY_T_MASK;
2182  if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
2183  RVALUE_AGE_SET_CANDIDATE(objspace, obj);
2184  }
2185 
2186 #if RACTOR_CHECK_MODE
2187  void rb_ractor_setup_belonging(VALUE obj);
2188  rb_ractor_setup_belonging(obj);
2189 #endif
2190 
2191 #if RGENGC_CHECK_MODE
2192  newobj_fill(obj, 0, 0, 0);
2193 
2194  int lev = rb_gc_vm_lock_no_barrier();
2195  {
2196  check_rvalue_consistency(objspace, obj);
2197 
2198  GC_ASSERT(RVALUE_MARKED(objspace, obj) == FALSE);
2199  GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
2200  GC_ASSERT(RVALUE_OLD_P(objspace, obj) == FALSE);
2201  GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, obj) == FALSE);
2202 
2203  if (RVALUE_REMEMBERED(objspace, obj)) rb_bug("newobj: %s is remembered.", rb_obj_info(obj));
2204  }
2205  rb_gc_vm_unlock_no_barrier(lev);
2206 #endif
2207 
2208  if (RB_UNLIKELY(wb_protected == FALSE)) {
2209  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2210  }
2211 
2212 #if RGENGC_PROFILE
2213  if (wb_protected) {
2214  objspace->profile.total_generated_normal_object_count++;
2215 #if RGENGC_PROFILE >= 2
2216  objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2217 #endif
2218  }
2219  else {
2220  objspace->profile.total_generated_shady_object_count++;
2221 #if RGENGC_PROFILE >= 2
2222  objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2223 #endif
2224  }
2225 #endif
2226 
2227 #if GC_DEBUG
2228  GET_RVALUE_OVERHEAD(obj)->file = rb_gc_impl_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
2229  GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2230 #endif
2231 
2232  gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
2233 
2234  RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
2235  return obj;
2236 }
2237 
2238 size_t
2239 rb_gc_impl_obj_slot_size(VALUE obj)
2240 {
2241  return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2242 }
2243 
2244 static inline size_t
2245 heap_slot_size(unsigned char pool_id)
2246 {
2247  GC_ASSERT(pool_id < HEAP_COUNT);
2248 
2249  size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2250 
2251 #if RGENGC_CHECK_MODE
2252  rb_objspace_t *objspace = rb_gc_get_objspace();
2253  GC_ASSERT(heaps[pool_id].slot_size == (short)slot_size);
2254 #endif
2255 
2256  slot_size -= RVALUE_OVERHEAD;
2257 
2258  return slot_size;
2259 }
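/* With the default BASE_SLOT_SIZE of 40 bytes on 64-bit builds, HEAP_COUNT of
 * 5, and RVALUE_OVERHEAD of 0 (non-GC_DEBUG builds), the pools work out to:
 *   pool_id:   0   1   2    3    4
 *   slot size: 40  80  160  320  640 bytes
 * each reduced by RVALUE_OVERHEAD when GC_DEBUG adds per-slot bookkeeping. */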
2260 
2261 bool
2262 rb_gc_impl_size_allocatable_p(size_t size)
2263 {
2264  return size <= heap_slot_size(HEAP_COUNT - 1);
2265 }
2266 
2267 static inline VALUE
2268 ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2269  size_t heap_idx)
2270 {
2271  rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2272  struct free_slot *p = heap_cache->freelist;
2273 
2274  if (RB_UNLIKELY(is_incremental_marking(objspace))) {
2275  // Not allowed to allocate without running an incremental marking step
2276  if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2277  return Qfalse;
2278  }
2279 
2280  if (p) {
2281  cache->incremental_mark_step_allocated_slots++;
2282  }
2283  }
2284 
2285  if (RB_LIKELY(p)) {
2286  VALUE obj = (VALUE)p;
2287  MAYBE_UNUSED(const size_t) stride = heap_slot_size(heap_idx);
2288  heap_cache->freelist = p->next;
2289  asan_unpoison_memory_region(p, stride, true);
2290 #if RGENGC_CHECK_MODE
2291  GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == stride);
2292  // zero clear
2293  MEMZERO((char *)obj, char, stride);
2294 #endif
2295  return obj;
2296  }
2297  else {
2298  return Qfalse;
2299  }
2300 }
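/* Fast-path allocation: each ractor keeps a per-heap freelist cache, so a hit
 * here pops a slot without taking the VM lock. Qfalse signals a cache miss
 * (an empty freelist, or the incremental-marking allocation budget above was
 * exhausted); the caller then falls back to newobj_cache_miss(). */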
2301 
2302 static struct heap_page *
2303 heap_next_free_page(rb_objspace_t *objspace, rb_heap_t *heap)
2304 {
2305  struct heap_page *page;
2306 
2307  if (heap->free_pages == NULL) {
2308  heap_prepare(objspace, heap);
2309  }
2310 
2311  page = heap->free_pages;
2312  heap->free_pages = page->free_next;
2313 
2314  GC_ASSERT(page->free_slots != 0);
2315 
2316  asan_unlock_freelist(page);
2317 
2318  return page;
2319 }
2320 
2321 static inline void
2322 ractor_cache_set_page(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx,
2323  struct heap_page *page)
2324 {
2325  gc_report(3, objspace, "ractor_set_cache: Using page %p\n", (void *)page->body);
2326 
2327  rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2328 
2329  GC_ASSERT(heap_cache->freelist == NULL);
2330  GC_ASSERT(page->free_slots != 0);
2331  GC_ASSERT(page->freelist != NULL);
2332 
2333  heap_cache->using_page = page;
2334  heap_cache->freelist = page->freelist;
2335  page->free_slots = 0;
2336  page->freelist = NULL;
2337 
2338  asan_unpoison_object((VALUE)heap_cache->freelist, false);
2339  GC_ASSERT(RB_TYPE_P((VALUE)heap_cache->freelist, T_NONE));
2340  asan_poison_object((VALUE)heap_cache->freelist);
2341 }
2342 
2343 static inline size_t
2344 heap_idx_for_size(size_t size)
2345 {
2346  size += RVALUE_OVERHEAD;
2347 
2348  size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2349 
2350  /* heap_idx is ceil(log2(slot_count)) */
2351  size_t heap_idx = 64 - nlz_int64(slot_count - 1);
2352 
2353  if (heap_idx >= HEAP_COUNT) {
2354  rb_bug("heap_idx_for_size: allocation size too large "
2355  "(size=%"PRIuSIZE", heap_idx=%"PRIuSIZE")", size, heap_idx);
2356  }
2357 
2358 #if RGENGC_CHECK_MODE
2359  rb_objspace_t *objspace = rb_gc_get_objspace();
2360  GC_ASSERT(size <= (size_t)heaps[heap_idx].slot_size);
2361  if (heap_idx > 0) GC_ASSERT(size > (size_t)heaps[heap_idx - 1].slot_size);
2362 #endif
2363 
2364  return heap_idx;
2365 }
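/* Worked example, assuming BASE_SLOT_SIZE == 40 and RVALUE_OVERHEAD == 0: a
 * request for 100 bytes gives slot_count = CEILDIV(100, 40) = 3, so
 * heap_idx = 64 - nlz_int64(2) = 2, i.e. the 160-byte pool -- the smallest
 * slot size that still fits the request (100 > 80, 100 <= 160). */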
2366 
2367 size_t
2368 rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
2369 {
2370  return heap_idx_for_size(size);
2371 }
2372 
2373 
2374 static size_t heap_sizes[HEAP_COUNT + 1] = { 0 };
2375 
2376 size_t *
2377 rb_gc_impl_heap_sizes(void *objspace_ptr)
2378 {
2379  if (heap_sizes[0] == 0) {
2380  for (unsigned char i = 0; i < HEAP_COUNT; i++) {
2381  heap_sizes[i] = heap_slot_size(i);
2382  }
2383  }
2384 
2385  return heap_sizes;
2386 }
2387 
2388 NOINLINE(static VALUE newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked));
2389 
2390 static VALUE
2391 newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)
2392 {
2393  rb_heap_t *heap = &heaps[heap_idx];
2394  VALUE obj = Qfalse;
2395 
2396  unsigned int lev = 0;
2397  bool unlock_vm = false;
2398 
2399  if (!vm_locked) {
2400  lev = rb_gc_cr_lock();
2401  vm_locked = true;
2402  unlock_vm = true;
2403  }
2404 
2405  {
2406  if (is_incremental_marking(objspace)) {
2407  gc_continue(objspace, heap);
2408  cache->incremental_mark_step_allocated_slots = 0;
2409 
2410  // Retry allocation after resetting incremental_mark_step_allocated_slots
2411  obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2412  }
2413 
2414  if (obj == Qfalse) {
2415  // Get next free page (possibly running GC)
2416  struct heap_page *page = heap_next_free_page(objspace, heap);
2417  ractor_cache_set_page(objspace, cache, heap_idx, page);
2418 
2419  // Retry allocation after moving to new page
2420  obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2421  }
2422  }
2423 
2424  if (unlock_vm) {
2425  rb_gc_cr_unlock(lev);
2426  }
2427 
2428  if (RB_UNLIKELY(obj == Qfalse)) {
2429  rb_memerror();
2430  }
2431  return obj;
2432 }
2433 
2434 static VALUE
2435 newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)
2436 {
2437  VALUE obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2438 
2439  if (RB_UNLIKELY(obj == Qfalse)) {
2440  obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
2441  }
2442 
2443  rb_heap_t *heap = &heaps[heap_idx];
2444  heap->total_allocated_objects++;
2445  GC_ASSERT(rb_gc_multi_ractor_p() ||
2446  heap->total_slots >=
2447  (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
2448 
2449  return obj;
2450 }
2451 
2452 ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx));
2453 
2454 static inline VALUE
2455 newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx)
2456 {
2457  VALUE obj;
2458  unsigned int lev;
2459 
2460  lev = rb_gc_cr_lock();
2461  {
2462  if (RB_UNLIKELY(during_gc || ruby_gc_stressful)) {
2463  if (during_gc) {
2464  dont_gc_on();
2465  during_gc = 0;
2466  rb_bug("object allocation during garbage collection phase");
2467  }
2468 
2469  if (ruby_gc_stressful) {
2470  if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2471  rb_memerror();
2472  }
2473  }
2474  }
2475 
2476  obj = newobj_alloc(objspace, cache, heap_idx, true);
2477  newobj_init(klass, flags, wb_protected, objspace, obj);
2478  }
2479  rb_gc_cr_unlock(lev);
2480 
2481  return obj;
2482 }
2483 
2484 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2485  rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
2486 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2487  rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
2488 
2489 static VALUE
2490 newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
2491 {
2492  return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx);
2493 }
2494 
2495 static VALUE
2496 newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
2497 {
2498  return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx);
2499 }
2500 
2501 VALUE
2502 rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
2503 {
2504  VALUE obj;
2505  rb_objspace_t *objspace = objspace_ptr;
2506 
2507  RB_DEBUG_COUNTER_INC(obj_newobj);
2508  (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2509 
2510  if (RB_UNLIKELY(stress_to_class)) {
2511  long cnt = RARRAY_LEN(stress_to_class);
2512  for (long i = 0; i < cnt; i++) {
2513  if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2514  }
2515  }
2516 
2517  size_t heap_idx = heap_idx_for_size(alloc_size);
2518 
2519  rb_ractor_newobj_cache_t *cache = (rb_ractor_newobj_cache_t *)cache_ptr;
2520 
2521  if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) &&
2522  wb_protected) {
2523  obj = newobj_alloc(objspace, cache, heap_idx, false);
2524  newobj_init(klass, flags, wb_protected, objspace, obj);
2525  }
2526  else {
2527  RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2528 
2529  obj = wb_protected ?
2530  newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) :
2531  newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx);
2532  }
2533 
2534  return newobj_fill(obj, v1, v2, v3);
2535 }
2536 
2537 static int
2538 ptr_in_page_body_p(const void *ptr, const void *memb)
2539 {
2540  struct heap_page *page = *(struct heap_page **)memb;
2541  uintptr_t p_body = (uintptr_t)page->body;
2542 
2543  if ((uintptr_t)ptr >= p_body) {
2544  return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
2545  }
2546  else {
2547  return -1;
2548  }
2549 }
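/* bsearch() comparator used by heap_page_for_ptr() below: instead of testing
 * for an exact key it returns 0 whenever ptr falls inside the page body
 * [page->body, page->body + HEAP_PAGE_SIZE), -1 when ptr is below it, and 1
 * when it is at or past the end, so the binary search over the sorted page
 * array lands on the page containing ptr, if any. */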
2550 
2551 PUREFUNC(static inline struct heap_page *heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
2552 static inline struct heap_page *
2553 heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
2554 {
2555  struct heap_page **res;
2556 
2557  if (ptr < (uintptr_t)heap_pages_lomem ||
2558  ptr > (uintptr_t)heap_pages_himem) {
2559  return NULL;
2560  }
2561 
2562  res = bsearch((void *)ptr, rb_darray_ref(objspace->heap_pages.sorted, 0),
2563  rb_darray_size(objspace->heap_pages.sorted), sizeof(struct heap_page *),
2564  ptr_in_page_body_p);
2565 
2566  if (res) {
2567  return *res;
2568  }
2569  else {
2570  return NULL;
2571  }
2572 }
2573 
2574 PUREFUNC(static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr);)
2575 static inline bool
2576 is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr)
2577 {
2578  register uintptr_t p = (uintptr_t)ptr;
2579  register struct heap_page *page;
2580 
2581  RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2582 
2583  if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2584  RB_DEBUG_COUNTER_INC(gc_isptr_range);
2585 
2586  if (p % BASE_SLOT_SIZE != 0) return FALSE;
2587  RB_DEBUG_COUNTER_INC(gc_isptr_align);
2588 
2589  page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
2590  if (page) {
2591  RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2592  if (heap_page_in_global_empty_pages_pool(objspace, page)) {
2593  return FALSE;
2594  }
2595  else {
2596  if (p < page->start) return FALSE;
2597  if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
2598  if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
2599 
2600  return TRUE;
2601  }
2602  }
2603  return FALSE;
2604 }
2605 
2606 bool
2607 rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
2608 {
2609  return is_pointer_to_heap(objspace_ptr, ptr);
2610 }
2611 
2612 #define ZOMBIE_OBJ_KEPT_FLAGS (FL_SEEN_OBJ_ID | FL_FINALIZE)
2613 
2614 void
2615 rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
2616 {
2617  rb_objspace_t *objspace = objspace_ptr;
2618 
2619  struct RZombie *zombie = RZOMBIE(obj);
2620  zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & ZOMBIE_OBJ_KEPT_FLAGS);
2621  zombie->dfree = dfree;
2622  zombie->data = data;
2623  VALUE prev, next = heap_pages_deferred_final;
2624  do {
2625  zombie->next = prev = next;
2626  next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
2627  } while (next != prev);
2628 
2629  struct heap_page *page = GET_HEAP_PAGE(obj);
2630  page->final_slots++;
2631  page->heap->final_slots_count++;
2632 }
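/* The do/while above is a lock-free push onto the global
 * heap_pages_deferred_final list: zombie->next is pointed at the current
 * head, then an atomic CAS swings the head from that snapshot to obj. If
 * another thread pushed concurrently, the CAS returns the newer head and the
 * loop retries with it. finalize_deferred_heap_pages() later drains the list
 * with an atomic exchange. */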
2633 
2634 static void
2635 obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
2636 {
2637  st_data_t o = (st_data_t)obj, id;
2638 
2639  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE || FL_TEST(obj, FL_SEEN_OBJ_ID));
2640  FL_UNSET(obj, FL_SEEN_OBJ_ID);
2641 
2642  if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
2643  GC_ASSERT(id);
2644  st_delete(objspace->id_to_obj_tbl, &id, NULL);
2645  }
2646  else {
2647  rb_bug("Object ID seen, but not in mapping table: %s", rb_obj_info(obj));
2648  }
2649 }
2650 
2651 typedef int each_obj_callback(void *, void *, size_t, void *);
2652 typedef int each_page_callback(struct heap_page *, void *);
2653 
2654 struct each_obj_data {
2655  rb_objspace_t *objspace;
2656  bool reenable_incremental;
2657 
2658  each_obj_callback *each_obj_callback;
2659  each_page_callback *each_page_callback;
2660  void *data;
2661 
2662  struct heap_page **pages[HEAP_COUNT];
2663  size_t pages_counts[HEAP_COUNT];
2664 };
2665 
2666 static VALUE
2667 objspace_each_objects_ensure(VALUE arg)
2668 {
2669  struct each_obj_data *data = (struct each_obj_data *)arg;
2670  rb_objspace_t *objspace = data->objspace;
2671 
2672  /* Reenable incremental GC */
2673  if (data->reenable_incremental) {
2674  objspace->flags.dont_incremental = FALSE;
2675  }
2676 
2677  for (int i = 0; i < HEAP_COUNT; i++) {
2678  struct heap_page **pages = data->pages[i];
2679  free(pages);
2680  }
2681 
2682  return Qnil;
2683 }
2684 
2685 static VALUE
2686 objspace_each_objects_try(VALUE arg)
2687 {
2688  struct each_obj_data *data = (struct each_obj_data *)arg;
2689  rb_objspace_t *objspace = data->objspace;
2690 
2691  /* Copy pages from all heaps to their respective buffers. */
2692  for (int i = 0; i < HEAP_COUNT; i++) {
2693  rb_heap_t *heap = &heaps[i];
2694  size_t size = heap->total_pages * sizeof(struct heap_page *);
2695 
2696  struct heap_page **pages = malloc(size);
2697  if (!pages) rb_memerror();
2698 
2699  /* Set up pages buffer by iterating over all pages in the current eden
2700  * heap. This will be a snapshot of the state of the heap before we
2701  * call the callback over each page that exists in this buffer. Thus it
2702  * is safe for the callback to allocate objects without the risk of
2703  * entering an infinite loop. */
2704  struct heap_page *page = 0;
2705  size_t pages_count = 0;
2706  ccan_list_for_each(&heap->pages, page, page_node) {
2707  pages[pages_count] = page;
2708  pages_count++;
2709  }
2710  data->pages[i] = pages;
2711  data->pages_counts[i] = pages_count;
2712  GC_ASSERT(pages_count == heap->total_pages);
2713  }
2714 
2715  for (int i = 0; i < HEAP_COUNT; i++) {
2716  rb_heap_t *heap = &heaps[i];
2717  size_t pages_count = data->pages_counts[i];
2718  struct heap_page **pages = data->pages[i];
2719 
2720  struct heap_page *page = ccan_list_top(&heap->pages, struct heap_page, page_node);
2721  for (size_t i = 0; i < pages_count; i++) {
2722  /* If we have reached the end of the linked list then there are no
2723  * more pages, so break. */
2724  if (page == NULL) break;
2725 
2726  /* If this page does not match the one in the buffer, then move to
2727  * the next page in the buffer. */
2728  if (pages[i] != page) continue;
2729 
2730  uintptr_t pstart = (uintptr_t)page->start;
2731  uintptr_t pend = pstart + (page->total_slots * heap->slot_size);
2732 
2733  if (data->each_obj_callback &&
2734  (*data->each_obj_callback)((void *)pstart, (void *)pend, heap->slot_size, data->data)) {
2735  break;
2736  }
2737  if (data->each_page_callback &&
2738  (*data->each_page_callback)(page, data->data)) {
2739  break;
2740  }
2741 
2742  page = ccan_list_next(&heap->pages, page, page_node);
2743  }
2744  }
2745 
2746  return Qnil;
2747 }
2748 
2749 static void
2750 objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
2751 {
2752  /* Disable incremental GC */
2753  rb_objspace_t *objspace = each_obj_data->objspace;
2754  bool reenable_incremental = FALSE;
2755  if (protected) {
2756  reenable_incremental = !objspace->flags.dont_incremental;
2757 
2758  gc_rest(objspace);
2759  objspace->flags.dont_incremental = TRUE;
2760  }
2761 
2762  each_obj_data->reenable_incremental = reenable_incremental;
2763  memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
2764  memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
2765  rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
2766  objspace_each_objects_ensure, (VALUE)each_obj_data);
2767 }
2768 
2769 static void
2770 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
2771 {
2772  struct each_obj_data each_obj_data = {
2773  .objspace = objspace,
2774  .each_obj_callback = callback,
2775  .each_page_callback = NULL,
2776  .data = data,
2777  };
2778  objspace_each_exec(protected, &each_obj_data);
2779 }
2780 
2781 void
2782 rb_gc_impl_each_objects(void *objspace_ptr, each_obj_callback *callback, void *data)
2783 {
2784  objspace_each_objects(objspace_ptr, callback, data, TRUE);
2785 }
2786 
2787 #if GC_CAN_COMPILE_COMPACTION
2788 static void
2789 objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
2790 {
2791  struct each_obj_data each_obj_data = {
2792  .objspace = objspace,
2793  .each_obj_callback = NULL,
2794  .each_page_callback = callback,
2795  .data = data,
2796  };
2797  objspace_each_exec(protected, &each_obj_data);
2798 }
2799 #endif
2800 
2801 VALUE
2802 rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
2803 {
2804  rb_objspace_t *objspace = objspace_ptr;
2805  VALUE table;
2806  st_data_t data;
2807 
2808  GC_ASSERT(!OBJ_FROZEN(obj));
2809 
2810  RBASIC(obj)->flags |= FL_FINALIZE;
2811 
2812  if (st_lookup(finalizer_table, obj, &data)) {
2813  table = (VALUE)data;
2814 
2815  /* avoid duplicate block, table is usually small */
2816  {
2817  long len = RARRAY_LEN(table);
2818  long i;
2819 
2820  for (i = 0; i < len; i++) {
2821  VALUE recv = RARRAY_AREF(table, i);
2822  if (rb_equal(recv, block)) {
2823  return recv;
2824  }
2825  }
2826  }
2827 
2828  rb_ary_push(table, block);
2829  }
2830  else {
2831  table = rb_ary_new3(1, block);
2832  rb_obj_hide(table);
2833  st_add_direct(finalizer_table, obj, table);
2834  }
2835 
2836  return block;
2837 }
2838 
2839 void
2840 rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
2841 {
2842  rb_objspace_t *objspace = objspace_ptr;
2843 
2844  GC_ASSERT(!OBJ_FROZEN(obj));
2845 
2846  st_data_t data = obj;
2847  st_delete(finalizer_table, &data, 0);
2848  FL_UNSET(obj, FL_FINALIZE);
2849 }
2850 
2851 void
2852 rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
2853 {
2854  rb_objspace_t *objspace = objspace_ptr;
2855  VALUE table;
2856  st_data_t data;
2857 
2858  if (!FL_TEST(obj, FL_FINALIZE)) return;
2859 
2860  if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {
2861  table = (VALUE)data;
2862  st_insert(finalizer_table, dest, table);
2863  FL_SET(dest, FL_FINALIZE);
2864  }
2865  else {
2866  rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
2867  }
2868 }
2869 
2870 static VALUE
2871 get_object_id_in_finalizer(rb_objspace_t *objspace, VALUE obj)
2872 {
2873  if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
2874  return rb_gc_impl_object_id(objspace, obj);
2875  }
2876  else {
2877  VALUE id = ULL2NUM(objspace->next_object_id);
2878  objspace->next_object_id += OBJ_ID_INCREMENT;
2879  return id;
2880  }
2881 }
2882 
2883 static VALUE
2884 get_final(long i, void *data)
2885 {
2886  VALUE table = (VALUE)data;
2887 
2888  return RARRAY_AREF(table, i);
2889 }
2890 
2891 static void
2892 run_final(rb_objspace_t *objspace, VALUE zombie)
2893 {
2894  if (RZOMBIE(zombie)->dfree) {
2895  RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
2896  }
2897 
2898  st_data_t key = (st_data_t)zombie;
2899  if (FL_TEST_RAW(zombie, FL_FINALIZE)) {
2900  FL_UNSET(zombie, FL_FINALIZE);
2901  st_data_t table;
2902  if (st_delete(finalizer_table, &key, &table)) {
2903  rb_gc_run_obj_finalizer(get_object_id_in_finalizer(objspace, zombie), RARRAY_LEN(table), get_final, (void *)table);
2904  }
2905  else {
2906  rb_bug("FL_FINALIZE flag is set, but finalizers are not found");
2907  }
2908  }
2909  else {
2910  GC_ASSERT(!st_lookup(finalizer_table, key, NULL));
2911  }
2912 }
2913 
2914 static void
2915 finalize_list(rb_objspace_t *objspace, VALUE zombie)
2916 {
2917  while (zombie) {
2918  VALUE next_zombie;
2919  struct heap_page *page;
2920  asan_unpoison_object(zombie, false);
2921  next_zombie = RZOMBIE(zombie)->next;
2922  page = GET_HEAP_PAGE(zombie);
2923 
2924  run_final(objspace, zombie);
2925 
2926  int lev = rb_gc_vm_lock();
2927  {
2928  GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
2929  if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
2930  obj_free_object_id(objspace, zombie);
2931  }
2932 
2933  GC_ASSERT(page->heap->final_slots_count > 0);
2934  GC_ASSERT(page->final_slots > 0);
2935 
2936  page->heap->final_slots_count--;
2937  page->final_slots--;
2938  page->free_slots++;
2939  heap_page_add_freeobj(objspace, page, zombie);
2940  page->heap->total_freed_objects++;
2941  }
2942  rb_gc_vm_unlock(lev);
2943 
2944  zombie = next_zombie;
2945  }
2946 }
2947 
2948 static void
2949 finalize_deferred_heap_pages(rb_objspace_t *objspace)
2950 {
2951  VALUE zombie;
2952  while ((zombie = RUBY_ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
2953  finalize_list(objspace, zombie);
2954  }
2955 }
2956 
2957 static void
2958 finalize_deferred(rb_objspace_t *objspace)
2959 {
2960  rb_gc_set_pending_interrupt();
2961  finalize_deferred_heap_pages(objspace);
2962  rb_gc_unset_pending_interrupt();
2963 }
2964 
2965 static void
2966 gc_finalize_deferred(void *dmy)
2967 {
2968  rb_objspace_t *objspace = dmy;
2969  if (RUBY_ATOMIC_EXCHANGE(finalizing, 1)) return;
2970 
2971  finalize_deferred(objspace);
2972  RUBY_ATOMIC_SET(finalizing, 0);
2973 }
2974 
2975 static void
2976 gc_finalize_deferred_register(rb_objspace_t *objspace)
2977 {
2978  /* will enqueue a call to gc_finalize_deferred */
2979  rb_postponed_job_trigger(objspace->finalize_deferred_pjob);
2980 }
2981 
2982 static int pop_mark_stack(mark_stack_t *stack, VALUE *data);
2983 
2984 static void
2985 gc_abort(void *objspace_ptr)
2986 {
2987  rb_objspace_t *objspace = objspace_ptr;
2988 
2989  if (is_incremental_marking(objspace)) {
2990  /* Remove all objects from the mark stack. */
2991  VALUE obj;
2992  while (pop_mark_stack(&objspace->mark_stack, &obj));
2993 
2994  objspace->flags.during_incremental_marking = FALSE;
2995  }
2996 
2997  if (is_lazy_sweeping(objspace)) {
2998  for (int i = 0; i < HEAP_COUNT; i++) {
2999  rb_heap_t *heap = &heaps[i];
3000 
3001  heap->sweeping_page = NULL;
3002  struct heap_page *page = NULL;
3003 
3004  ccan_list_for_each(&heap->pages, page, page_node) {
3005  page->flags.before_sweep = false;
3006  }
3007  }
3008  }
3009 
3010  for (int i = 0; i < HEAP_COUNT; i++) {
3011  rb_heap_t *heap = &heaps[i];
3012  rgengc_mark_and_rememberset_clear(objspace, heap);
3013  }
3014 
3015  gc_mode_set(objspace, gc_mode_none);
3016 }
3017 
3018 void
3019 rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
3020 {
3021  rb_objspace_t *objspace = objspace_ptr;
3022 
3023  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3024  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3025  short stride = page->slot_size;
3026 
3027  uintptr_t p = (uintptr_t)page->start;
3028  uintptr_t pend = p + page->total_slots * stride;
3029  for (; p < pend; p += stride) {
3030  VALUE vp = (VALUE)p;
3031  asan_unpoisoning_object(vp) {
3032  if (RB_BUILTIN_TYPE(vp) != T_NONE) {
3033  if (rb_gc_obj_free(objspace, vp)) {
3034  RBASIC(vp)->flags = 0;
3035  }
3036  }
3037  }
3038  }
3039  }
3040 }
3041 
3042 static int
3043 rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t data)
3044 {
3045  rb_objspace_t *objspace = (rb_objspace_t *)data;
3046  VALUE obj = (VALUE)key;
3047  VALUE table = (VALUE)val;
3048 
3049  GC_ASSERT(RB_FL_TEST(obj, FL_FINALIZE));
3050  GC_ASSERT(RB_BUILTIN_TYPE(val) == T_ARRAY);
3051 
3052  rb_gc_run_obj_finalizer(rb_gc_impl_object_id(objspace, obj), RARRAY_LEN(table), get_final, (void *)table);
3053 
3054  FL_UNSET(obj, FL_FINALIZE);
3055 
3056  return ST_DELETE;
3057 }
3058 
3059 void
3060 rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
3061 {
3062  rb_objspace_t *objspace = objspace_ptr;
3063 
3064 #if RGENGC_CHECK_MODE >= 2
3065  gc_verify_internal_consistency(objspace);
3066 #endif
3067 
3068  /* prohibit incremental GC */
3069  objspace->flags.dont_incremental = 1;
3070 
3071  if (RUBY_ATOMIC_EXCHANGE(finalizing, 1)) {
3072  /* Abort incremental marking and lazy sweeping to speed up shutdown. */
3073  gc_abort(objspace);
3074  dont_gc_on();
3075  return;
3076  }
3077 
3078  while (finalizer_table->num_entries) {
3079  st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, (st_data_t)objspace);
3080  }
3081 
3082  /* run finalizers */
3083  finalize_deferred(objspace);
3084  GC_ASSERT(heap_pages_deferred_final == 0);
3085 
3086  /* Abort incremental marking and lazy sweeping to speed up shutdown. */
3087  gc_abort(objspace);
3088 
3089  /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
3090  dont_gc_on();
3091 
3092  /* running data/file finalizers are part of garbage collection */
3093  unsigned int lock_lev;
3094  gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
3095 
3096  /* run data/file object's finalizers */
3097  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3098  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3099  short stride = page->slot_size;
3100 
3101  uintptr_t p = (uintptr_t)page->start;
3102  uintptr_t pend = p + page->total_slots * stride;
3103  for (; p < pend; p += stride) {
3104  VALUE vp = (VALUE)p;
3105  asan_unpoisoning_object(vp) {
3106  if (rb_gc_shutdown_call_finalizer_p(vp)) {
3107  if (rb_gc_obj_free(objspace, vp)) {
3108  RBASIC(vp)->flags = 0;
3109  }
3110  }
3111  }
3112  }
3113  }
3114 
3115  gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
3116 
3117  finalize_deferred_heap_pages(objspace);
3118 
3119  st_free_table(finalizer_table);
3120  finalizer_table = 0;
3121  RUBY_ATOMIC_SET(finalizing, 0);
3122 }
3123 
3124 void
3125 rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data)
3126 {
3127  rb_objspace_t *objspace = objspace_ptr;
3128 
3129  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3130  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3131  short stride = page->slot_size;
3132 
3133  uintptr_t p = (uintptr_t)page->start;
3134  uintptr_t pend = p + page->total_slots * stride;
3135  for (; p < pend; p += stride) {
3136  VALUE obj = (VALUE)p;
3137 
3138  asan_unpoisoning_object(obj) {
3139  func(obj, data);
3140  }
3141  }
3142  }
3143 }
3144 
3145 /*
3146  ------------------------ Garbage Collection ------------------------
3147 */
3148 
3149 /* Sweeping */
3150 
3151 static size_t
3152 objspace_available_slots(rb_objspace_t *objspace)
3153 {
3154  size_t total_slots = 0;
3155  for (int i = 0; i < HEAP_COUNT; i++) {
3156  rb_heap_t *heap = &heaps[i];
3157  total_slots += heap->total_slots;
3158  }
3159  return total_slots;
3160 }
3161 
3162 static size_t
3163 objspace_live_slots(rb_objspace_t *objspace)
3164 {
3165  return total_allocated_objects(objspace) - total_freed_objects(objspace) - total_final_slots_count(objspace);
3166 }
3167 
3168 static size_t
3169 objspace_free_slots(rb_objspace_t *objspace)
3170 {
3171  return objspace_available_slots(objspace) - objspace_live_slots(objspace) - total_final_slots_count(objspace);
3172 }
3173 
3174 static void
3175 gc_setup_mark_bits(struct heap_page *page)
3176 {
3177  /* copy oldgen bitmap to mark bitmap */
3178  memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
3179 }
3180 
3181 static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
3182 static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
3183 
3184 #if defined(_WIN32)
3185 enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
3186 
3187 static BOOL
3188 protect_page_body(struct heap_page_body *body, DWORD protect)
3189 {
3190  DWORD old_protect;
3191  return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
3192 }
3193 #else
3194 enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
3195 #define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
3196 #endif
3197 
3198 static void
3199 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
3200 {
3201  if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
3202  rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
3203  }
3204  else {
3205  gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
3206  }
3207 }
3208 
3209 static void
3210 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
3211 {
3212  if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
3213  rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
3214  }
3215  else {
3216  gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
3217  }
3218 }
3219 
3220 static bool
3221 try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
3222 {
3223  GC_ASSERT(gc_is_moveable_obj(objspace, src));
3224 
3225  struct heap_page *src_page = GET_HEAP_PAGE(src);
3226  if (!free_page) {
3227  return false;
3228  }
3229 
3230  /* We should return true if either src is successfully moved, or src is
3231  * unmoveable. A false return will cause the sweeping cursor to be
3232  * incremented to the next page, and src will attempt to move again */
3233  GC_ASSERT(RVALUE_MARKED(objspace, src));
3234 
3235  asan_unlock_freelist(free_page);
3236  VALUE dest = (VALUE)free_page->freelist;
3237  asan_lock_freelist(free_page);
3238  asan_unpoison_object(dest, false);
3239  if (!dest) {
3240  /* if we can't get something from the freelist then the page must be
3241  * full */
3242  return false;
3243  }
3244  asan_unlock_freelist(free_page);
3245  free_page->freelist = ((struct free_slot *)dest)->next;
3246  asan_lock_freelist(free_page);
3247 
3248  GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
3249 
3250  if (src_page->slot_size > free_page->slot_size) {
3251  objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
3252  }
3253  else if (free_page->slot_size > src_page->slot_size) {
3254  objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
3255  }
3256  objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
3257  objspace->rcompactor.total_moved++;
3258 
3259  gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
3260  gc_pin(objspace, src);
3261  free_page->free_slots--;
3262 
3263  return true;
3264 }
3265 
3266 static void
3267 gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
3268 {
3269  struct heap_page *cursor = heap->compact_cursor;
3270 
3271  while (cursor) {
3272  unlock_page_body(objspace, cursor->body);
3273  cursor = ccan_list_next(&heap->pages, cursor, page_node);
3274  }
3275 }
3276 
3277 static void gc_update_references(rb_objspace_t *objspace);
3278 #if GC_CAN_COMPILE_COMPACTION
3279 static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
3280 #endif
3281 
3282 #if defined(__MINGW32__) || defined(_WIN32)
3283 # define GC_COMPACTION_SUPPORTED 1
3284 #else
3285 /* If we are not on MinGW or Windows and do not have mmap, we cannot use
3286  * mprotect for the read barrier, so we must disable compaction. */
3287 # define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
3288 #endif
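/* Compaction relies on an mprotect()-based read barrier: once objects have
 * been moved, the pages still holding their T_MOVED husks are protected, so
 * any stale reference that touches one of them faults. The SIGSEGV/SIGBUS
 * handlers (or the Windows exception filter) installed below unprotect the
 * faulting page, call invalidate_moved_page() to undo the moves on it, and
 * resume execution. */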
3289 
3290 #if GC_CAN_COMPILE_COMPACTION
3291 static void
3292 read_barrier_handler(uintptr_t original_address)
3293 {
3294  VALUE obj;
3295  rb_objspace_t *objspace = (rb_objspace_t *)rb_gc_get_objspace();
3296 
3297  /* Calculate address aligned to slots. */
3298  uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
3299 
3300  obj = (VALUE)address;
3301 
3302  struct heap_page_body *page_body = GET_PAGE_BODY(obj);
3303 
3304  /* If the page_body is NULL, then mprotect cannot handle it and will crash
3305  * with "Cannot allocate memory". */
3306  if (page_body == NULL) {
3307  rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
3308  }
3309 
3310  int lev = rb_gc_vm_lock();
3311  {
3312  unlock_page_body(objspace, page_body);
3313 
3314  objspace->profile.read_barrier_faults++;
3315 
3316  invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
3317  }
3318  rb_gc_vm_unlock(lev);
3319 }
3320 #endif
3321 
3322 #if !GC_CAN_COMPILE_COMPACTION
3323 static void
3324 uninstall_handlers(void)
3325 {
3326  /* no-op */
3327 }
3328 
3329 static void
3330 install_handlers(void)
3331 {
3332  /* no-op */
3333 }
3334 #elif defined(_WIN32)
3335 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
3336 typedef void (*signal_handler)(int);
3337 static signal_handler old_sigsegv_handler;
3338 
3339 static LONG WINAPI
3340 read_barrier_signal(EXCEPTION_POINTERS *info)
3341 {
3342  /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
3343  if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
3344  /* > The second array element specifies the virtual address of the inaccessible data.
3345  * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
3346  *
3347  * Use this address to invalidate the page */
3348  read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
3349  return EXCEPTION_CONTINUE_EXECUTION;
3350  }
3351  else {
3352  return EXCEPTION_CONTINUE_SEARCH;
3353  }
3354 }
3355 
3356 static void
3357 uninstall_handlers(void)
3358 {
3359  signal(SIGSEGV, old_sigsegv_handler);
3360  SetUnhandledExceptionFilter(old_handler);
3361 }
3362 
3363 static void
3364 install_handlers(void)
3365 {
3366  /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
3367  old_sigsegv_handler = signal(SIGSEGV, NULL);
3368  /* Unhandled Exception Filter has access to the violation address similar
3369  * to si_addr from sigaction */
3370  old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
3371 }
3372 #else
3373 static struct sigaction old_sigbus_handler;
3374 static struct sigaction old_sigsegv_handler;
3375 
3376 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3377 static exception_mask_t old_exception_masks[32];
3378 static mach_port_t old_exception_ports[32];
3379 static exception_behavior_t old_exception_behaviors[32];
3380 static thread_state_flavor_t old_exception_flavors[32];
3381 static mach_msg_type_number_t old_exception_count;
3382 
3383 static void
3384 disable_mach_bad_access_exc(void)
3385 {
3386  old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
3387  task_swap_exception_ports(
3388  mach_task_self(), EXC_MASK_BAD_ACCESS,
3389  MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
3390  old_exception_masks, &old_exception_count,
3391  old_exception_ports, old_exception_behaviors, old_exception_flavors
3392  );
3393 }
3394 
3395 static void
3396 restore_mach_bad_access_exc(void)
3397 {
3398  for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
3399  task_set_exception_ports(
3400  mach_task_self(),
3401  old_exception_masks[i], old_exception_ports[i],
3402  old_exception_behaviors[i], old_exception_flavors[i]
3403  );
3404  }
3405 }
3406 #endif
3407 
3408 static void
3409 read_barrier_signal(int sig, siginfo_t *info, void *data)
3410 {
3411  // setup SEGV/BUS handlers for errors
3412  struct sigaction prev_sigbus, prev_sigsegv;
3413  sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
3414  sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
3415 
3416  // enable SIGBUS/SEGV
3417  sigset_t set, prev_set;
3418  sigemptyset(&set);
3419  sigaddset(&set, SIGBUS);
3420  sigaddset(&set, SIGSEGV);
3421  sigprocmask(SIG_UNBLOCK, &set, &prev_set);
3422 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3423  disable_mach_bad_access_exc();
3424 #endif
3425  // run handler
3426  read_barrier_handler((uintptr_t)info->si_addr);
3427 
3428  // reset SEGV/BUS handlers
3429 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3430  restore_mach_bad_access_exc();
3431 #endif
3432  sigaction(SIGBUS, &prev_sigbus, NULL);
3433  sigaction(SIGSEGV, &prev_sigsegv, NULL);
3434  sigprocmask(SIG_SETMASK, &prev_set, NULL);
3435 }
3436 
3437 static void
3438 uninstall_handlers(void)
3439 {
3440 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3441  restore_mach_bad_access_exc();
3442 #endif
3443  sigaction(SIGBUS, &old_sigbus_handler, NULL);
3444  sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
3445 }
3446 
3447 static void
3448 install_handlers(void)
3449 {
3450  struct sigaction action;
3451  memset(&action, 0, sizeof(struct sigaction));
3452  sigemptyset(&action.sa_mask);
3453  action.sa_sigaction = read_barrier_signal;
3454  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
3455 
3456  sigaction(SIGBUS, &action, &old_sigbus_handler);
3457  sigaction(SIGSEGV, &action, &old_sigsegv_handler);
3458 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3459  disable_mach_bad_access_exc();
3460 #endif
3461 }
3462 #endif
3463 
3464 static void
3465 gc_compact_finish(rb_objspace_t *objspace)
3466 {
3467  for (int i = 0; i < HEAP_COUNT; i++) {
3468  rb_heap_t *heap = &heaps[i];
3469  gc_unprotect_pages(objspace, heap);
3470  }
3471 
3472  uninstall_handlers();
3473 
3474  gc_update_references(objspace);
3475  objspace->profile.compact_count++;
3476 
3477  for (int i = 0; i < HEAP_COUNT; i++) {
3478  rb_heap_t *heap = &heaps[i];
3479  heap->compact_cursor = NULL;
3480  heap->free_pages = NULL;
3481  heap->compact_cursor_index = 0;
3482  }
3483 
3484  if (gc_prof_enabled(objspace)) {
3485  gc_profile_record *record = gc_prof_record(objspace);
3486  record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
3487  }
3488  objspace->flags.during_compacting = FALSE;
3489 }
3490 
3492  struct heap_page *page;
3493  int final_slots;
3494  int freed_slots;
3495  int empty_slots;
3496 };
3497 
3498 static inline void
3499 gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
3500 {
3501  struct heap_page *sweep_page = ctx->page;
3502  short slot_size = sweep_page->slot_size;
3503  short slot_bits = slot_size / BASE_SLOT_SIZE;
3504  GC_ASSERT(slot_bits > 0);
3505 
3506  do {
3507  VALUE vp = (VALUE)p;
3508  GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
3509 
3510  asan_unpoison_object(vp, false);
3511  if (bitset & 1) {
3512  switch (BUILTIN_TYPE(vp)) {
3513  default: /* majority case */
3514  gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
3515 #if RGENGC_CHECK_MODE
3516  if (!is_full_marking(objspace)) {
3517  if (RVALUE_OLD_P(objspace, vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
3518  if (RVALUE_REMEMBERED(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
3519  }
3520 #endif
3521 
3522  if (RVALUE_WB_UNPROTECTED(objspace, vp)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(vp), vp);
3523 
3524 #if RGENGC_CHECK_MODE
3525 #define CHECK(x) if (x(objspace, vp) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", rb_obj_info(vp))
3526  CHECK(RVALUE_WB_UNPROTECTED);
3527  CHECK(RVALUE_MARKED);
3528  CHECK(RVALUE_MARKING);
3529  CHECK(RVALUE_UNCOLLECTIBLE);
3530 #undef CHECK
3531 #endif
3532 
3533  rb_gc_event_hook(vp, RUBY_INTERNAL_EVENT_FREEOBJ);
3534 
3535  bool has_object_id = FL_TEST(vp, FL_SEEN_OBJ_ID);
3536  if (rb_gc_obj_free(objspace, vp)) {
3537  if (has_object_id) {
3538  obj_free_object_id(objspace, vp);
3539  }
3540  // always add free slots back to the swept pages freelist,
3541  // so that if we're compacting, we can re-use the slots
3542  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
3543  heap_page_add_freeobj(objspace, sweep_page, vp);
3544  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3545  ctx->freed_slots++;
3546  }
3547  else {
3548  ctx->final_slots++;
3549  }
3550  break;
3551 
3552  case T_MOVED:
3553  if (objspace->flags.during_compacting) {
3554  /* The sweep cursor shouldn't have made it to any
3555  * T_MOVED slots while the compact flag is enabled.
3556  * The sweep cursor and compact cursor move in
3557  * opposite directions, and when they meet references will
3558  * get updated and "during_compacting" should get disabled */
3559  rb_bug("T_MOVED shouldn't be seen until compaction is finished");
3560  }
3561  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3562  ctx->empty_slots++;
3563  heap_page_add_freeobj(objspace, sweep_page, vp);
3564  break;
3565  case T_ZOMBIE:
3566  /* already counted */
3567  break;
3568  case T_NONE:
3569  ctx->empty_slots++; /* already freed */
3570  break;
3571  }
3572  }
3573  p += slot_size;
3574  bitset >>= slot_bits;
3575  } while (bitset);
3576 }
3577 
3578 static inline void
3579 gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
3580 {
3581  struct heap_page *sweep_page = ctx->page;
3582  GC_ASSERT(sweep_page->heap == heap);
3583 
3584  uintptr_t p;
3585  bits_t *bits, bitset;
3586 
3587  gc_report(2, objspace, "page_sweep: start.\n");
3588 
3589 #if RGENGC_CHECK_MODE
3590  if (!objspace->flags.immediate_sweep) {
3591  GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
3592  }
3593 #endif
3594  sweep_page->flags.before_sweep = FALSE;
3595  sweep_page->free_slots = 0;
3596 
3597  p = (uintptr_t)sweep_page->start;
3598  bits = sweep_page->mark_bits;
3599 
3600  int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
3601  int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
3602  if (out_of_range_bits != 0) { // the page's slots don't fill the last bits_t word
3603  bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
3604  }
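    /* The mark bitmap is sized in whole bits_t words, so the last word can
     * cover more BASE_SLOT_SIZE-sized slots than the page really has. Setting
     * those trailing bits here makes the sweep loops below treat them as
     * marked and skip them; e.g. (hypothetically) if only 40 of the final
     * word's 64 bits map to real slots, the top 24 bits are forced on. */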
3605 
3606  /* The last bitmap plane may not be used if the last plane does not
3607  * have enough space for the slot_size. In that case, the last plane must
3608  * be skipped since none of the bits will be set. */
3609  int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
3610  GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
3611  bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
3612 
3613  // Skip out of range slots at the head of the page
3614  bitset = ~bits[0];
3615  bitset >>= NUM_IN_PAGE(p);
3616  if (bitset) {
3617  gc_sweep_plane(objspace, heap, p, bitset, ctx);
3618  }
3619  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
3620 
3621  for (int i = 1; i < bitmap_plane_count; i++) {
3622  bitset = ~bits[i];
3623  if (bitset) {
3624  gc_sweep_plane(objspace, heap, p, bitset, ctx);
3625  }
3626  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
3627  }
3628 
3629  if (!heap->compact_cursor) {
3630  gc_setup_mark_bits(sweep_page);
3631  }
3632 
3633 #if GC_PROFILE_MORE_DETAIL
3634  if (gc_prof_enabled(objspace)) {
3635  gc_profile_record *record = gc_prof_record(objspace);
3636  record->removing_objects += ctx->final_slots + ctx->freed_slots;
3637  record->empty_objects += ctx->empty_slots;
3638  }
3639 #endif
3640  if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3641  rb_gc_count(),
3642  sweep_page->total_slots,
3643  ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
3644 
3645  sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
3646  sweep_page->heap->total_freed_objects += ctx->freed_slots;
3647 
3648  if (heap_pages_deferred_final && !finalizing) {
3649  gc_finalize_deferred_register(objspace);
3650  }
3651 
3652 #if RGENGC_CHECK_MODE
3653  short freelist_len = 0;
3654  asan_unlock_freelist(sweep_page);
3655  struct free_slot *ptr = sweep_page->freelist;
3656  while (ptr) {
3657  freelist_len++;
3658  ptr = ptr->next;
3659  }
3660  asan_lock_freelist(sweep_page);
3661  if (freelist_len != sweep_page->free_slots) {
3662  rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
3663  }
3664 #endif
3665 
3666  gc_report(2, objspace, "page_sweep: end.\n");
3667 }
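
/* Illustrative sketch (not part of the Ruby sources): gc_sweep_page above walks
 * the page's mark bitmap one machine word ("plane") at a time and passes the
 * complement of each word to gc_sweep_plane, so a set bit means "unmarked,
 * reclaim me". Each bit covers one BASE_SLOT_SIZE unit, so a slot of slot_size
 * consumes slot_size / BASE_SLOT_SIZE bits per step. The sketch below mirrors
 * only that shifting pattern; sweep_one_plane, on_free_slot and the 40-byte
 * base size are hypothetical. Assumes <stdint.h> and <stddef.h>. */
#define EX_BASE_SLOT_SIZE 40

static void
sweep_one_plane(uintptr_t p, size_t slot_size, uint64_t unmarked,
                void (*on_free_slot)(uintptr_t))
{
    /* slot_size is assumed to be a multiple of the base size, so slot_bits >= 1 */
    const size_t slot_bits = slot_size / EX_BASE_SLOT_SIZE;

    while (unmarked) {
        if (unmarked & 1) on_free_slot(p);  /* slot is not marked: free it */
        p        += slot_size;              /* advance to the next slot    */
        unmarked >>= slot_bits;             /* consume this slot's bits    */
    }
}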
3668 
3669 static const char *
3670 gc_mode_name(enum gc_mode mode)
3671 {
3672  switch (mode) {
3673  case gc_mode_none: return "none";
3674  case gc_mode_marking: return "marking";
3675  case gc_mode_sweeping: return "sweeping";
3676  case gc_mode_compacting: return "compacting";
3677  default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
3678  }
3679 }
3680 
3681 static void
3682 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
3683 {
3684 #if RGENGC_CHECK_MODE
3685  enum gc_mode prev_mode = gc_mode(objspace);
3686  switch (prev_mode) {
3687  case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
3688  case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
3689  case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
3690  case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
3691  }
3692 #endif
3693  if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
3694  gc_mode_set(objspace, mode);
3695 }
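
/* Illustrative sketch (not part of the Ruby sources): the RGENGC_CHECK_MODE
 * assertions above encode a small state machine for gc_mode:
 * none -> marking -> sweeping -> (none | compacting) -> none. The standalone
 * helper below spells out the same transition table; the enum and function
 * names are hypothetical. Assumes <stdbool.h>. */
enum ex_gc_mode { EX_MODE_NONE, EX_MODE_MARKING, EX_MODE_SWEEPING, EX_MODE_COMPACTING };

static bool
ex_mode_transition_allowed(enum ex_gc_mode from, enum ex_gc_mode to)
{
    switch (from) {
      case EX_MODE_NONE:       return to == EX_MODE_MARKING;
      case EX_MODE_MARKING:    return to == EX_MODE_SWEEPING;
      case EX_MODE_SWEEPING:   return to == EX_MODE_NONE || to == EX_MODE_COMPACTING;
      case EX_MODE_COMPACTING: return to == EX_MODE_NONE;
    }
    return false;
}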
3696 
3697 static void
3698 heap_page_freelist_append(struct heap_page *page, struct free_slot *freelist)
3699 {
3700  if (freelist) {
3701  asan_unlock_freelist(page);
3702  if (page->freelist) {
3703  struct free_slot *p = page->freelist;
3704  asan_unpoison_object((VALUE)p, false);
3705  while (p->next) {
3706  struct free_slot *prev = p;
3707  p = p->next;
3708  asan_poison_object((VALUE)prev);
3709  asan_unpoison_object((VALUE)p, false);
3710  }
3711  p->next = freelist;
3712  asan_poison_object((VALUE)p);
3713  }
3714  else {
3715  page->freelist = freelist;
3716  }
3717  asan_lock_freelist(page);
3718  }
3719 }
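
/* Illustrative sketch (not part of the Ruby sources): stripped of the ASAN
 * lock/poison calls, heap_page_freelist_append above is a plain singly-linked
 * list splice -- walk to the tail of the page's freelist and hang the ractor
 * cache's leftover freelist off it. example_slot and append_freelist are
 * hypothetical names. Assumes <stddef.h>. */
struct example_slot { struct example_slot *next; };

static void
append_freelist(struct example_slot **page_freelist, struct example_slot *extra)
{
    if (extra == NULL) return;

    if (*page_freelist == NULL) {
        *page_freelist = extra;                 /* page had no free slots yet */
    }
    else {
        struct example_slot *tail = *page_freelist;
        while (tail->next) tail = tail->next;   /* find the current tail      */
        tail->next = extra;                     /* splice the extra list on   */
    }
}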
3720 
3721 static void
3722 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3723 {
3724  heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
3725  heap->free_pages = NULL;
3726  heap->pooled_pages = NULL;
3727  if (!objspace->flags.immediate_sweep) {
3728  struct heap_page *page = NULL;
3729 
3730  ccan_list_for_each(&heap->pages, page, page_node) {
3731  page->flags.before_sweep = TRUE;
3732  }
3733  }
3734 }
3735 
3736 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
3737 __attribute__((noinline))
3738 #endif
3739 
3740 #if GC_CAN_COMPILE_COMPACTION
3741 static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
3742 static int compare_pinned_slots(const void *left, const void *right, void *d);
3743 #endif
3744 
3745 static void
3746 gc_ractor_newobj_cache_clear(void *c, void *data)
3747 {
3748  rb_ractor_newobj_cache_t *newobj_cache = c;
3749 
3750  newobj_cache->incremental_mark_step_allocated_slots = 0;
3751 
3752  for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
3753  rb_ractor_newobj_heap_cache_t *cache = &newobj_cache->heap_caches[heap_idx];
3754 
3755  struct heap_page *page = cache->using_page;
3756  struct free_slot *freelist = cache->freelist;
3757  RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
3758 
3759  heap_page_freelist_append(page, freelist);
3760 
3761  cache->using_page = NULL;
3762  cache->freelist = NULL;
3763  }
3764 }
3765 
3766 static void
3767 gc_sweep_start(rb_objspace_t *objspace)
3768 {
3769  gc_mode_transition(objspace, gc_mode_sweeping);
3770  objspace->rincgc.pooled_slots = 0;
3771  objspace->heap_pages.allocatable_slots = 0;
3772 
3773 #if GC_CAN_COMPILE_COMPACTION
3774  if (objspace->flags.during_compacting) {
3775  gc_sort_heap_by_compare_func(
3776  objspace,
3777  objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
3778  );
3779  }
3780 #endif
3781 
3782  for (int i = 0; i < HEAP_COUNT; i++) {
3783  rb_heap_t *heap = &heaps[i];
3784  gc_sweep_start_heap(objspace, heap);
3785 
3786  /* We should call gc_sweep_finish_heap for size pools with no pages. */
3787  if (heap->sweeping_page == NULL) {
3788  GC_ASSERT(heap->total_pages == 0);
3789  GC_ASSERT(heap->total_slots == 0);
3790  gc_sweep_finish_heap(objspace, heap);
3791  }
3792  }
3793 
3794  rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
3795 }
3796 
3797 static void
3798 gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3799 {
3800  size_t total_slots = heap->total_slots;
3801  size_t swept_slots = heap->freed_slots + heap->empty_slots;
3802 
3803  size_t init_slots = gc_params.heap_init_slots[heap - heaps];
3804  size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
3805 
3806  if (swept_slots < min_free_slots &&
3807  /* The heap is a growth heap if it freed more slots than it had empty slots. */
3808  (heap->empty_slots == 0 || heap->freed_slots > heap->empty_slots)) {
3809  /* If we don't have enough slots and we have pages on the tomb heap, move
3810  * pages from the tomb heap to the eden heap. This may prevent page
3811  * creation thrashing (frequently allocating and deallocating pages) and
3812  * GC thrashing (running GC more frequently than required). */
3813  struct heap_page *resurrected_page;
3814  while (swept_slots < min_free_slots &&
3815  (resurrected_page = heap_page_resurrect(objspace))) {
3816  heap_add_page(objspace, heap, resurrected_page);
3817  heap_add_freepage(heap, resurrected_page);
3818 
3819  swept_slots += resurrected_page->free_slots;
3820  }
3821 
3822  if (swept_slots < min_free_slots) {
3823  /* Grow this heap if we are in a major GC or if we haven't run at least
3824  * RVALUE_OLD_AGE minor GCs since the last major GC. */
3825  if (is_full_marking(objspace) ||
3826  objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
3827  heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
3828  }
3829  else {
3830  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
3831  heap->force_major_gc_count++;
3832  }
3833  }
3834  }
3835 }
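
/* Illustrative sketch (not part of the Ruby sources): the growth threshold
 * checked in gc_sweep_finish_heap above is MAX(total_slots, init_slots) scaled
 * by gc_params.heap_free_slots_min_ratio. As a worked example, with a ratio of
 * 0.20 a heap of 100000 slots (init 10000) must finish the sweep with at least
 * 20000 free slots, otherwise pages are resurrected and, failing that, the
 * heap is grown or a major GC is requested. min_free_slots_for is a
 * hypothetical helper name. Assumes <stddef.h>. */
static size_t
min_free_slots_for(size_t total_slots, size_t init_slots, double min_free_ratio)
{
    size_t base = total_slots > init_slots ? total_slots : init_slots;  /* MAX() */
    return (size_t)(base * min_free_ratio);  /* e.g. 100000 * 0.20 == 20000 */
}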
3836 
3837 static void
3838 gc_sweep_finish(rb_objspace_t *objspace)
3839 {
3840  gc_report(1, objspace, "gc_sweep_finish\n");
3841 
3842  gc_prof_set_heap_info(objspace);
3843  heap_pages_free_unused_pages(objspace);
3844 
3845  for (int i = 0; i < HEAP_COUNT; i++) {
3846  rb_heap_t *heap = &heaps[i];
3847 
3848  heap->freed_slots = 0;
3849  heap->empty_slots = 0;
3850 
3851  if (!will_be_incremental_marking(objspace)) {
3852  struct heap_page *end_page = heap->free_pages;
3853  if (end_page) {
3854  while (end_page->free_next) end_page = end_page->free_next;
3855  end_page->free_next = heap->pooled_pages;
3856  }
3857  else {
3858  heap->free_pages = heap->pooled_pages;
3859  }
3860  heap->pooled_pages = NULL;
3861  objspace->rincgc.pooled_slots = 0;
3862  }
3863  }
3864 
3865  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_END_SWEEP);
3866  gc_mode_transition(objspace, gc_mode_none);
3867 
3868 #if RGENGC_CHECK_MODE >= 2
3869  gc_verify_internal_consistency(objspace);
3870 #endif
3871 }
3872 
3873 static int
3874 gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
3875 {
3876  struct heap_page *sweep_page = heap->sweeping_page;
3877  int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
3878  int swept_slots = 0;
3879  int pooled_slots = 0;
3880 
3881  if (sweep_page == NULL) return FALSE;
3882 
3883 #if GC_ENABLE_LAZY_SWEEP
3884  gc_prof_sweep_timer_start(objspace);
3885 #endif
3886 
3887  do {
3888  RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
3889 
3890  struct gc_sweep_context ctx = {
3891  .page = sweep_page,
3892  .final_slots = 0,
3893  .freed_slots = 0,
3894  .empty_slots = 0,
3895  };
3896  gc_sweep_page(objspace, heap, &ctx);
3897  int free_slots = ctx.freed_slots + ctx.empty_slots;
3898 
3899  heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
3900 
3901  if (free_slots == sweep_page->total_slots &&
3902  heap_pages_freeable_pages > 0 &&
3903  unlink_limit > 0) {
3904  heap_pages_freeable_pages--;
3905  unlink_limit--;
3906  /* There are no living objects, so move this page to the global empty pages. */
3907  heap_unlink_page(objspace, heap, sweep_page);
3908 
3909  sweep_page->start = 0;
3910  sweep_page->total_slots = 0;
3911  sweep_page->slot_size = 0;
3912  sweep_page->heap = NULL;
3913  sweep_page->free_slots = 0;
3914 
3915  asan_unlock_freelist(sweep_page);
3916  sweep_page->freelist = NULL;
3917  asan_lock_freelist(sweep_page);
3918 
3919  asan_poison_memory_region(sweep_page->body, HEAP_PAGE_SIZE);
3920 
3921  objspace->empty_pages_count++;
3922  sweep_page->free_next = objspace->empty_pages;
3923  objspace->empty_pages = sweep_page;
3924  }
3925  else if (free_slots > 0) {
3926  heap->freed_slots += ctx.freed_slots;
3927  heap->empty_slots += ctx.empty_slots;
3928 
3929  if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
3930  heap_add_poolpage(objspace, heap, sweep_page);
3931  pooled_slots += free_slots;
3932  }
3933  else {
3934  heap_add_freepage(heap, sweep_page);
3935  swept_slots += free_slots;
3936  if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
3937  break;
3938  }
3939  }
3940  }
3941  else {
3942  sweep_page->free_next = NULL;
3943  }
3944  } while ((sweep_page = heap->sweeping_page));
3945 
3946  if (!heap->sweeping_page) {
3947  gc_sweep_finish_heap(objspace, heap);
3948 
3949  if (!has_sweeping_pages(objspace)) {
3950  gc_sweep_finish(objspace);
3951  }
3952  }
3953 
3954 #if GC_ENABLE_LAZY_SWEEP
3955  gc_prof_sweep_timer_stop(objspace);
3956 #endif
3957 
3958  return heap->free_pages != NULL;
3959 }
3960 
3961 static void
3962 gc_sweep_rest(rb_objspace_t *objspace)
3963 {
3964  for (int i = 0; i < HEAP_COUNT; i++) {
3965  rb_heap_t *heap = &heaps[i];
3966 
3967  while (heap->sweeping_page) {
3968  gc_sweep_step(objspace, heap);
3969  }
3970  }
3971 }
3972 
3973 static void
3974 gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap)
3975 {
3976  GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
3977  if (!GC_ENABLE_LAZY_SWEEP) return;
3978 
3979  gc_sweeping_enter(objspace);
3980 
3981  for (int i = 0; i < HEAP_COUNT; i++) {
3982  rb_heap_t *heap = &heaps[i];
3983  if (!gc_sweep_step(objspace, heap)) {
3984  /* sweep_heap requires a free slot but sweeping did not yield any
3985  * and we cannot allocate a new page. */
3986  if (heap == sweep_heap && objspace->heap_pages.allocatable_slots == 0) {
3987  /* Not allowed to create a new page so finish sweeping. */
3988  gc_sweep_rest(objspace);
3989  break;
3990  }
3991  }
3992  }
3993 
3994  gc_sweeping_exit(objspace);
3995 }
3996 
3997 VALUE
3998 rb_gc_impl_location(void *objspace_ptr, VALUE value)
3999 {
4000  VALUE destination;
4001 
4002  if (!SPECIAL_CONST_P(value)) {
4003  asan_unpoisoning_object(value) {
4004  if (BUILTIN_TYPE(value) == T_MOVED) {
4005  destination = (VALUE)RMOVED(value)->destination;
4006  GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
4007  }
4008  else {
4009  destination = value;
4010  }
4011  }
4012  }
4013  else {
4014  destination = value;
4015  }
4016 
4017  return destination;
4018 }
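
/* Illustrative sketch (not part of the Ruby sources): the idea behind
 * rb_gc_impl_location is a forwarding pointer. When an object is moved, its
 * old slot becomes a T_MOVED tombstone recording the new address, so resolving
 * a reference is at most one pointer chase. The struct and names below are
 * hypothetical stand-ins for RMOVED(value)->destination. Assumes <stdbool.h>. */
struct example_obj {
    bool moved;                      /* plays the role of BUILTIN_TYPE(obj) == T_MOVED */
    struct example_obj *destination; /* where the object now lives, if moved           */
};

static struct example_obj *
example_location(struct example_obj *obj)
{
    return obj->moved ? obj->destination : obj;
}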
4019 
4020 #if GC_CAN_COMPILE_COMPACTION
4021 static void
4022 invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
4023 {
4024  if (bitset) {
4025  do {
4026  if (bitset & 1) {
4027  VALUE forwarding_object = (VALUE)p;
4028  VALUE object;
4029 
4030  if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
4031  GC_ASSERT(RVALUE_PINNED(objspace, forwarding_object));
4032  GC_ASSERT(!RVALUE_MARKED(objspace, forwarding_object));
4033 
4034  CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
4035 
4036  object = rb_gc_impl_location(objspace, forwarding_object);
4037 
4038  uint32_t original_shape_id = 0;
4039  if (RB_TYPE_P(object, T_OBJECT)) {
4040  original_shape_id = RMOVED(forwarding_object)->original_shape_id;
4041  }
4042 
4043  gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
4044  /* forwarding_object is now our actual object, and "object"
4045  * is the free slot for the original page */
4046 
4047  if (original_shape_id) {
4048  rb_gc_set_shape(forwarding_object, original_shape_id);
4049  }
4050 
4051  struct heap_page *orig_page = GET_HEAP_PAGE(object);
4052  orig_page->free_slots++;
4053  heap_page_add_freeobj(objspace, orig_page, object);
4054 
4055  GC_ASSERT(RVALUE_MARKED(objspace, forwarding_object));
4056  GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
4057  GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
4058  }
4059  }
4060  p += BASE_SLOT_SIZE;
4061  bitset >>= 1;
4062  } while (bitset);
4063  }
4064 }
4065 
4066 static void
4067 invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
4068 {
4069  int i;
4070  bits_t *mark_bits, *pin_bits;
4071  bits_t bitset;
4072 
4073  mark_bits = page->mark_bits;
4074  pin_bits = page->pinned_bits;
4075 
4076  uintptr_t p = page->start;
4077 
4078  // Skip out of range slots at the head of the page
4079  bitset = pin_bits[0] & ~mark_bits[0];
4080  bitset >>= NUM_IN_PAGE(p);
4081  invalidate_moved_plane(objspace, page, p, bitset);
4082  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
4083 
4084  for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4085  /* Moved objects are pinned but never marked. We reuse the pin bits
4086  * to indicate there is a moved object in this slot. */
4087  bitset = pin_bits[i] & ~mark_bits[i];
4088 
4089  invalidate_moved_plane(objspace, page, p, bitset);
4090  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
4091  }
4092 }
4093 #endif
4094 
4095 static void
4096 gc_compact_start(rb_objspace_t *objspace)
4097 {
4098  struct heap_page *page = NULL;
4099  gc_mode_transition(objspace, gc_mode_compacting);
4100 
4101  for (int i = 0; i < HEAP_COUNT; i++) {
4102  rb_heap_t *heap = &heaps[i];
4103  ccan_list_for_each(&heap->pages, page, page_node) {
4104  page->flags.before_sweep = TRUE;
4105  }
4106 
4107  heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
4108  heap->compact_cursor_index = 0;
4109  }
4110 
4111  if (gc_prof_enabled(objspace)) {
4112  gc_profile_record *record = gc_prof_record(objspace);
4113  record->moved_objects = objspace->rcompactor.total_moved;
4114  }
4115 
4116  memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
4117  memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
4118  memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
4119  memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
4120 
4121  /* Set up read barrier for pages containing MOVED objects */
4122  install_handlers();
4123 }
4124 
4125 static void gc_sweep_compact(rb_objspace_t *objspace);
4126 
4127 static void
4128 gc_sweep(rb_objspace_t *objspace)
4129 {
4130  gc_sweeping_enter(objspace);
4131 
4132  const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4133 
4134  gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4135 
4136  gc_sweep_start(objspace);
4137  if (objspace->flags.during_compacting) {
4138  gc_sweep_compact(objspace);
4139  }
4140 
4141  if (immediate_sweep) {
4142 #if !GC_ENABLE_LAZY_SWEEP
4143  gc_prof_sweep_timer_start(objspace);
4144 #endif
4145  gc_sweep_rest(objspace);
4146 #if !GC_ENABLE_LAZY_SWEEP
4147  gc_prof_sweep_timer_stop(objspace);
4148 #endif
4149  }
4150  else {
4151 
4152  /* Sweep every size pool. */
4153  for (int i = 0; i < HEAP_COUNT; i++) {
4154  rb_heap_t *heap = &heaps[i];
4155  gc_sweep_step(objspace, heap);
4156  }
4157  }
4158 
4159  gc_sweeping_exit(objspace);
4160 }
4161 
4162 /* Marking - Marking stack */
4163 
4164 static stack_chunk_t *
4165 stack_chunk_alloc(void)
4166 {
4167  stack_chunk_t *res;
4168 
4169  res = malloc(sizeof(stack_chunk_t));
4170  if (!res)
4171  rb_memerror();
4172 
4173  return res;
4174 }
4175 
4176 static inline int
4177 is_mark_stack_empty(mark_stack_t *stack)
4178 {
4179  return stack->chunk == NULL;
4180 }
4181 
4182 static size_t
4183 mark_stack_size(mark_stack_t *stack)
4184 {
4185  size_t size = stack->index;
4186  stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4187 
4188  while (chunk) {
4189  size += stack->limit;
4190  chunk = chunk->next;
4191  }
4192  return size;
4193 }
4194 
4195 static void
4196 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
4197 {
4198  chunk->next = stack->cache;
4199  stack->cache = chunk;
4200  stack->cache_size++;
4201 }
4202 
4203 static void
4204 shrink_stack_chunk_cache(mark_stack_t *stack)
4205 {
4206  stack_chunk_t *chunk;
4207 
4208  if (stack->unused_cache_size > (stack->cache_size/2)) {
4209  chunk = stack->cache;
4210  stack->cache = stack->cache->next;
4211  stack->cache_size--;
4212  free(chunk);
4213  }
4214  stack->unused_cache_size = stack->cache_size;
4215 }
4216 
4217 static void
4218 push_mark_stack_chunk(mark_stack_t *stack)
4219 {
4220  stack_chunk_t *next;
4221 
4222  GC_ASSERT(stack->index == stack->limit);
4223 
4224  if (stack->cache_size > 0) {
4225  next = stack->cache;
4226  stack->cache = stack->cache->next;
4227  stack->cache_size--;
4228  if (stack->unused_cache_size > stack->cache_size)
4229  stack->unused_cache_size = stack->cache_size;
4230  }
4231  else {
4232  next = stack_chunk_alloc();
4233  }
4234  next->next = stack->chunk;
4235  stack->chunk = next;
4236  stack->index = 0;
4237 }
4238 
4239 static void
4240 pop_mark_stack_chunk(mark_stack_t *stack)
4241 {
4242  stack_chunk_t *prev;
4243 
4244  prev = stack->chunk->next;
4245  GC_ASSERT(stack->index == 0);
4246  add_stack_chunk_cache(stack, stack->chunk);
4247  stack->chunk = prev;
4248  stack->index = stack->limit;
4249 }
4250 
4251 static void
4252 mark_stack_chunk_list_free(stack_chunk_t *chunk)
4253 {
4254  stack_chunk_t *next = NULL;
4255 
4256  while (chunk != NULL) {
4257  next = chunk->next;
4258  free(chunk);
4259  chunk = next;
4260  }
4261 }
4262 
4263 static void
4264 free_stack_chunks(mark_stack_t *stack)
4265 {
4266  mark_stack_chunk_list_free(stack->chunk);
4267 }
4268 
4269 static void
4270 mark_stack_free_cache(mark_stack_t *stack)
4271 {
4272  mark_stack_chunk_list_free(stack->cache);
4273  stack->cache_size = 0;
4274  stack->unused_cache_size = 0;
4275 }
4276 
4277 static void
4278 push_mark_stack(mark_stack_t *stack, VALUE obj)
4279 {
4280  switch (BUILTIN_TYPE(obj)) {
4281  case T_OBJECT:
4282  case T_CLASS:
4283  case T_MODULE:
4284  case T_FLOAT:
4285  case T_STRING:
4286  case T_REGEXP:
4287  case T_ARRAY:
4288  case T_HASH:
4289  case T_STRUCT:
4290  case T_BIGNUM:
4291  case T_FILE:
4292  case T_DATA:
4293  case T_MATCH:
4294  case T_COMPLEX:
4295  case T_RATIONAL:
4296  case T_TRUE:
4297  case T_FALSE:
4298  case T_SYMBOL:
4299  case T_IMEMO:
4300  case T_ICLASS:
4301  if (stack->index == stack->limit) {
4302  push_mark_stack_chunk(stack);
4303  }
4304  stack->chunk->data[stack->index++] = obj;
4305  return;
4306 
4307  case T_NONE:
4308  case T_NIL:
4309  case T_FIXNUM:
4310  case T_MOVED:
4311  case T_ZOMBIE:
4312  case T_UNDEF:
4313  case T_MASK:
4314  rb_bug("push_mark_stack() called for broken object");
4315  break;
4316 
4317  case T_NODE:
4318  rb_bug("push_mark_stack: unexpected T_NODE object");
4319  break;
4320  }
4321 
4322  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
4323  BUILTIN_TYPE(obj), (void *)obj,
4324  is_pointer_to_heap((rb_objspace_t *)rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
4325 }
4326 
4327 static int
4328 pop_mark_stack(mark_stack_t *stack, VALUE *data)
4329 {
4330  if (is_mark_stack_empty(stack)) {
4331  return FALSE;
4332  }
4333  if (stack->index == 1) {
4334  *data = stack->chunk->data[--stack->index];
4335  pop_mark_stack_chunk(stack);
4336  }
4337  else {
4338  *data = stack->chunk->data[--stack->index];
4339  }
4340  return TRUE;
4341 }
4342 
4343 static void
4344 init_mark_stack(mark_stack_t *stack)
4345 {
4346  int i;
4347 
4348  MEMZERO(stack, mark_stack_t, 1);
4349  stack->index = stack->limit = STACK_CHUNK_SIZE;
4350 
4351  for (i=0; i < 4; i++) {
4352  add_stack_chunk_cache(stack, stack_chunk_alloc());
4353  }
4354  stack->unused_cache_size = stack->cache_size;
4355 }
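
/* Illustrative sketch (not part of the Ruby sources): the mark stack above is
 * a linked list of fixed-size chunks. Push spills into a fresh chunk when the
 * head chunk is full, and pop unlinks the head chunk once it drains. This
 * sketch drops the chunk cache and the type checks; every name is
 * hypothetical, and it aborts where the real code calls rb_memerror().
 * Assumes <stdlib.h>, <stdint.h>, <stddef.h> and <stdbool.h>. */
#define EX_CHUNK_SIZE 4
struct ex_chunk { struct ex_chunk *next; uintptr_t data[EX_CHUNK_SIZE]; };
struct ex_stack { struct ex_chunk *chunk; size_t index; };

static void
ex_push(struct ex_stack *st, uintptr_t v)
{
    if (st->chunk == NULL || st->index == EX_CHUNK_SIZE) {
        struct ex_chunk *c = malloc(sizeof(*c));   /* head chunk full: add one */
        if (!c) abort();
        c->next = st->chunk;
        st->chunk = c;
        st->index = 0;
    }
    st->chunk->data[st->index++] = v;
}

static bool
ex_pop(struct ex_stack *st, uintptr_t *out)
{
    if (st->chunk == NULL) return false;           /* stack is empty */
    *out = st->chunk->data[--st->index];
    if (st->index == 0) {                          /* head chunk drained: unlink it   */
        struct ex_chunk *used = st->chunk;
        st->chunk = used->next;
        st->index = st->chunk ? EX_CHUNK_SIZE : 0; /* trailing chunks are always full */
        free(used);
    }
    return true;
}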
4356 
4357 /* Marking */
4358 
4359 static void
4360 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
4361 {
4362  const VALUE old_parent = objspace->rgengc.parent_object;
4363 
4364  if (old_parent) { /* parent object is old */
4365  if (RVALUE_WB_UNPROTECTED(objspace, obj) || !RVALUE_OLD_P(objspace, obj)) {
4366  rgengc_remember(objspace, old_parent);
4367  }
4368  }
4369 
4370  GC_ASSERT(old_parent == objspace->rgengc.parent_object);
4371 }
4372 
4373 static inline int
4374 gc_mark_set(rb_objspace_t *objspace, VALUE obj)
4375 {
4376  if (RVALUE_MARKED(objspace, obj)) return 0;
4377  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
4378  return 1;
4379 }
4380 
4381 static void
4382 gc_aging(rb_objspace_t *objspace, VALUE obj)
4383 {
4384  /* Disable aging if major GCs are disabled. This prevents long-lived
4385  * objects from filling up the heap at the expense of marking many more objects.
4386  *
4387  * We should always pre-warm our process when disabling majors, by running
4388  * GC manually several times so that most objects likely to become oldgen
4389  * are already oldgen.
4390  */
4391  if (!gc_config_full_mark_val)
4392  return;
4393 
4394  struct heap_page *page = GET_HEAP_PAGE(obj);
4395 
4396  GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
4397  check_rvalue_consistency(objspace, obj);
4398 
4399  if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
4400  if (!RVALUE_OLD_P(objspace, obj)) {
4401  gc_report(3, objspace, "gc_aging: YOUNG: %s\n", rb_obj_info(obj));
4402  RVALUE_AGE_INC(objspace, obj);
4403  }
4404  else if (is_full_marking(objspace)) {
4405  GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
4406  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
4407  }
4408  }
4409  check_rvalue_consistency(objspace, obj);
4410 
4411  objspace->marked_slots++;
4412 }
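
/* Illustrative sketch (not part of the Ruby sources): the aging step above
 * gives a write-barrier-protected object one more "age" each time it survives
 * marking; once the age reaches the RVALUE_OLD_AGE threshold it is treated as
 * old and left to major GCs. The struct, names and the threshold value below
 * are hypothetical. Assumes <stdbool.h>. */
#define EX_OLD_AGE 3

struct ex_header { unsigned age; bool old; };

static void
ex_age_on_mark(struct ex_header *h)
{
    if (h->old) return;       /* already promoted to the old generation */
    if (++h->age >= EX_OLD_AGE) {
        h->old = true;        /* future minor GCs will not re-mark it   */
    }
}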
4413 
4414 static void
4415 gc_grey(rb_objspace_t *objspace, VALUE obj)
4416 {
4417 #if RGENGC_CHECK_MODE
4418  if (RVALUE_MARKED(objspace, obj) == FALSE) rb_bug("gc_grey: %s is not marked.", rb_obj_info(obj));
4419  if (RVALUE_MARKING(objspace, obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", rb_obj_info(obj));
4420 #endif
4421 
4422  if (is_incremental_marking(objspace)) {
4423  MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4424  }
4425 
4426  push_mark_stack(&objspace->mark_stack, obj);
4427 }
4428 
4429 static void
4430 gc_mark(rb_objspace_t *objspace, VALUE obj)
4431 {
4432  GC_ASSERT(during_gc);
4433 
4434  rgengc_check_relation(objspace, obj);
4435  if (!gc_mark_set(objspace, obj)) return; /* already marked */
4436 
4437  if (0) { // for debug GC marking miss
4438  if (objspace->rgengc.parent_object) {
4439  RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
4440  (void *)obj, obj_type_name(obj),
4441  (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
4442  }
4443  else {
4444  RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
4445  }
4446  }
4447 
4448  if (RB_UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
4449  rb_obj_info_dump(obj);
4450  rb_bug("try to mark T_NONE object"); /* check here will help debugging */
4451  }
4452 
4453  gc_aging(objspace, obj);
4454  gc_grey(objspace, obj);
4455 }
4456 
4457 static inline void
4458 gc_pin(rb_objspace_t *objspace, VALUE obj)
4459 {
4460  GC_ASSERT(!SPECIAL_CONST_P(obj));
4461  if (RB_UNLIKELY(objspace->flags.during_compacting)) {
4462  if (RB_LIKELY(during_gc)) {
4463  if (!RVALUE_PINNED(objspace, obj)) {
4464  GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
4465  GET_HEAP_PAGE(obj)->pinned_slots++;
4466  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
4467  }
4468  }
4469  }
4470 }
4471 
4472 static inline void
4473 gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
4474 {
4475  gc_pin(objspace, obj);
4476  gc_mark(objspace, obj);
4477 }
4478 
4479 void
4480 rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
4481 {
4482  rb_objspace_t *objspace = objspace_ptr;
4483 
4484  if (RB_UNLIKELY(objspace->flags.during_reference_updating)) {
4485  GC_ASSERT(objspace->flags.during_compacting);
4486  GC_ASSERT(during_gc);
4487 
4488  *ptr = rb_gc_impl_location(objspace, *ptr);
4489  }
4490  else {
4491  gc_mark(objspace, *ptr);
4492  }
4493 }
4494 
4495 void
4496 rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
4497 {
4498  rb_objspace_t *objspace = objspace_ptr;
4499 
4500  gc_mark(objspace, obj);
4501 }
4502 
4503 void
4504 rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
4505 {
4506  rb_objspace_t *objspace = objspace_ptr;
4507 
4508  gc_mark_and_pin(objspace, obj);
4509 }
4510 
4511 void
4512 rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
4513 {
4514  rb_objspace_t *objspace = objspace_ptr;
4515 
4516  (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
4517 
4518  if (is_pointer_to_heap(objspace, (void *)obj)) {
4519  asan_unpoisoning_object(obj) {
4520  /* Garbage can live on the stack, so do not mark or pin */
4521  switch (BUILTIN_TYPE(obj)) {
4522  case T_ZOMBIE:
4523  case T_NONE:
4524  break;
4525  default:
4526  gc_mark_and_pin(objspace, obj);
4527  break;
4528  }
4529  }
4530  }
4531 }
4532 
4533 void
4534 rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
4535 {
4536  rb_objspace_t *objspace = objspace_ptr;
4537 
4538  GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
4539 
4540  VALUE obj = *ptr;
4541 
4542  if (RB_UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
4543  rb_obj_info_dump(obj);
4544  rb_bug("try to mark T_NONE object");
4545  }
4546 
4547  /* If we are in a minor GC and the other object is old, then obj should
4548  * already be marked and cannot be reclaimed in this GC cycle so we don't
4549  * need to add it to the weak references list. */
4550  if (!is_full_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
4551  GC_ASSERT(RVALUE_MARKED(objspace, obj));
4552  GC_ASSERT(!objspace->flags.during_compacting);
4553 
4554  return;
4555  }
4556 
4557  rgengc_check_relation(objspace, obj);
4558 
4559  DURING_GC_COULD_MALLOC_REGION_START();
4560  {
4561  rb_darray_append(&objspace->weak_references, ptr);
4562  }
4563  DURING_GC_COULD_MALLOC_REGION_END();
4564 
4565  objspace->profile.weak_references_count++;
4566 }
4567 
4568 void
4569 rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
4570 {
4571  rb_objspace_t *objspace = objspace_ptr;
4572 
4573  /* If we're not incremental marking, then the state of the objects can't
4574  * change so we don't need to do anything. */
4575  if (!is_incremental_marking(objspace)) return;
4576  /* If parent_obj has not been marked, then ptr has not yet been marked
4577  * weak, so we don't need to do anything. */
4578  if (!RVALUE_MARKED(objspace, parent_obj)) return;
4579 
4580  VALUE **ptr_ptr;
4581  rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
4582  if (*ptr_ptr == ptr) {
4583  *ptr_ptr = NULL;
4584  break;
4585  }
4586  }
4587 }
4588 
4589 static int
4590 pin_value(st_data_t key, st_data_t value, st_data_t data)
4591 {
4592  rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
4593 
4594  return ST_CONTINUE;
4595 }
4596 
4597 static void
4598 mark_roots(rb_objspace_t *objspace, const char **categoryp)
4599 {
4600 #define MARK_CHECKPOINT(category) do { \
4601  if (categoryp) *categoryp = category; \
4602 } while (0)
4603 
4604  MARK_CHECKPOINT("objspace");
4605  objspace->rgengc.parent_object = Qfalse;
4606 
4607  if (finalizer_table != NULL) {
4608  st_foreach(finalizer_table, pin_value, (st_data_t)objspace);
4609  }
4610 
4611  st_foreach(objspace->obj_to_id_tbl, gc_mark_tbl_no_pin_i, (st_data_t)objspace);
4612 
4613  if (stress_to_class) rb_gc_mark(stress_to_class);
4614 
4615  rb_gc_mark_roots(objspace, categoryp);
4616 }
4617 
4618 static inline void
4619 gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
4620 {
4621  if (RVALUE_OLD_P(objspace, obj)) {
4622  objspace->rgengc.parent_object = obj;
4623  }
4624  else {
4625  objspace->rgengc.parent_object = Qfalse;
4626  }
4627 }
4628 
4629 static void
4630 gc_mark_children(rb_objspace_t *objspace, VALUE obj)
4631 {
4632  gc_mark_set_parent(objspace, obj);
4633  rb_gc_mark_children(objspace, obj);
4634 }
4635 
4640 static inline int
4641 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
4642 {
4643  mark_stack_t *mstack = &objspace->mark_stack;
4644  VALUE obj;
4645  size_t marked_slots_at_the_beginning = objspace->marked_slots;
4646  size_t popped_count = 0;
4647 
4648  while (pop_mark_stack(mstack, &obj)) {
4649  if (obj == Qundef) continue; /* skip */
4650 
4651  if (RGENGC_CHECK_MODE && !RVALUE_MARKED(objspace, obj)) {
4652  rb_bug("gc_mark_stacked_objects: %s is not marked.", rb_obj_info(obj));
4653  }
4654  gc_mark_children(objspace, obj);
4655 
4656  if (incremental) {
4657  if (RGENGC_CHECK_MODE && !RVALUE_MARKING(objspace, obj)) {
4658  rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
4659  }
4660  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4661  popped_count++;
4662 
4663  if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
4664  break;
4665  }
4666  }
4667  else {
4668  /* just ignore marking bits */
4669  }
4670  }
4671 
4672  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
4673 
4674  if (is_mark_stack_empty(mstack)) {
4675  shrink_stack_chunk_cache(mstack);
4676  return TRUE;
4677  }
4678  else {
4679  return FALSE;
4680  }
4681 }
4682 
4683 static int
4684 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
4685 {
4686  return gc_mark_stacked_objects(objspace, TRUE, count);
4687 }
4688 
4689 static int
4690 gc_mark_stacked_objects_all(rb_objspace_t *objspace)
4691 {
4692  return gc_mark_stacked_objects(objspace, FALSE, 0);
4693 }
4694 
4695 #if RGENGC_CHECK_MODE >= 4
4696 
4697 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
4698 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
4699 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
4700 
4701 struct reflist {
4702  VALUE *list;
4703  int pos;
4704  int size;
4705 };
4706 
4707 static struct reflist *
4708 reflist_create(VALUE obj)
4709 {
4710  struct reflist *refs = xmalloc(sizeof(struct reflist));
4711  refs->size = 1;
4712  refs->list = ALLOC_N(VALUE, refs->size);
4713  refs->list[0] = obj;
4714  refs->pos = 1;
4715  return refs;
4716 }
4717 
4718 static void
4719 reflist_destruct(struct reflist *refs)
4720 {
4721  xfree(refs->list);
4722  xfree(refs);
4723 }
4724 
4725 static void
4726 reflist_add(struct reflist *refs, VALUE obj)
4727 {
4728  if (refs->pos == refs->size) {
4729  refs->size *= 2;
4730  SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
4731  }
4732 
4733  refs->list[refs->pos++] = obj;
4734 }
4735 
4736 static void
4737 reflist_dump(struct reflist *refs)
4738 {
4739  int i;
4740  for (i=0; i<refs->pos; i++) {
4741  VALUE obj = refs->list[i];
4742  if (IS_ROOTSIG(obj)) { /* root */
4743  fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
4744  }
4745  else {
4746  fprintf(stderr, "<%s>", rb_obj_info(obj));
4747  }
4748  if (i+1 < refs->pos) fprintf(stderr, ", ");
4749  }
4750 }
4751 
4752 static int
4753 reflist_referred_from_machine_context(struct reflist *refs)
4754 {
4755  int i;
4756  for (i=0; i<refs->pos; i++) {
4757  VALUE obj = refs->list[i];
4758  if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
4759  }
4760  return 0;
4761 }
4762 
4763 struct allrefs {
4764  rb_objspace_t *objspace;
4765  /* a -> obj1
4766  * b -> obj1
4767  * c -> obj1
4768  * c -> obj2
4769  * d -> obj3
4770  * #=> {obj1 => [a, b, c], obj2 => [c, d]}
4771  */
4772  struct st_table *references;
4773  const char *category;
4774  VALUE root_obj;
4775  mark_stack_t mark_stack;
4776 };
4777 
4778 static int
4779 allrefs_add(struct allrefs *data, VALUE obj)
4780 {
4781  struct reflist *refs;
4782  st_data_t r;
4783 
4784  if (st_lookup(data->references, obj, &r)) {
4785  refs = (struct reflist *)r;
4786  reflist_add(refs, data->root_obj);
4787  return 0;
4788  }
4789  else {
4790  refs = reflist_create(data->root_obj);
4791  st_insert(data->references, obj, (st_data_t)refs);
4792  return 1;
4793  }
4794 }
4795 
4796 static void
4797 allrefs_i(VALUE obj, void *ptr)
4798 {
4799  struct allrefs *data = (struct allrefs *)ptr;
4800 
4801  if (allrefs_add(data, obj)) {
4802  push_mark_stack(&data->mark_stack, obj);
4803  }
4804 }
4805 
4806 static void
4807 allrefs_roots_i(VALUE obj, void *ptr)
4808 {
4809  struct allrefs *data = (struct allrefs *)ptr;
4810  if (strlen(data->category) == 0) rb_bug("!!!");
4811  data->root_obj = MAKE_ROOTSIG(data->category);
4812 
4813  if (allrefs_add(data, obj)) {
4814  push_mark_stack(&data->mark_stack, obj);
4815  }
4816 }
4817 #define PUSH_MARK_FUNC_DATA(v) do { \
4818  struct gc_mark_func_data_struct *prev_mark_func_data = GET_VM()->gc.mark_func_data; \
4819  GET_VM()->gc.mark_func_data = (v);
4820 
4821 #define POP_MARK_FUNC_DATA() GET_VM()->gc.mark_func_data = prev_mark_func_data;} while (0)
4822 
4823 static st_table *
4824 objspace_allrefs(rb_objspace_t *objspace)
4825 {
4826  struct allrefs data;
4827  struct gc_mark_func_data_struct mfd;
4828  VALUE obj;
4829  int prev_dont_gc = dont_gc_val();
4830  dont_gc_on();
4831 
4832  data.objspace = objspace;
4833  data.references = st_init_numtable();
4834  init_mark_stack(&data.mark_stack);
4835 
4836  mfd.mark_func = allrefs_roots_i;
4837  mfd.data = &data;
4838 
4839  /* traverse root objects */
4840  PUSH_MARK_FUNC_DATA(&mfd);
4841  GET_VM()->gc.mark_func_data = &mfd;
4842  mark_roots(objspace, &data.category);
4843  POP_MARK_FUNC_DATA();
4844 
4845  /* traverse rest objects reachable from root objects */
4846  while (pop_mark_stack(&data.mark_stack, &obj)) {
4847  rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
4848  }
4849  free_stack_chunks(&data.mark_stack);
4850 
4851  dont_gc_set(prev_dont_gc);
4852  return data.references;
4853 }
4854 
4855 static int
4856 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
4857 {
4858  struct reflist *refs = (struct reflist *)value;
4859  reflist_destruct(refs);
4860  return ST_CONTINUE;
4861 }
4862 
4863 static void
4864 objspace_allrefs_destruct(struct st_table *refs)
4865 {
4866  st_foreach(refs, objspace_allrefs_destruct_i, 0);
4867  st_free_table(refs);
4868 }
4869 
4870 #if RGENGC_CHECK_MODE >= 5
4871 static int
4872 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
4873 {
4874  VALUE obj = (VALUE)k;
4875  struct reflist *refs = (struct reflist *)v;
4876  fprintf(stderr, "[allrefs_dump_i] %s <- ", rb_obj_info(obj));
4877  reflist_dump(refs);
4878  fprintf(stderr, "\n");
4879  return ST_CONTINUE;
4880 }
4881 
4882 static void
4883 allrefs_dump(rb_objspace_t *objspace)
4884 {
4885  VALUE size = objspace->rgengc.allrefs_table->num_entries;
4886  fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
4887  st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
4888 }
4889 #endif
4890 
4891 static int
4892 gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
4893 {
4894  VALUE obj = k;
4895  struct reflist *refs = (struct reflist *)v;
4896  rb_objspace_t *objspace = (rb_objspace_t *)ptr;
4897 
4898  /* object should be marked or oldgen */
4899  if (!RVALUE_MARKED(objspace, obj)) {
4900  fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", rb_obj_info(obj));
4901  fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
4902  reflist_dump(refs);
4903 
4904  if (reflist_referred_from_machine_context(refs)) {
4905  fprintf(stderr, " (marked from machine stack).\n");
4906  /* marks from the machine context can be false positives */
4907  }
4908  else {
4909  objspace->rgengc.error_count++;
4910  fprintf(stderr, "\n");
4911  }
4912  }
4913  return ST_CONTINUE;
4914 }
4915 
4916 static void
4917 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
4918 {
4919  size_t saved_malloc_increase = objspace->malloc_params.increase;
4920 #if RGENGC_ESTIMATE_OLDMALLOC
4921  size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
4922 #endif
4923  VALUE already_disabled = rb_objspace_gc_disable(objspace);
4924 
4925  objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
4926 
4927  if (checker_func) {
4928  st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
4929  }
4930 
4931  if (objspace->rgengc.error_count > 0) {
4932 #if RGENGC_CHECK_MODE >= 5
4933  allrefs_dump(objspace);
4934 #endif
4935  if (checker_name) rb_bug("%s: GC has problem.", checker_name);
4936  }
4937 
4938  objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
4939  objspace->rgengc.allrefs_table = 0;
4940 
4941  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4942  objspace->malloc_params.increase = saved_malloc_increase;
4943 #if RGENGC_ESTIMATE_OLDMALLOC
4944  objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
4945 #endif
4946 }
4947 #endif /* RGENGC_CHECK_MODE >= 4 */
4948 
4949 struct verify_internal_consistency_struct {
4950  rb_objspace_t *objspace;
4951  int err_count;
4952  size_t live_object_count;
4953  size_t zombie_object_count;
4954 
4955  VALUE parent;
4956  size_t old_object_count;
4957  size_t remembered_shady_count;
4958 };
4959 
4960 static void
4961 check_generation_i(const VALUE child, void *ptr)
4962 {
4963 {
      struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
4964  const VALUE parent = data->parent;
4965 
4966  if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(data->objspace, parent));
4967 
4968  if (!RVALUE_OLD_P(data->objspace, child)) {
4969  if (!RVALUE_REMEMBERED(data->objspace, parent) &&
4970  !RVALUE_REMEMBERED(data->objspace, child) &&
4971  !RVALUE_UNCOLLECTIBLE(data->objspace, child)) {
4972  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", rb_obj_info(parent), rb_obj_info(child));
4973  data->err_count++;
4974  }
4975  }
4976 }
4977 
4978 static void
4979 check_color_i(const VALUE child, void *ptr)
4980 {
4981  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
4982  const VALUE parent = data->parent;
4983 
4984  if (!RVALUE_WB_UNPROTECTED(data->objspace, parent) && RVALUE_WHITE_P(data->objspace, child)) {
4985  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
4986  rb_obj_info(parent), rb_obj_info(child));
4987  data->err_count++;
4988  }
4989 }
4990 
4991 static void
4992 check_children_i(const VALUE child, void *ptr)
4993 {
4994  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
4995  if (check_rvalue_consistency_force(data->objspace, child, FALSE) != 0) {
4996  fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
4997  rb_obj_info(child), rb_obj_info(data->parent));
4998 
4999  data->err_count++;
5000  }
5001 }
5002 
5003 static int
5004 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
5005  struct verify_internal_consistency_struct *data)
5006 {
5007  VALUE obj;
5008  rb_objspace_t *objspace = data->objspace;
5009 
5010  for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5011  asan_unpoisoning_object(obj) {
5012  if (!rb_gc_impl_garbage_object_p(objspace, obj)) {
5013  /* count objects */
5014  data->live_object_count++;
5015  data->parent = obj;
5016 
5017  /* Normally, we don't expect T_MOVED objects to be in the heap.
5018  * But they can stay alive on the stack. */
5019  if (!gc_object_moved_p(objspace, obj)) {
5020  /* moved slots don't have children */
5021  rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5022  }
5023 
5024  /* check health of children */
5025  if (RVALUE_OLD_P(objspace, obj)) data->old_object_count++;
5026  if (RVALUE_WB_UNPROTECTED(objspace, obj) && RVALUE_UNCOLLECTIBLE(objspace, obj)) data->remembered_shady_count++;
5027 
5028  if (!is_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
5029  /* reachable objects from an oldgen object should be old or (young and remembered) */
5030  data->parent = obj;
5031  rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5032  }
5033 
5034  if (is_incremental_marking(objspace)) {
5035  if (RVALUE_BLACK_P(objspace, obj)) {
5036  /* reachable objects from black objects should be black or grey objects */
5037  data->parent = obj;
5038  rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
5039  }
5040  }
5041  }
5042  else {
5043  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
5044  data->zombie_object_count++;
5045 
5046  if ((RBASIC(obj)->flags & ~ZOMBIE_OBJ_KEPT_FLAGS) != T_ZOMBIE) {
5047  fprintf(stderr, "verify_internal_consistency_i: T_ZOMBIE has extra flags set: %s\n",
5048  rb_obj_info(obj));
5049  data->err_count++;
5050  }
5051 
5052  if (!!FL_TEST(obj, FL_FINALIZE) != !!st_is_member(finalizer_table, obj)) {
5053  fprintf(stderr, "verify_internal_consistency_i: FL_FINALIZE %s but %s finalizer_table: %s\n",
5054  FL_TEST(obj, FL_FINALIZE) ? "set" : "not set", st_is_member(finalizer_table, obj) ? "in" : "not in",
5055  rb_obj_info(obj));
5056  data->err_count++;
5057  }
5058  }
5059  }
5060  }
5061  }
5062 
5063  return 0;
5064 }
5065 
5066 static int
5067 gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
5068 {
5069  unsigned int has_remembered_shady = FALSE;
5070  unsigned int has_remembered_old = FALSE;
5071  int remembered_old_objects = 0;
5072  int free_objects = 0;
5073  int zombie_objects = 0;
5074 
5075  short slot_size = page->slot_size;
5076  uintptr_t start = (uintptr_t)page->start;
5077  uintptr_t end = start + page->total_slots * slot_size;
5078 
5079  for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5080  VALUE val = (VALUE)ptr;
5081  asan_unpoisoning_object(val) {
5082  enum ruby_value_type type = BUILTIN_TYPE(val);
5083 
5084  if (type == T_NONE) free_objects++;
5085  if (type == T_ZOMBIE) zombie_objects++;
5086  if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
5087  has_remembered_shady = TRUE;
5088  }
5089  if (RVALUE_PAGE_MARKING(page, val)) {
5090  has_remembered_old = TRUE;
5091  remembered_old_objects++;
5092  }
5093  }
5094  }
5095 
5096  if (!is_incremental_marking(objspace) &&
5097  page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
5098 
5099  for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5100  VALUE val = (VALUE)ptr;
5101  if (RVALUE_PAGE_MARKING(page, val)) {
5102  fprintf(stderr, "marking -> %s\n", rb_obj_info(val));
5103  }
5104  }
5105  rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5106  (void *)page, remembered_old_objects, obj ? rb_obj_info(obj) : "");
5107  }
5108 
5109  if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
5110  rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5111  (void *)page, obj ? rb_obj_info(obj) : "");
5112  }
5113 
5114  if (0) {
5115  /* free_slots may not be equal to free_objects */
5116  if (page->free_slots != free_objects) {
5117  rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
5118  }
5119  }
5120  if (page->final_slots != zombie_objects) {
5121  rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
5122  }
5123 
5124  return remembered_old_objects;
5125 }
5126 
5127 static int
5128 gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
5129 {
5130  int remembered_old_objects = 0;
5131  struct heap_page *page = 0;
5132 
5133  ccan_list_for_each(head, page, page_node) {
5134  asan_unlock_freelist(page);
5135  struct free_slot *p = page->freelist;
5136  while (p) {
5137  VALUE vp = (VALUE)p;
5138  VALUE prev = vp;
5139  asan_unpoison_object(vp, false);
5140  if (BUILTIN_TYPE(vp) != T_NONE) {
5141  fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", rb_obj_info(vp));
5142  }
5143  p = p->next;
5144  asan_poison_object(prev);
5145  }
5146  asan_lock_freelist(page);
5147 
5148  if (page->flags.has_remembered_objects == FALSE) {
5149  remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
5150  }
5151  }
5152 
5153  return remembered_old_objects;
5154 }
5155 
5156 static int
5157 gc_verify_heap_pages(rb_objspace_t *objspace)
5158 {
5159  int remembered_old_objects = 0;
5160  for (int i = 0; i < HEAP_COUNT; i++) {
5161  remembered_old_objects += gc_verify_heap_pages_(objspace, &((&heaps[i])->pages));
5162  }
5163  return remembered_old_objects;
5164 }
5165 
5166 static void
5167 gc_verify_internal_consistency_(rb_objspace_t *objspace)
5168 {
5169  struct verify_internal_consistency_struct data = {0};
5170 
5171  data.objspace = objspace;
5172  gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
5173 
5174  /* check relations */
5175  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
5176  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
5177  short slot_size = page->slot_size;
5178 
5179  uintptr_t start = (uintptr_t)page->start;
5180  uintptr_t end = start + page->total_slots * slot_size;
5181 
5182  verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
5183  }
5184 
5185  if (data.err_count != 0) {
5186 #if RGENGC_CHECK_MODE >= 5
5187  objspace->rgengc.error_count = data.err_count;
5188  gc_marks_check(objspace, NULL, NULL);
5189  allrefs_dump(objspace);
5190 #endif
5191  rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
5192  }
5193 
5194  /* check heap_page status */
5195  gc_verify_heap_pages(objspace);
5196 
5197  /* check counters */
5198 
5199  if (!is_lazy_sweeping(objspace) &&
5200  !finalizing &&
5201  !rb_gc_multi_ractor_p()) {
5202  if (objspace_live_slots(objspace) != data.live_object_count) {
5203  fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
5204  total_final_slots_count(objspace), total_freed_objects(objspace));
5205  rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5206  objspace_live_slots(objspace), data.live_object_count);
5207  }
5208  }
5209 
5210  if (!is_marking(objspace)) {
5211  if (objspace->rgengc.old_objects != data.old_object_count) {
5212  rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5213  objspace->rgengc.old_objects, data.old_object_count);
5214  }
5215  if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
5216  rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
5217  objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
5218  }
5219  }
5220 
5221  if (!finalizing) {
5222  size_t list_count = 0;
5223 
5224  {
5225  VALUE z = heap_pages_deferred_final;
5226  while (z) {
5227  list_count++;
5228  z = RZOMBIE(z)->next;
5229  }
5230  }
5231 
5232  if (total_final_slots_count(objspace) != data.zombie_object_count ||
5233  total_final_slots_count(objspace) != list_count) {
5234 
5235  rb_bug("inconsistent finalizing object count:\n"
5236  " expect %"PRIuSIZE"\n"
5237  " but %"PRIuSIZE" zombies\n"
5238  " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5239  total_final_slots_count(objspace),
5240  data.zombie_object_count,
5241  list_count);
5242  }
5243  }
5244 
5245  gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
5246 }
5247 
5248 static void
5249 gc_verify_internal_consistency(void *objspace_ptr)
5250 {
5251  rb_objspace_t *objspace = objspace_ptr;
5252 
5253  unsigned int lev = rb_gc_vm_lock();
5254  {
5255  rb_gc_vm_barrier(); // stop other ractors
5256 
5257  unsigned int prev_during_gc = during_gc;
5258  during_gc = FALSE; // stop gc here
5259  {
5260  gc_verify_internal_consistency_(objspace);
5261  }
5262  during_gc = prev_during_gc;
5263  }
5264  rb_gc_vm_unlock(lev);
5265 }
5266 
5267 static void
5268 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
5269 {
5270  if (heap->pooled_pages) {
5271  if (heap->free_pages) {
5272  struct heap_page *free_pages_tail = heap->free_pages;
5273  while (free_pages_tail->free_next) {
5274  free_pages_tail = free_pages_tail->free_next;
5275  }
5276  free_pages_tail->free_next = heap->pooled_pages;
5277  }
5278  else {
5279  heap->free_pages = heap->pooled_pages;
5280  }
5281 
5282  heap->pooled_pages = NULL;
5283  }
5284 }
5285 
5286 static int
5287 gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
5288 {
5289  struct heap_page *page = GET_HEAP_PAGE(obj);
5290  bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5291 
5292  if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5293  page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
5294  MARK_IN_BITMAP(uncollectible_bits, obj);
5295  objspace->rgengc.uncollectible_wb_unprotected_objects++;
5296 
5297 #if RGENGC_PROFILE > 0
5298  objspace->profile.total_remembered_shady_object_count++;
5299 #if RGENGC_PROFILE >= 2
5300  objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5301 #endif
5302 #endif
5303  return TRUE;
5304  }
5305  else {
5306  return FALSE;
5307  }
5308 }
5309 
5310 static inline void
5311 gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
5312 {
5313  if (bits) {
5314  do {
5315  if (bits & 1) {
5316  gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", rb_obj_info((VALUE)p));
5317  GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, (VALUE)p));
5318  GC_ASSERT(RVALUE_MARKED(objspace, (VALUE)p));
5319  gc_mark_children(objspace, (VALUE)p);
5320  }
5321  p += BASE_SLOT_SIZE;
5322  bits >>= 1;
5323  } while (bits);
5324  }
5325 }
5326 
5327 static void
5328 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
5329 {
5330  struct heap_page *page = 0;
5331 
5332  ccan_list_for_each(&heap->pages, page, page_node) {
5333  bits_t *mark_bits = page->mark_bits;
5334  bits_t *wbun_bits = page->wb_unprotected_bits;
5335  uintptr_t p = page->start;
5336  size_t j;
5337 
5338  bits_t bits = mark_bits[0] & wbun_bits[0];
5339  bits >>= NUM_IN_PAGE(p);
5340  gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
5341  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5342 
5343  for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5344  bits_t bits = mark_bits[j] & wbun_bits[j];
5345 
5346  gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
5347  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5348  }
5349  }
5350 
5351  gc_mark_stacked_objects_all(objspace);
5352 }
5353 
5354 static void
5355 gc_update_weak_references(rb_objspace_t *objspace)
5356 {
5357  size_t retained_weak_references_count = 0;
5358  VALUE **ptr_ptr;
5359  rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
5360  if (!*ptr_ptr) continue;
5361 
5362  VALUE obj = **ptr_ptr;
5363 
5364  if (RB_SPECIAL_CONST_P(obj)) continue;
5365 
5366  if (!RVALUE_MARKED(objspace, obj)) {
5367  **ptr_ptr = Qundef;
5368  }
5369  else {
5370  retained_weak_references_count++;
5371  }
5372  }
5373 
5374  objspace->profile.retained_weak_references_count = retained_weak_references_count;
5375 
5376  rb_darray_clear(objspace->weak_references);
5377  DURING_GC_COULD_MALLOC_REGION_START();
5378  {
5379  rb_darray_resize_capa(&objspace->weak_references, retained_weak_references_count);
5380  }
5381  DURING_GC_COULD_MALLOC_REGION_END();
5382 }
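
/* Illustrative sketch (not part of the Ruby sources): during marking the GC
 * records the *addresses* of weakly-referencing slots; after marking,
 * gc_update_weak_references above overwrites any slot whose referent did not
 * get marked (Qundef in the real code) and counts the survivors. All names
 * below are hypothetical; is_marked stands in for RVALUE_MARKED. Assumes
 * <stdint.h>, <stddef.h> and <stdbool.h>. */
static size_t
ex_update_weak_refs(uintptr_t **refs, size_t n, uintptr_t dead_sentinel,
                    bool (*is_marked)(uintptr_t))
{
    size_t retained = 0;
    for (size_t i = 0; i < n; i++) {
        if (refs[i] == NULL) continue;        /* entry was unregistered mid-GC  */
        if (is_marked(*refs[i])) retained++;  /* referent survived this cycle   */
        else *refs[i] = dead_sentinel;        /* referent died: clear weak slot */
    }
    return retained;
}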
5383 
5384 static void
5385 gc_marks_finish(rb_objspace_t *objspace)
5386 {
5387  /* finish incremental GC */
5388  if (is_incremental_marking(objspace)) {
5389  if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
5390  rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
5391  mark_stack_size(&objspace->mark_stack));
5392  }
5393 
5394  mark_roots(objspace, NULL);
5395  while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
5396 
5397 #if RGENGC_CHECK_MODE >= 2
5398  if (gc_verify_heap_pages(objspace) != 0) {
5399  rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
5400  }
5401 #endif
5402 
5403  objspace->flags.during_incremental_marking = FALSE;
5404  /* check children of all marked wb-unprotected objects */
5405  for (int i = 0; i < HEAP_COUNT; i++) {
5406  gc_marks_wb_unprotected_objects(objspace, &heaps[i]);
5407  }
5408  }
5409 
5410  gc_update_weak_references(objspace);
5411 
5412 #if RGENGC_CHECK_MODE >= 2
5413  gc_verify_internal_consistency(objspace);
5414 #endif
5415 
5416 #if RGENGC_CHECK_MODE >= 4
5417  during_gc = FALSE;
5418  gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
5419  during_gc = TRUE;
5420 #endif
5421 
5422  {
5423  const unsigned long r_mul = objspace->live_ractor_cache_count > 8 ? 8 : objspace->live_ractor_cache_count; // up to 8
5424 
5425  size_t total_slots = objspace_available_slots(objspace);
5426  size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
5427  size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
5428  size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5429  if (min_free_slots < gc_params.heap_free_slots * r_mul) {
5430  min_free_slots = gc_params.heap_free_slots * r_mul;
5431  }
5432 
5433  int full_marking = is_full_marking(objspace);
5434 
5435  GC_ASSERT(objspace_available_slots(objspace) >= objspace->marked_slots);
5436 
5437  /* Setup freeable slots. */
5438  size_t total_init_slots = 0;
5439  for (int i = 0; i < HEAP_COUNT; i++) {
5440  total_init_slots += gc_params.heap_init_slots[i] * r_mul;
5441  }
5442 
5443  if (max_free_slots < total_init_slots) {
5444  max_free_slots = total_init_slots;
5445  }
5446 
5447  if (sweep_slots > max_free_slots) {
5448  heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
5449  }
5450  else {
5451  heap_pages_freeable_pages = 0;
5452  }
5453 
5454  if (objspace->heap_pages.allocatable_slots == 0 && sweep_slots < min_free_slots) {
5455  if (!full_marking) {
5456  if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5457  full_marking = TRUE;
5458  }
5459  else {
5460  gc_report(1, objspace, "gc_marks_finish: next is full GC!!\n");
5461  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
5462  }
5463  }
5464  }
5465 
5466  if (full_marking) {
5467  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
5468  const double r = gc_params.oldobject_limit_factor;
5469  objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
5470  (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
5471  (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
5472  );
5473  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
5474  }
5475 
5476  if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
5477  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
5478  }
5479  if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
5480  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
5481  }
5482  if (RGENGC_FORCE_MAJOR_GC) {
5483  gc_needs_major_flags = GPR_FLAG_MAJOR_BY_FORCE;
5484  }
5485 
5486  gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
5487  "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
5488  "sweep %"PRIdSIZE" slots, allocatable %"PRIdSIZE" slots, next GC: %s)\n",
5489  objspace->marked_slots, objspace->rgengc.old_objects, objspace_available_slots(objspace), sweep_slots, objspace->heap_pages.allocatable_slots,
5490  gc_needs_major_flags ? "major" : "minor");
5491  }
5492 
5493  // TODO: refactor so we don't need to call this
5494  rb_ractor_finish_marking();
5495 
5496  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_END_MARK);
5497 }
5498 
5499 static bool
5500 gc_compact_heap_cursors_met_p(rb_heap_t *heap)
5501 {
5502  return heap->sweeping_page == heap->compact_cursor;
5503 }
5504 
5505 
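/*
 * Choose the heap (size pool) an object should move into during compaction.
 * rb_gc_obj_optimal_size() reports the preferred size: 0 keeps the object in
 * its source pool, an allocatable size selects the matching pool, and any
 * other size falls back to the first pool.
 */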
5506 static rb_heap_t *
5507 gc_compact_destination_pool(rb_objspace_t *objspace, rb_heap_t *src_pool, VALUE obj)
5508 {
5509  size_t obj_size = rb_gc_obj_optimal_size(obj);
5510  if (obj_size == 0) {
5511  return src_pool;
5512  }
5513 
5514  size_t idx = 0;
5515  if (rb_gc_impl_size_allocatable_p(obj_size)) {
5516  idx = heap_idx_for_size(obj_size);
5517  }
5518 
5519  return &heaps[idx];
5520 }
5521 
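/*
 * Try to move one object into its destination pool.  If no free slot is
 * available, destination pages are swept one at a time until a slot opens
 * up or that pool's cursors meet.  T_OBJECTs that change size pools get
 * their shape rebuilt for the new slot size; if that fails the object is
 * kept in its current pool.  Returns false only when the source pool's own
 * cursors have met and the move must be abandoned.
 */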
5522 static bool
5523 gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, VALUE src)
5524 {
5525  GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
5526  GC_ASSERT(gc_is_moveable_obj(objspace, src));
5527 
5528  rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, heap, src);
5529  uint32_t orig_shape = 0;
5530  uint32_t new_shape = 0;
5531 
5532  if (gc_compact_heap_cursors_met_p(dest_pool)) {
5533  return dest_pool != heap;
5534  }
5535 
5536  if (RB_TYPE_P(src, T_OBJECT)) {
5537  orig_shape = rb_gc_get_shape(src);
5538 
5539  if (dest_pool != heap) {
5540  new_shape = rb_gc_rebuild_shape(src, dest_pool - heaps);
5541 
5542  if (new_shape == 0) {
5543  dest_pool = heap;
5544  }
5545  }
5546  }
5547 
5548  while (!try_move(objspace, dest_pool, dest_pool->free_pages, src)) {
5549  struct gc_sweep_context ctx = {
5550  .page = dest_pool->sweeping_page,
5551  .final_slots = 0,
5552  .freed_slots = 0,
5553  .empty_slots = 0,
5554  };
5555 
5556  /* The page of src could be partially compacted, so it may contain
5557  * T_MOVED. Sweeping a page may read objects on this page, so we
5558  * need to lock the page. */
5559  lock_page_body(objspace, GET_PAGE_BODY(src));
5560  gc_sweep_page(objspace, dest_pool, &ctx);
5561  unlock_page_body(objspace, GET_PAGE_BODY(src));
5562 
5563  if (dest_pool->sweeping_page->free_slots > 0) {
5564  heap_add_freepage(dest_pool, dest_pool->sweeping_page);
5565  }
5566 
5567  dest_pool->sweeping_page = ccan_list_next(&dest_pool->pages, dest_pool->sweeping_page, page_node);
5568  if (gc_compact_heap_cursors_met_p(dest_pool)) {
5569  return dest_pool != heap;
5570  }
5571  }
5572 
5573  if (orig_shape != 0) {
5574  if (new_shape != 0) {
5575  VALUE dest = rb_gc_impl_location(objspace, src);
5576  rb_gc_set_shape(dest, new_shape);
5577  }
5578  RMOVED(src)->original_shape_id = orig_shape;
5579  }
5580 
5581  return true;
5582 }
5583 
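/*
 * Process one bitmap word ("plane") of a page: every set bit in
 * mark & ~pin denotes a movable candidate.  slot_bits accounts for slot
 * sizes that span several BASE_SLOT_SIZE units, so the bitset is shifted by
 * that many bits per object.  Returns false when a move fails because the
 * cursors met, so the caller can stop.
 */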
5584 static bool
5585 gc_compact_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
5586 {
5587  short slot_size = page->slot_size;
5588  short slot_bits = slot_size / BASE_SLOT_SIZE;
5589  GC_ASSERT(slot_bits > 0);
5590 
5591  do {
5592  VALUE vp = (VALUE)p;
5593  GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5594 
5595  if (bitset & 1) {
5596  objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
5597 
5598  if (gc_is_moveable_obj(objspace, vp)) {
5599  if (!gc_compact_move(objspace, heap, vp)) {
5600  // The cursors met; bubble up
5601  return false;
5602  }
5603  }
5604  }
5605  p += slot_size;
5606  bitset >>= slot_bits;
5607  } while (bitset);
5608 
5609  return true;
5610 }
5611 
5612 // Iterate over all the objects in the page, moving them to where they want to go
5613 static bool
5614 gc_compact_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
5615 {
5616  GC_ASSERT(page == heap->compact_cursor);
5617 
5618  bits_t *mark_bits, *pin_bits;
5619  bits_t bitset;
5620  uintptr_t p = page->start;
5621 
5622  mark_bits = page->mark_bits;
5623  pin_bits = page->pinned_bits;
5624 
5625  // objects that can be moved are marked and not pinned
5626  bitset = (mark_bits[0] & ~pin_bits[0]);
5627  bitset >>= NUM_IN_PAGE(p);
5628  if (bitset) {
5629  if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5630  return false;
5631  }
5632  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5633 
5634  for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
5635  bitset = (mark_bits[j] & ~pin_bits[j]);
5636  if (bitset) {
5637  if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5638  return false;
5639  }
5640  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5641  }
5642 
5643  return true;
5644 }
5645 
5646 static bool
5647 gc_compact_all_compacted_p(rb_objspace_t *objspace)
5648 {
5649  for (int i = 0; i < HEAP_COUNT; i++) {
5650  rb_heap_t *heap = &heaps[i];
5651 
5652  if (heap->total_pages > 0 &&
5653  !gc_compact_heap_cursors_met_p(heap)) {
5654  return false;
5655  }
5656  }
5657 
5658  return true;
5659 }
5660 
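/*
 * Top-level compaction loop: keep compacting every heap until all cursors
 * have met.  Once every object on the compact_cursor page has been
 * considered, the page body is locked (so the read barrier can catch stale
 * accesses to moved slots) and the cursor retreats to the previous page.
 */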
5661 static void
5662 gc_sweep_compact(rb_objspace_t *objspace)
5663 {
5664  gc_compact_start(objspace);
5665 #if RGENGC_CHECK_MODE >= 2
5666  gc_verify_internal_consistency(objspace);
5667 #endif
5668 
5669  while (!gc_compact_all_compacted_p(objspace)) {
5670  for (int i = 0; i < HEAP_COUNT; i++) {
5671  rb_heap_t *heap = &heaps[i];
5672 
5673  if (gc_compact_heap_cursors_met_p(heap)) {
5674  continue;
5675  }
5676 
5677  struct heap_page *start_page = heap->compact_cursor;
5678 
5679  if (!gc_compact_page(objspace, heap, start_page)) {
5680  lock_page_body(objspace, start_page->body);
5681 
5682  continue;
5683  }
5684 
5685  // If we get here, we've finished moving all objects on the compact_cursor page
5686  // So we can lock it and move the cursor on to the next one.
5687  lock_page_body(objspace, start_page->body);
5688  heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
5689  }
5690  }
5691 
5692  gc_compact_finish(objspace);
5693 
5694 #if RGENGC_CHECK_MODE >= 2
5695  gc_verify_internal_consistency(objspace);
5696 #endif
5697 }
5698 
5699 static void
5700 gc_marks_rest(rb_objspace_t *objspace)
5701 {
5702  gc_report(1, objspace, "gc_marks_rest\n");
5703 
5704  for (int i = 0; i < HEAP_COUNT; i++) {
5705  (&heaps[i])->pooled_pages = NULL;
5706  }
5707 
5708  if (is_incremental_marking(objspace)) {
5709  while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
5710  }
5711  else {
5712  gc_mark_stacked_objects_all(objspace);
5713  }
5714 
5715  gc_marks_finish(objspace);
5716 }
5717 
5718 static bool
5719 gc_marks_step(rb_objspace_t *objspace, size_t slots)
5720 {
5721  bool marking_finished = false;
5722 
5723  GC_ASSERT(is_marking(objspace));
5724  if (gc_mark_stacked_objects_incremental(objspace, slots)) {
5725  gc_marks_finish(objspace);
5726 
5727  marking_finished = true;
5728  }
5729 
5730  return marking_finished;
5731 }
5732 
5733 static bool
5734 gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
5735 {
5736  GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
5737  bool marking_finished = true;
5738 
5739  gc_marking_enter(objspace);
5740 
5741  if (heap->free_pages) {
5742  gc_report(2, objspace, "gc_marks_continue: has pooled pages\n");
5743 
5744  marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
5745  }
5746  else {
5747  gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
5748  mark_stack_size(&objspace->mark_stack));
5749  heap->force_incremental_marking_finish_count++;
5750  gc_marks_rest(objspace);
5751  }
5752 
5753  gc_marking_exit(objspace);
5754 
5755  return marking_finished;
5756 }
5757 
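/*
 * Set up a marking phase.  A full mark clears every mark/remember bitmap,
 * resets the old-object counters and sizes the incremental step budget,
 * while a minor mark only re-marks the remembered set and counts old and
 * uncollectible objects as already marked.  Both variants finish by marking
 * the VM roots.
 */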
5758 static void
5759 gc_marks_start(rb_objspace_t *objspace, int full_mark)
5760 {
5761  /* start marking */
5762  gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
5763  gc_mode_transition(objspace, gc_mode_marking);
5764 
5765  if (full_mark) {
5766  size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
5767  objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
5768 
5769  if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
5770  "objspace->rincgc.pooled_slots: %"PRIdSIZE", "
5771  "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
5772  objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
5773  objspace->flags.during_minor_gc = FALSE;
5774  if (ruby_enable_autocompact) {
5775  objspace->flags.during_compacting |= TRUE;
5776  }
5777  objspace->profile.major_gc_count++;
5778  objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
5779  objspace->rgengc.old_objects = 0;
5780  objspace->rgengc.last_major_gc = objspace->profile.count;
5781  objspace->marked_slots = 0;
5782 
5783  for (int i = 0; i < HEAP_COUNT; i++) {
5784  rb_heap_t *heap = &heaps[i];
5785  rgengc_mark_and_rememberset_clear(objspace, heap);
5786  heap_move_pooled_pages_to_free_pages(heap);
5787 
5788  if (objspace->flags.during_compacting) {
5789  struct heap_page *page = NULL;
5790 
5791  ccan_list_for_each(&heap->pages, page, page_node) {
5792  page->pinned_slots = 0;
5793  }
5794  }
5795  }
5796  }
5797  else {
5798  objspace->flags.during_minor_gc = TRUE;
5799  objspace->marked_slots =
5800  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
5801  objspace->profile.minor_gc_count++;
5802 
5803  for (int i = 0; i < HEAP_COUNT; i++) {
5804  rgengc_rememberset_mark(objspace, &heaps[i]);
5805  }
5806  }
5807 
5808  mark_roots(objspace, NULL);
5809 
5810  gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
5811  full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
5812 }
5813 
5814 static bool
5815 gc_marks(rb_objspace_t *objspace, int full_mark)
5816 {
5817  gc_prof_mark_timer_start(objspace);
5818  gc_marking_enter(objspace);
5819 
5820  bool marking_finished = false;
5821 
5822  /* setup marking */
5823 
5824  gc_marks_start(objspace, full_mark);
5825  if (!is_incremental_marking(objspace)) {
5826  gc_marks_rest(objspace);
5827  marking_finished = true;
5828  }
5829 
5830 #if RGENGC_PROFILE > 0
5831  if (gc_prof_record(objspace)) {
5832  gc_profile_record *record = gc_prof_record(objspace);
5833  record->old_objects = objspace->rgengc.old_objects;
5834  }
5835 #endif
5836 
5837  gc_marking_exit(objspace);
5838  gc_prof_mark_timer_stop(objspace);
5839 
5840  return marking_finished;
5841 }
5842 
5843 /* RGENGC */
5844 
5845 static void
5846 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
5847 {
5848  if (level <= RGENGC_DEBUG) {
5849  char buf[1024];
5850  FILE *out = stderr;
5851  va_list args;
5852  const char *status = " ";
5853 
5854  if (during_gc) {
5855  status = is_full_marking(objspace) ? "+" : "-";
5856  }
5857  else {
5858  if (is_lazy_sweeping(objspace)) {
5859  status = "S";
5860  }
5861  if (is_incremental_marking(objspace)) {
5862  status = "M";
5863  }
5864  }
5865 
5866  va_start(args, fmt);
5867  vsnprintf(buf, 1024, fmt, args);
5868  va_end(args);
5869 
5870  fprintf(out, "%s|", status);
5871  fputs(buf, out);
5872  }
5873 }
5874 
5875 /* bit operations */
5876 
5877 static int
5878 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
5879 {
5880  struct heap_page *page = GET_HEAP_PAGE(obj);
5881  bits_t *bits = &page->remembered_bits[0];
5882 
5883  if (MARKED_IN_BITMAP(bits, obj)) {
5884  return FALSE;
5885  }
5886  else {
5887  page->flags.has_remembered_objects = TRUE;
5888  MARK_IN_BITMAP(bits, obj);
5889  return TRUE;
5890  }
5891 }
5892 
5893 /* wb, etc */
5894 
5895 /* return FALSE if already remembered */
5896 static int
5897 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
5898 {
5899  gc_report(6, objspace, "rgengc_remember: %s %s\n", rb_obj_info(obj),
5900  RVALUE_REMEMBERED(objspace, obj) ? "was already remembered" : "is remembered now");
5901 
5902  check_rvalue_consistency(objspace, obj);
5903 
5904  if (RGENGC_CHECK_MODE) {
5905  if (RVALUE_WB_UNPROTECTED(objspace, obj)) rb_bug("rgengc_remember: %s is not wb protected.", rb_obj_info(obj));
5906  }
5907 
5908 #if RGENGC_PROFILE > 0
5909  if (!RVALUE_REMEMBERED(objspace, obj)) {
5910  if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0) {
5911  objspace->profile.total_remembered_normal_object_count++;
5912 #if RGENGC_PROFILE >= 2
5913  objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
5914 #endif
5915  }
5916  }
5917 #endif /* RGENGC_PROFILE > 0 */
5918 
5919  return rgengc_remembersetbits_set(objspace, obj);
5920 }
5921 
5922 #ifndef PROFILE_REMEMBERSET_MARK
5923 #define PROFILE_REMEMBERSET_MARK 0
5924 #endif
5925 
5926 static inline void
5927 rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
5928 {
5929  if (bitset) {
5930  do {
5931  if (bitset & 1) {
5932  VALUE obj = (VALUE)p;
5933  gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", rb_obj_info(obj));
5934  GC_ASSERT(RVALUE_UNCOLLECTIBLE(objspace, obj));
5935  GC_ASSERT(RVALUE_OLD_P(objspace, obj) || RVALUE_WB_UNPROTECTED(objspace, obj));
5936 
5937  gc_mark_children(objspace, obj);
5938  }
5939  p += BASE_SLOT_SIZE;
5940  bitset >>= 1;
5941  } while (bitset);
5942  }
5943 }
5944 
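/*
 * Minor-GC scan of the remembered set: for each page flagged as holding
 * remembered or uncollectible WB-unprotected objects, combine the
 * remembered bitmap with (uncollectible & wb_unprotected), clear the
 * remembered bits, and mark the children of every object that is set.
 */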
5945 static void
5946 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
5947 {
5948  size_t j;
5949  struct heap_page *page = 0;
5950 #if PROFILE_REMEMBERSET_MARK
5951  int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5952 #endif
5953  gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
5954 
5955  ccan_list_for_each(&heap->pages, page, page_node) {
5956  if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
5957  uintptr_t p = page->start;
5958  bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
5959  bits_t *remembered_bits = page->remembered_bits;
5960  bits_t *uncollectible_bits = page->uncollectible_bits;
5961  bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
5962 #if PROFILE_REMEMBERSET_MARK
5963  if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
5964  else if (page->flags.has_remembered_objects) has_old++;
5965  else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
5966 #endif
5967  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5968  bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
5969  remembered_bits[j] = 0;
5970  }
5971  page->flags.has_remembered_objects = FALSE;
5972 
5973  bitset = bits[0];
5974  bitset >>= NUM_IN_PAGE(p);
5975  rgengc_rememberset_mark_plane(objspace, p, bitset);
5976  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5977 
5978  for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
5979  bitset = bits[j];
5980  rgengc_rememberset_mark_plane(objspace, p, bitset);
5981  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5982  }
5983  }
5984 #if PROFILE_REMEMBERSET_MARK
5985  else {
5986  skip++;
5987  }
5988 #endif
5989  }
5990 
5991 #if PROFILE_REMEMBERSET_MARK
5992  fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
5993 #endif
5994  gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
5995 }
5996 
5997 static void
5998 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
5999 {
6000  struct heap_page *page = 0;
6001 
6002  ccan_list_for_each(&heap->pages, page, page_node) {
6003  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6004  memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6005  memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6006  memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6007  memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6008  page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
6009  page->flags.has_remembered_objects = FALSE;
6010  }
6011 }
6012 
6013 /* RGENGC: APIs */
6014 
6015 NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6016 
6017 static void
6018 gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6019 {
6020  if (RGENGC_CHECK_MODE) {
6021  if (!RVALUE_OLD_P(objspace, a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", rb_obj_info(a));
6022  if ( RVALUE_OLD_P(objspace, b)) rb_bug("gc_writebarrier_generational: %s is an old object.", rb_obj_info(b));
6023  if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", rb_obj_info(a), rb_obj_info(b));
6024  }
6025 
6026  /* mark `a' and remember (default behavior) */
6027  if (!RVALUE_REMEMBERED(objspace, a)) {
6028  int lev = rb_gc_vm_lock_no_barrier();
6029  {
6030  rgengc_remember(objspace, a);
6031  }
6032  rb_gc_vm_unlock_no_barrier(lev);
6033 
6034  gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", rb_obj_info(a), rb_obj_info(b));
6035  }
6036 
6037  check_rvalue_consistency(objspace, a);
6038  check_rvalue_consistency(objspace, b);
6039 }
6040 
6041 static void
6042 gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6043 {
6044  gc_mark_set_parent(objspace, parent);
6045  rgengc_check_relation(objspace, obj);
6046  if (gc_mark_set(objspace, obj) == FALSE) return;
6047  gc_aging(objspace, obj);
6048  gc_grey(objspace, obj);
6049 }
6050 
6051 NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6052 
6053 static void
6054 gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6055 {
6056  gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, rb_obj_info(b));
6057 
6058  if (RVALUE_BLACK_P(objspace, a)) {
6059  if (RVALUE_WHITE_P(objspace, b)) {
6060  if (!RVALUE_WB_UNPROTECTED(objspace, a)) {
6061  gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, rb_obj_info(b));
6062  gc_mark_from(objspace, b, a);
6063  }
6064  }
6065  else if (RVALUE_OLD_P(objspace, a) && !RVALUE_OLD_P(objspace, b)) {
6066  rgengc_remember(objspace, a);
6067  }
6068 
6069  if (RB_UNLIKELY(objspace->flags.during_compacting)) {
6070  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
6071  }
6072  }
6073 }
6074 
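/*
 * Write barrier for a reference store a -> b.  Outside incremental marking
 * the generational barrier remembers `a' when an old object gains a
 * reference to a young one.  During incremental marking the incremental
 * barrier greys `b' (or remembers `a'), taking the VM lock and retrying if
 * marking happened to finish in the meantime.
 */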
6075 void
6076 rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
6077 {
6078  rb_objspace_t *objspace = objspace_ptr;
6079 
6080  if (RGENGC_CHECK_MODE) {
6081  if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
6082  if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
6083  }
6084 
6085  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_NONE);
6086  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_MOVED);
6087  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_ZOMBIE);
6088  GC_ASSERT(RB_BUILTIN_TYPE(b) != T_NONE);
6089  GC_ASSERT(RB_BUILTIN_TYPE(b) != T_MOVED);
6090  GC_ASSERT(RB_BUILTIN_TYPE(b) != T_ZOMBIE);
6091 
6092  retry:
6093  if (!is_incremental_marking(objspace)) {
6094  if (!RVALUE_OLD_P(objspace, a) || RVALUE_OLD_P(objspace, b)) {
6095  // do nothing
6096  }
6097  else {
6098  gc_writebarrier_generational(a, b, objspace);
6099  }
6100  }
6101  else {
6102  bool retry = false;
6103  /* slow path */
6104  int lev = rb_gc_vm_lock_no_barrier();
6105  {
6106  if (is_incremental_marking(objspace)) {
6107  gc_writebarrier_incremental(a, b, objspace);
6108  }
6109  else {
6110  retry = true;
6111  }
6112  }
6113  rb_gc_vm_unlock_no_barrier(lev);
6114 
6115  if (retry) goto retry;
6116  }
6117  return;
6118 }
6119 
6120 void
6121 rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
6122 {
6123  rb_objspace_t *objspace = objspace_ptr;
6124 
6125  if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
6126  return;
6127  }
6128  else {
6129  gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", rb_obj_info(obj),
6130  RVALUE_REMEMBERED(objspace, obj) ? " (already remembered)" : "");
6131 
6132  unsigned int lev = rb_gc_vm_lock_no_barrier();
6133  {
6134  if (RVALUE_OLD_P(objspace, obj)) {
6135  gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", rb_obj_info(obj));
6136  RVALUE_DEMOTE(objspace, obj);
6137  gc_mark_set(objspace, obj);
6138  gc_remember_unprotected(objspace, obj);
6139 
6140 #if RGENGC_PROFILE
6141  objspace->profile.total_shade_operation_count++;
6142 #if RGENGC_PROFILE >= 2
6143  objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6144 #endif /* RGENGC_PROFILE >= 2 */
6145 #endif /* RGENGC_PROFILE */
6146  }
6147  else {
6148  RVALUE_AGE_RESET(obj);
6149  }
6150 
6151  RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6152  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6153  }
6154  rb_gc_vm_unlock_no_barrier(lev);
6155  }
6156 }
6157 
6158 void
6159 rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
6160 {
6161  rb_objspace_t *objspace = objspace_ptr;
6162 
6163  if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
6164  rb_gc_impl_writebarrier_unprotect(objspace, dest);
6165  }
6166  rb_gc_impl_copy_finalizer(objspace, dest, obj);
6167 }
6168 
6169 void
6170 rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
6171 {
6172  rb_objspace_t *objspace = objspace_ptr;
6173 
6174  gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", rb_obj_info(obj));
6175 
6176  if (is_incremental_marking(objspace)) {
6177  if (RVALUE_BLACK_P(objspace, obj)) {
6178  gc_grey(objspace, obj);
6179  }
6180  }
6181  else {
6182  if (RVALUE_OLD_P(objspace, obj)) {
6183  rgengc_remember(objspace, obj);
6184  }
6185  }
6186 }
6187 
6188 // TODO: rearchitect this function to work for a generic GC
6189 size_t
6190 rb_gc_impl_obj_flags(void *objspace_ptr, VALUE obj, ID* flags, size_t max)
6191 {
6192  rb_objspace_t *objspace = objspace_ptr;
6193  size_t n = 0;
6194  static ID ID_marked;
6195  static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6196 
6197  if (!ID_marked) {
6198 #define I(s) ID_##s = rb_intern(#s);
6199  I(marked);
6200  I(wb_protected);
6201  I(old);
6202  I(marking);
6203  I(uncollectible);
6204  I(pinned);
6205 #undef I
6206  }
6207 
6208  if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0 && n < max) flags[n++] = ID_wb_protected;
6209  if (RVALUE_OLD_P(objspace, obj) && n < max) flags[n++] = ID_old;
6210  if (RVALUE_UNCOLLECTIBLE(objspace, obj) && n < max) flags[n++] = ID_uncollectible;
6211  if (RVALUE_MARKING(objspace, obj) && n < max) flags[n++] = ID_marking;
6212  if (RVALUE_MARKED(objspace, obj) && n < max) flags[n++] = ID_marked;
6213  if (RVALUE_PINNED(objspace, obj) && n < max) flags[n++] = ID_pinned;
6214  return n;
6215 }
6216 
6217 void *
6218 rb_gc_impl_ractor_cache_alloc(void *objspace_ptr)
6219 {
6220  rb_objspace_t *objspace = objspace_ptr;
6221 
6222  objspace->live_ractor_cache_count++;
6223 
6224  return calloc1(sizeof(rb_ractor_newobj_cache_t));
6225 }
6226 
6227 void
6228 rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
6229 {
6230  rb_objspace_t *objspace = objspace_ptr;
6231 
6232  objspace->live_ractor_cache_count--;
6233 
6234  gc_ractor_newobj_cache_clear(cache, NULL);
6235  free(cache);
6236 }
6237 
6238 static void
6239 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
6240 {
6241  if (!heap->free_pages) {
6242  if (!heap_page_allocate_and_initialize(objspace, heap)) {
6243  objspace->heap_pages.allocatable_slots = 1;
6244  heap_page_allocate_and_initialize(objspace, heap);
6245  }
6246  }
6247 }
6248 
6249 static int
6250 ready_to_gc(rb_objspace_t *objspace)
6251 {
6252  if (dont_gc_val() || during_gc || ruby_disable_gc) {
6253  for (int i = 0; i < HEAP_COUNT; i++) {
6254  rb_heap_t *heap = &heaps[i];
6255  heap_ready_to_gc(objspace, heap);
6256  }
6257  return FALSE;
6258  }
6259  else {
6260  return TRUE;
6261  }
6262 }
6263 
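/*
 * Recompute malloc_limit after a GC.  If malloc growth since the last GC
 * exceeded the limit, the limit grows by malloc_limit_growth_factor (capped
 * at malloc_limit_max); otherwise it decays by 2% per GC, bounded below by
 * malloc_limit_min.  The oldmalloc counters are adjusted the same way and
 * can request a major GC via GPR_FLAG_MAJOR_BY_OLDMALLOC.
 */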
6264 static void
6265 gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
6266 {
6267  gc_prof_set_malloc_info(objspace);
6268  {
6269  size_t inc = RUBY_ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
6270  size_t old_limit = malloc_limit;
6271 
6272  if (inc > malloc_limit) {
6273  malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
6274  if (malloc_limit > gc_params.malloc_limit_max) {
6275  malloc_limit = gc_params.malloc_limit_max;
6276  }
6277  }
6278  else {
6279  malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
6280  if (malloc_limit < gc_params.malloc_limit_min) {
6281  malloc_limit = gc_params.malloc_limit_min;
6282  }
6283  }
6284 
6285  if (0) {
6286  if (old_limit != malloc_limit) {
6287  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
6288  rb_gc_count(), old_limit, malloc_limit);
6289  }
6290  else {
6291  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
6292  rb_gc_count(), malloc_limit);
6293  }
6294  }
6295  }
6296 
6297  /* reset oldmalloc info */
6298 #if RGENGC_ESTIMATE_OLDMALLOC
6299  if (!full_mark) {
6300  if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
6301  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
6302  objspace->rgengc.oldmalloc_increase_limit =
6303  (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
6304 
6305  if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
6306  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
6307  }
6308  }
6309 
6310  if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
6311  rb_gc_count(),
6312  gc_needs_major_flags,
6313  objspace->rgengc.oldmalloc_increase,
6314  objspace->rgengc.oldmalloc_increase_limit,
6315  gc_params.oldmalloc_limit_max);
6316  }
6317  else {
6318  /* major GC */
6319  objspace->rgengc.oldmalloc_increase = 0;
6320 
6321  if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
6322  objspace->rgengc.oldmalloc_increase_limit =
6323  (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
6324  if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
6325  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
6326  }
6327  }
6328  }
6329 #endif
6330 }
6331 
6332 static int
6333 garbage_collect(rb_objspace_t *objspace, unsigned int reason)
6334 {
6335  int ret;
6336 
6337  int lev = rb_gc_vm_lock();
6338  {
6339 #if GC_PROFILE_MORE_DETAIL
6340  objspace->profile.prepare_time = getrusage_time();
6341 #endif
6342 
6343  gc_rest(objspace);
6344 
6345 #if GC_PROFILE_MORE_DETAIL
6346  objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
6347 #endif
6348 
6349  ret = gc_start(objspace, reason);
6350  }
6351  rb_gc_vm_unlock(lev);
6352 
6353  return ret;
6354 }
6355 
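/*
 * Start one GC cycle.  The reason flags plus stress and pending-major
 * bookkeeping decide whether this is a full or minor mark, whether marking
 * runs incrementally, whether sweeping is immediate, and whether compaction
 * is requested; marking is then kicked off, followed by sweeping when
 * marking completed in a single step.
 */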
6356 static int
6357 gc_start(rb_objspace_t *objspace, unsigned int reason)
6358 {
6359  unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
6360 
6361  /* reason may be clobbered, later, so keep set immediate_sweep here */
6362  objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
6363 
6364  if (!rb_darray_size(objspace->heap_pages.sorted)) return TRUE; /* heap is not ready */
6365  if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
6366 
6367  GC_ASSERT(gc_mode(objspace) == gc_mode_none);
6368  GC_ASSERT(!is_lazy_sweeping(objspace));
6369  GC_ASSERT(!is_incremental_marking(objspace));
6370 
6371  unsigned int lock_lev;
6372  gc_enter(objspace, gc_enter_event_start, &lock_lev);
6373 
6374 #if RGENGC_CHECK_MODE >= 2
6375  gc_verify_internal_consistency(objspace);
6376 #endif
6377 
6378  if (ruby_gc_stressful) {
6379  int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
6380 
6381  if ((flag & (1 << gc_stress_no_major)) == 0) {
6382  do_full_mark = TRUE;
6383  }
6384 
6385  objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
6386  }
6387 
6388  if (gc_needs_major_flags) {
6389  reason |= gc_needs_major_flags;
6390  do_full_mark = TRUE;
6391  }
6392  else if (RGENGC_FORCE_MAJOR_GC) {
6393  reason = GPR_FLAG_MAJOR_BY_FORCE;
6394  do_full_mark = TRUE;
6395  }
6396 
6397  /* if major gc has been disabled, never do a full mark */
6398  if (!gc_config_full_mark_val) {
6399  do_full_mark = FALSE;
6400  }
6401  gc_needs_major_flags = GPR_FLAG_NONE;
6402 
6403  if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
6404  reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
6405  }
6406 
6407  if (objspace->flags.dont_incremental ||
6408  reason & GPR_FLAG_IMMEDIATE_MARK ||
6409  ruby_gc_stressful) {
6410  objspace->flags.during_incremental_marking = FALSE;
6411  }
6412  else {
6413  objspace->flags.during_incremental_marking = do_full_mark;
6414  }
6415 
6416  /* Explicitly enable compaction (GC.compact) */
6417  if (do_full_mark && ruby_enable_autocompact) {
6418  objspace->flags.during_compacting = TRUE;
6419 #if RGENGC_CHECK_MODE
6420  objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
6421 #endif
6422  }
6423  else {
6424  objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
6425  }
6426 
6427  if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
6428  objspace->flags.immediate_sweep = TRUE;
6429  }
6430 
6431  if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
6432 
6433  gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
6434  reason,
6435  do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
6436 
6437 #if USE_DEBUG_COUNTER
6438  RB_DEBUG_COUNTER_INC(gc_count);
6439 
6440  if (reason & GPR_FLAG_MAJOR_MASK) {
6441  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
6442  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
6443  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
6444  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
6445 #if RGENGC_ESTIMATE_OLDMALLOC
6446  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
6447 #endif
6448  }
6449  else {
6450  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
6451  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
6452  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
6453  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
6454  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
6455  }
6456 #endif
6457 
6458  objspace->profile.count++;
6459  objspace->profile.latest_gc_info = reason;
6460  objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
6461  objspace->profile.heap_used_at_gc_start = rb_darray_size(objspace->heap_pages.sorted);
6462  objspace->profile.weak_references_count = 0;
6463  objspace->profile.retained_weak_references_count = 0;
6464  gc_prof_setup_new_record(objspace, reason);
6465  gc_reset_malloc_info(objspace, do_full_mark);
6466 
6467  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);
6468 
6469  GC_ASSERT(during_gc);
6470 
6471  gc_prof_timer_start(objspace);
6472  {
6473  if (gc_marks(objspace, do_full_mark)) {
6474  gc_sweep(objspace);
6475  }
6476  }
6477  gc_prof_timer_stop(objspace);
6478 
6479  gc_exit(objspace, gc_enter_event_start, &lock_lev);
6480  return TRUE;
6481 }
6482 
6483 static void
6484 gc_rest(rb_objspace_t *objspace)
6485 {
6486  if (is_incremental_marking(objspace) || is_lazy_sweeping(objspace)) {
6487  unsigned int lock_lev;
6488  gc_enter(objspace, gc_enter_event_rest, &lock_lev);
6489 
6490  if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
6491 
6492  if (is_incremental_marking(objspace)) {
6493  gc_marking_enter(objspace);
6494  gc_marks_rest(objspace);
6495  gc_marking_exit(objspace);
6496 
6497  gc_sweep(objspace);
6498  }
6499 
6500  if (is_lazy_sweeping(objspace)) {
6501  gc_sweeping_enter(objspace);
6502  gc_sweep_rest(objspace);
6503  gc_sweeping_exit(objspace);
6504  }
6505 
6506  gc_exit(objspace, gc_enter_event_rest, &lock_lev);
6507  }
6508 }
6509 
6510 struct objspace_and_reason {
6511  rb_objspace_t *objspace;
6512  unsigned int reason;
6513 };
6514 
6515 static void
6516 gc_current_status_fill(rb_objspace_t *objspace, char *buff)
6517 {
6518  int i = 0;
6519  if (is_marking(objspace)) {
6520  buff[i++] = 'M';
6521  if (is_full_marking(objspace)) buff[i++] = 'F';
6522  if (is_incremental_marking(objspace)) buff[i++] = 'I';
6523  }
6524  else if (is_sweeping(objspace)) {
6525  buff[i++] = 'S';
6526  if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
6527  }
6528  else {
6529  buff[i++] = 'N';
6530  }
6531  buff[i] = '\0';
6532 }
6533 
6534 static const char *
6535 gc_current_status(rb_objspace_t *objspace)
6536 {
6537  static char buff[0x10];
6538  gc_current_status_fill(objspace, buff);
6539  return buff;
6540 }
6541 
6542 #if PRINT_ENTER_EXIT_TICK
6543 
6544 static tick_t last_exit_tick;
6545 static tick_t enter_tick;
6546 static int enter_count = 0;
6547 static char last_gc_status[0x10];
6548 
6549 static inline void
6550 gc_record(rb_objspace_t *objspace, int direction, const char *event)
6551 {
6552  if (direction == 0) { /* enter */
6553  enter_count++;
6554  enter_tick = tick();
6555  gc_current_status_fill(objspace, last_gc_status);
6556  }
6557  else { /* exit */
6558  tick_t exit_tick = tick();
6559  char current_gc_status[0x10];
6560  gc_current_status_fill(objspace, current_gc_status);
6561 #if 1
6562  /* [last mutator time] [gc time] [event] */
6563  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6564  enter_tick - last_exit_tick,
6565  exit_tick - enter_tick,
6566  event,
6567  last_gc_status, current_gc_status,
6568  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6569  last_exit_tick = exit_tick;
6570 #else
6571  /* [enter_tick] [gc time] [event] */
6572  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6573  enter_tick,
6574  exit_tick - enter_tick,
6575  event,
6576  last_gc_status, current_gc_status,
6577  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6578 #endif
6579  }
6580 }
6581 #else /* PRINT_ENTER_EXIT_TICK */
6582 static inline void
6583 gc_record(rb_objspace_t *objspace, int direction, const char *event)
6584 {
6585  /* null */
6586 }
6587 #endif /* PRINT_ENTER_EXIT_TICK */
6588 
6589 static const char *
6590 gc_enter_event_cstr(enum gc_enter_event event)
6591 {
6592  switch (event) {
6593  case gc_enter_event_start: return "start";
6594  case gc_enter_event_continue: return "continue";
6595  case gc_enter_event_rest: return "rest";
6596  case gc_enter_event_finalizer: return "finalizer";
6597  }
6598  return NULL;
6599 }
6600 
6601 static void
6602 gc_enter_count(enum gc_enter_event event)
6603 {
6604  switch (event) {
6605  case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
6606  case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
6607  case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
6608  case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
6609  }
6610 }
6611 
6612 static bool current_process_time(struct timespec *ts);
6613 
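/*
 * Per-phase timing helpers used when MEASURE_GC is enabled:
 * gc_clock_start() records a starting timestamp via current_process_time()
 * (zeroed if unavailable) and gc_clock_end() returns the elapsed
 * nanoseconds, or 0 when no valid interval can be computed.
 */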
6614 static void
6615 gc_clock_start(struct timespec *ts)
6616 {
6617  if (!current_process_time(ts)) {
6618  ts->tv_sec = 0;
6619  ts->tv_nsec = 0;
6620  }
6621 }
6622 
6623 static unsigned long long
6624 gc_clock_end(struct timespec *ts)
6625 {
6626  struct timespec end_time;
6627 
6628  if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
6629  current_process_time(&end_time) &&
6630  end_time.tv_sec >= ts->tv_sec) {
6631  return (unsigned long long)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
6632  (end_time.tv_nsec - ts->tv_nsec);
6633  }
6634 
6635  return 0;
6636 }
6637 
6638 static inline void
6639 gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
6640 {
6641  *lock_lev = rb_gc_vm_lock();
6642 
6643  switch (event) {
6644  case gc_enter_event_rest:
6645  if (!is_marking(objspace)) break;
6646  // fall through
6647  case gc_enter_event_start:
6648  case gc_enter_event_continue:
6649  // stop other ractors
6650  rb_gc_vm_barrier();
6651  break;
6652  default:
6653  break;
6654  }
6655 
6656  gc_enter_count(event);
6657  if (RB_UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
6658  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
6659 
6660  during_gc = TRUE;
6661  RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
6662  gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
6663  gc_record(objspace, 0, gc_enter_event_cstr(event));
6664 
6665  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_ENTER);
6666 }
6667 
6668 static inline void
6669 gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
6670 {
6671  GC_ASSERT(during_gc != 0);
6672 
6673  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_EXIT);
6674 
6675  gc_record(objspace, 1, gc_enter_event_cstr(event));
6676  RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
6677  gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
6678  during_gc = FALSE;
6679 
6680  rb_gc_vm_unlock(*lock_lev);
6681 }
6682 
6683 #ifndef MEASURE_GC
6684 #define MEASURE_GC (objspace->flags.measure_gc)
6685 #endif
6686 
6687 static void
6688 gc_marking_enter(rb_objspace_t *objspace)
6689 {
6690  GC_ASSERT(during_gc != 0);
6691 
6692  if (MEASURE_GC) {
6693  gc_clock_start(&objspace->profile.marking_start_time);
6694  }
6695 }
6696 
6697 static void
6698 gc_marking_exit(rb_objspace_t *objspace)
6699 {
6700  GC_ASSERT(during_gc != 0);
6701 
6702  if (MEASURE_GC) {
6703  objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
6704  }
6705 }
6706 
6707 static void
6708 gc_sweeping_enter(rb_objspace_t *objspace)
6709 {
6710  GC_ASSERT(during_gc != 0);
6711 
6712  if (MEASURE_GC) {
6713  gc_clock_start(&objspace->profile.sweeping_start_time);
6714  }
6715 }
6716 
6717 static void
6718 gc_sweeping_exit(rb_objspace_t *objspace)
6719 {
6720  GC_ASSERT(during_gc != 0);
6721 
6722  if (MEASURE_GC) {
6723  objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
6724  }
6725 }
6726 
6727 static void *
6728 gc_with_gvl(void *ptr)
6729 {
6730  struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
6731  return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
6732 }
6733 
6734 int ruby_thread_has_gvl_p(void);
6735 
6736 static int
6737 garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
6738 {
6739  if (dont_gc_val()) return TRUE;
6740  if (ruby_thread_has_gvl_p()) {
6741  return garbage_collect(objspace, reason);
6742  }
6743  else {
6744  if (ruby_native_thread_p()) {
6745  struct objspace_and_reason oar;
6746  oar.objspace = objspace;
6747  oar.reason = reason;
6748  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
6749  }
6750  else {
6751  /* no ruby thread */
6752  fprintf(stderr, "[FATAL] failed to allocate memory\n");
6753  exit(EXIT_FAILURE);
6754  }
6755  }
6756 }
6757 
6758 static int
6759 gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
6760 {
6761  rb_objspace_t *objspace = (rb_objspace_t *)data;
6762 
6763  VALUE v = (VALUE)vstart;
6764  for (; v != (VALUE)vend; v += stride) {
6765  asan_unpoisoning_object(v) {
6766  switch (BUILTIN_TYPE(v)) {
6767  case T_NONE:
6768  case T_ZOMBIE:
6769  break;
6770  default:
6771  rb_gc_prepare_heap_process_object(v);
6772  if (!RVALUE_OLD_P(objspace, v) && !RVALUE_WB_UNPROTECTED(objspace, v)) {
6773  RVALUE_AGE_SET_CANDIDATE(objspace, v);
6774  }
6775  }
6776  }
6777  }
6778 
6779  return 0;
6780 }
6781 
6782 void
6783 rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
6784 {
6785  rb_objspace_t *objspace = objspace_ptr;
6786  unsigned int reason = (GPR_FLAG_FULL_MARK |
6787  GPR_FLAG_IMMEDIATE_MARK |
6788  GPR_FLAG_IMMEDIATE_SWEEP |
6789  GPR_FLAG_METHOD);
6790 
6791  int full_marking_p = gc_config_full_mark_val;
6792  gc_config_full_mark_set(TRUE);
6793 
6794  /* For now, compact implies full mark / sweep, so ignore other flags */
6795  if (compact) {
6796  GC_ASSERT(GC_COMPACTION_SUPPORTED);
6797 
6798  reason |= GPR_FLAG_COMPACT;
6799  }
6800  else {
6801  if (!full_mark) reason &= ~GPR_FLAG_FULL_MARK;
6802  if (!immediate_mark) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
6803  if (!immediate_sweep) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
6804  }
6805 
6806  garbage_collect(objspace, reason);
6807  gc_finalize_deferred(objspace);
6808 
6809  gc_config_full_mark_set(full_marking_p);
6810 }
6811 
6812 void
6813 rb_gc_impl_prepare_heap(void *objspace_ptr)
6814 {
6815  rb_objspace_t *objspace = objspace_ptr;
6816 
6817  size_t orig_total_slots = objspace_available_slots(objspace);
6818  size_t orig_allocatable_slots = objspace->heap_pages.allocatable_slots;
6819 
6820  rb_gc_impl_each_objects(objspace, gc_set_candidate_object_i, objspace_ptr);
6821 
6822  double orig_max_free_slots = gc_params.heap_free_slots_max_ratio;
6823  /* Ensure that all empty pages are moved onto empty_pages. */
6824  gc_params.heap_free_slots_max_ratio = 0.0;
6825  rb_gc_impl_start(objspace, true, true, true, true);
6826  gc_params.heap_free_slots_max_ratio = orig_max_free_slots;
6827 
6828  objspace->heap_pages.allocatable_slots = 0;
6829  heap_pages_free_unused_pages(objspace_ptr);
6830  GC_ASSERT(objspace->empty_pages_count == 0);
6831  objspace->heap_pages.allocatable_slots = orig_allocatable_slots;
6832 
6833  size_t total_slots = objspace_available_slots(objspace);
6834  if (orig_total_slots > total_slots) {
6835  objspace->heap_pages.allocatable_slots += orig_total_slots - total_slots;
6836  }
6837 
6838 #if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
6839  malloc_trim(0);
6840 #endif
6841 }
6842 
6843 static int
6844 gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
6845 {
6846  GC_ASSERT(!SPECIAL_CONST_P(obj));
6847 
6848  switch (BUILTIN_TYPE(obj)) {
6849  case T_NONE:
6850  case T_MOVED:
6851  case T_ZOMBIE:
6852  return FALSE;
6853  case T_SYMBOL:
6854  // TODO: restore original behavior
6855  // if (RSYMBOL(obj)->id & ~ID_SCOPE_MASK) {
6856  // return FALSE;
6857  // }
6858  return false;
6859  /* fall through */
6860  case T_STRING:
6861  case T_OBJECT:
6862  case T_FLOAT:
6863  case T_IMEMO:
6864  case T_ARRAY:
6865  case T_BIGNUM:
6866  case T_ICLASS:
6867  case T_MODULE:
6868  case T_REGEXP:
6869  case T_DATA:
6870  case T_MATCH:
6871  case T_STRUCT:
6872  case T_HASH:
6873  case T_FILE:
6874  case T_COMPLEX:
6875  case T_RATIONAL:
6876  case T_NODE:
6877  case T_CLASS:
6878  if (FL_TEST(obj, FL_FINALIZE)) {
6879  /* The finalizer table is a numtable. It looks up objects by address.
6880  * We can't mark the keys in the finalizer table because that would
6881  * prevent the objects from being collected. This check prevents
6882  * objects that are keys in the finalizer table from being moved
6883  * without directly pinning them. */
6884  GC_ASSERT(st_is_member(finalizer_table, obj));
6885 
6886  return FALSE;
6887  }
6888  GC_ASSERT(RVALUE_MARKED(objspace, obj));
6889  GC_ASSERT(!RVALUE_PINNED(objspace, obj));
6890 
6891  return TRUE;
6892 
6893  default:
6894  rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
6895  break;
6896  }
6897 
6898  return FALSE;
6899 }
6900 
6901 void rb_mv_generic_ivar(VALUE src, VALUE dst);
6902 
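/*
 * Copy `src' into `dest' (possibly a different slot size), carrying over the
 * mark, WB-unprotected, uncollectible, remembered and age state, moving any
 * generic ivars and object-id table entries, and finally turning the old
 * slot into a T_MOVED forwarding stub that records the new location.
 */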
6903 static VALUE
6904 gc_move(rb_objspace_t *objspace, VALUE src, VALUE dest, size_t src_slot_size, size_t slot_size)
6905 {
6906  int marked;
6907  int wb_unprotected;
6908  int uncollectible;
6909  int age;
6910 
6911  gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)src, (void *)dest);
6912 
6913  GC_ASSERT(BUILTIN_TYPE(src) != T_NONE);
6914  GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
6915 
6916  GC_ASSERT(!RVALUE_MARKING(objspace, src));
6917 
6918  /* Save off bits for current object. */
6919  marked = RVALUE_MARKED(objspace, src);
6920  wb_unprotected = RVALUE_WB_UNPROTECTED(objspace, src);
6921  uncollectible = RVALUE_UNCOLLECTIBLE(objspace, src);
6922  bool remembered = RVALUE_REMEMBERED(objspace, src);
6923  age = RVALUE_AGE_GET(src);
6924 
6925  /* Clear bits for eventual T_MOVED */
6926  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(src), src);
6927  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(src), src);
6928  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(src), src);
6929  CLEAR_IN_BITMAP(GET_HEAP_PAGE(src)->remembered_bits, src);
6930 
6931  if (FL_TEST(src, FL_EXIVAR)) {
6932  /* Resizing the st table could cause a malloc */
6933  DURING_GC_COULD_MALLOC_REGION_START();
6934  {
6935  rb_mv_generic_ivar(src, dest);
6936  }
6937  DURING_GC_COULD_MALLOC_REGION_END();
6938  }
6939 
6940  if (FL_TEST(src, FL_SEEN_OBJ_ID)) {
6941  /* If the source object's object_id has been seen, we need to update
6942  * the object to object id mapping. */
6943  st_data_t srcid = (st_data_t)src, id;
6944 
6945  gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
6946  /* Resizing the st table could cause a malloc */
6947  DURING_GC_COULD_MALLOC_REGION_START();
6948  {
6949  if (!st_delete(objspace->obj_to_id_tbl, &srcid, &id)) {
6950  rb_bug("gc_move: object ID seen, but not in mapping table: %s", rb_obj_info((VALUE)src));
6951  }
6952 
6953  st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
6954  }
6955  DURING_GC_COULD_MALLOC_REGION_END();
6956  }
6957  else {
6958  GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)src, NULL));
6959  }
6960 
6961  /* Move the object */
6962  memcpy((void *)dest, (void *)src, MIN(src_slot_size, slot_size));
6963 
6964  if (RVALUE_OVERHEAD > 0) {
6965  void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
6966  void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
6967 
6968  memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
6969  }
6970 
6971  memset((void *)src, 0, src_slot_size);
6972  RVALUE_AGE_RESET(src);
6973 
6974  /* Set bits for object in new location */
6975  if (remembered) {
6976  MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);
6977  }
6978  else {
6979  CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);
6980  }
6981 
6982  if (marked) {
6983  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);
6984  }
6985  else {
6986  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);
6987  }
6988 
6989  if (wb_unprotected) {
6990  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
6991  }
6992  else {
6993  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
6994  }
6995 
6996  if (uncollectible) {
6997  MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
6998  }
6999  else {
7000  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
7001  }
7002 
7003  RVALUE_AGE_SET(dest, age);
7004  /* Assign forwarding address */
7005  RMOVED(src)->flags = T_MOVED;
7006  RMOVED(src)->dummy = Qundef;
7007  RMOVED(src)->destination = dest;
7008  GC_ASSERT(BUILTIN_TYPE(dest) != T_NONE);
7009 
7010  GET_HEAP_PAGE(src)->heap->total_freed_objects++;
7011  GET_HEAP_PAGE(dest)->heap->total_allocated_objects++;
7012 
7013  return src;
7014 }
7015 
7016 #if GC_CAN_COMPILE_COMPACTION
7017 static int
7018 compare_pinned_slots(const void *left, const void *right, void *dummy)
7019 {
7020  struct heap_page *left_page;
7021  struct heap_page *right_page;
7022 
7023  left_page = *(struct heap_page * const *)left;
7024  right_page = *(struct heap_page * const *)right;
7025 
7026  return left_page->pinned_slots - right_page->pinned_slots;
7027 }
7028 
7029 static int
7030 compare_free_slots(const void *left, const void *right, void *dummy)
7031 {
7032  struct heap_page *left_page;
7033  struct heap_page *right_page;
7034 
7035  left_page = *(struct heap_page * const *)left;
7036  right_page = *(struct heap_page * const *)right;
7037 
7038  return left_page->free_slots - right_page->free_slots;
7039 }
7040 
7041 static void
7042 gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
7043 {
7044  for (int j = 0; j < HEAP_COUNT; j++) {
7045  rb_heap_t *heap = &heaps[j];
7046 
7047  size_t total_pages = heap->total_pages;
7048  size_t size = rb_size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
7049  struct heap_page *page = 0, **page_list = malloc(size);
7050  size_t i = 0;
7051 
7052  heap->free_pages = NULL;
7053  ccan_list_for_each(&heap->pages, page, page_node) {
7054  page_list[i++] = page;
7055  GC_ASSERT(page);
7056  }
7057 
7058  GC_ASSERT((size_t)i == total_pages);
7059 
7060  /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
7061  * head of the list, so empty pages will end up at the start of the heap */
7062  ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);
7063 
7064  /* Reset the eden heap */
7065  ccan_list_head_init(&heap->pages);
7066 
7067  for (i = 0; i < total_pages; i++) {
7068  ccan_list_add(&heap->pages, &page_list[i]->page_node);
7069  if (page_list[i]->free_slots != 0) {
7070  heap_add_freepage(heap, page_list[i]);
7071  }
7072  }
7073 
7074  free(page_list);
7075  }
7076 }
7077 #endif
7078 
7079 bool
7080 rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
7081 {
7082  return gc_object_moved_p(objspace_ptr, obj);
7083 }
7084 
7085 static int
7086 gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t *objspace, struct heap_page *page)
7087 {
7088  VALUE v = (VALUE)vstart;
7089 
7090  page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
7091  page->flags.has_remembered_objects = FALSE;
7092 
7093  /* For each object on the page */
7094  for (; v != (VALUE)vend; v += stride) {
7095  asan_unpoisoning_object(v) {
7096  switch (BUILTIN_TYPE(v)) {
7097  case T_NONE:
7098  case T_MOVED:
7099  case T_ZOMBIE:
7100  break;
7101  default:
7102  if (RVALUE_WB_UNPROTECTED(objspace, v)) {
7103  page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
7104  }
7105  if (RVALUE_REMEMBERED(objspace, v)) {
7106  page->flags.has_remembered_objects = TRUE;
7107  }
7108  if (page->flags.before_sweep) {
7109  if (RVALUE_MARKED(objspace, v)) {
7110  rb_gc_update_object_references(objspace, v);
7111  }
7112  }
7113  else {
7114  rb_gc_update_object_references(objspace, v);
7115  }
7116  }
7117  }
7118  }
7119 
7120  return 0;
7121 }
7122 
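/*
 * Reference-updating pass of compaction: walk every page, rewrite pointers
 * that still reference T_MOVED stubs, recompute the per-page remembered /
 * WB-unprotected flags, then fix up the object-id tables, the finalizer
 * table and references held by the VM itself.
 */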
7123 static void
7124 gc_update_references(rb_objspace_t *objspace)
7125 {
7126  objspace->flags.during_reference_updating = true;
7127 
7128  struct heap_page *page = NULL;
7129 
7130  for (int i = 0; i < HEAP_COUNT; i++) {
7131  bool should_set_mark_bits = TRUE;
7132  rb_heap_t *heap = &heaps[i];
7133 
7134  ccan_list_for_each(&heap->pages, page, page_node) {
7135  uintptr_t start = (uintptr_t)page->start;
7136  uintptr_t end = start + (page->total_slots * heap->slot_size);
7137 
7138  gc_ref_update((void *)start, (void *)end, heap->slot_size, objspace, page);
7139  if (page == heap->sweeping_page) {
7140  should_set_mark_bits = FALSE;
7141  }
7142  if (should_set_mark_bits) {
7143  gc_setup_mark_bits(page);
7144  }
7145  }
7146  }
7147  gc_ref_update_table_values_only(objspace->obj_to_id_tbl);
7148  gc_update_table_refs(objspace->id_to_obj_tbl);
7149  gc_update_table_refs(finalizer_table);
7150 
7151  rb_gc_update_vm_references((void *)objspace);
7152 
7153  objspace->flags.during_reference_updating = false;
7154 }
7155 
7156 #if GC_CAN_COMPILE_COMPACTION
7157 static void
7158 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
7159 {
7160  rb_objspace_t *objspace = data;
7161 
7162  if (gc_object_moved_p(objspace, obj)) {
7163  rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, rb_obj_info(rb_gc_impl_location(objspace, obj)));
7164  }
7165 }
7166 
7167 static void
7168 reachable_object_check_moved_i(VALUE ref, void *data)
7169 {
7170  VALUE parent = (VALUE)data;
7171  if (gc_object_moved_p(rb_gc_get_objspace(), ref)) {
7172  rb_bug("Object %s points to MOVED: %p -> %s", rb_obj_info(parent), (void *)ref, rb_obj_info(rb_gc_impl_location(rb_gc_get_objspace(), ref)));
7173  }
7174 }
7175 
7176 static int
7177 heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
7178 {
7179  rb_objspace_t *objspace = data;
7180 
7181  VALUE v = (VALUE)vstart;
7182  for (; v != (VALUE)vend; v += stride) {
7183  if (gc_object_moved_p(objspace, v)) {
7184  /* Moved object still on the heap, something may have a reference. */
7185  }
7186  else {
7187  asan_unpoisoning_object(v) {
7188  switch (BUILTIN_TYPE(v)) {
7189  case T_NONE:
7190  case T_ZOMBIE:
7191  break;
7192  default:
7193  if (!rb_gc_impl_garbage_object_p(objspace, v)) {
7194  rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
7195  }
7196  }
7197  }
7198  }
7199  }
7200 
7201  return 0;
7202 }
7203 #endif
7204 
7205 bool
7206 rb_gc_impl_during_gc_p(void *objspace_ptr)
7207 {
7208  rb_objspace_t *objspace = objspace_ptr;
7209 
7210  return during_gc;
7211 }
7212 
7213 #if RGENGC_PROFILE >= 2
7214 
7215 static const char*
7216 type_name(int type, VALUE obj)
7217 {
7218  switch ((enum ruby_value_type)type) {
7219  case RUBY_T_NONE: return "T_NONE";
7220  case RUBY_T_OBJECT: return "T_OBJECT";
7221  case RUBY_T_CLASS: return "T_CLASS";
7222  case RUBY_T_MODULE: return "T_MODULE";
7223  case RUBY_T_FLOAT: return "T_FLOAT";
7224  case RUBY_T_STRING: return "T_STRING";
7225  case RUBY_T_REGEXP: return "T_REGEXP";
7226  case RUBY_T_ARRAY: return "T_ARRAY";
7227  case RUBY_T_HASH: return "T_HASH";
7228  case RUBY_T_STRUCT: return "T_STRUCT";
7229  case RUBY_T_BIGNUM: return "T_BIGNUM";
7230  case RUBY_T_FILE: return "T_FILE";
7231  case RUBY_T_DATA: return "T_DATA";
7232  case RUBY_T_MATCH: return "T_MATCH";
7233  case RUBY_T_COMPLEX: return "T_COMPLEX";
7234  case RUBY_T_RATIONAL: return "T_RATIONAL";
7235  case RUBY_T_NIL: return "T_NIL";
7236  case RUBY_T_TRUE: return "T_TRUE";
7237  case RUBY_T_FALSE: return "T_FALSE";
7238  case RUBY_T_SYMBOL: return "T_SYMBOL";
7239  case RUBY_T_FIXNUM: return "T_FIXNUM";
7240  case RUBY_T_UNDEF: return "T_UNDEF";
7241  case RUBY_T_IMEMO: return "T_IMEMO";
7242  case RUBY_T_NODE: return "T_NODE";
7243  case RUBY_T_ICLASS: return "T_ICLASS";
7244  case RUBY_T_ZOMBIE: return "T_ZOMBIE";
7245  case RUBY_T_MOVED: return "T_MOVED";
7246  default: return "unknown";
7247  }
7248 }
7249 
7250 static void
7251 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
7252 {
7253  VALUE result = rb_hash_new_with_size(T_MASK);
7254  int i;
7255  for (i=0; i<T_MASK; i++) {
7256  const char *type = type_name(i, 0);
7257  rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
7258  }
7259  rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
7260 }
7261 #endif
7262 
7263 size_t
7264 rb_gc_impl_gc_count(void *objspace_ptr)
7265 {
7266  rb_objspace_t *objspace = objspace_ptr;
7267 
7268  return objspace->profile.count;
7269 }
7270 
7271 static VALUE
7272 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
7273 {
7274  static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
7275  static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
7276 #if RGENGC_ESTIMATE_OLDMALLOC
7277  static VALUE sym_oldmalloc;
7278 #endif
7279  static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
7280  static VALUE sym_none, sym_marking, sym_sweeping;
7281  static VALUE sym_weak_references_count, sym_retained_weak_references_count;
7282  VALUE hash = Qnil, key = Qnil;
7283  VALUE major_by, need_major_by;
7284  unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
7285 
7286  if (SYMBOL_P(hash_or_key)) {
7287  key = hash_or_key;
7288  }
7289  else if (RB_TYPE_P(hash_or_key, T_HASH)) {
7290  hash = hash_or_key;
7291  }
7292  else {
7293  rb_bug("gc_info_decode: non-hash or symbol given");
7294  }
7295 
7296  if (NIL_P(sym_major_by)) {
7297 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
7298  S(major_by);
7299  S(gc_by);
7300  S(immediate_sweep);
7301  S(have_finalizer);
7302  S(state);
7303  S(need_major_by);
7304 
7305  S(stress);
7306  S(nofree);
7307  S(oldgen);
7308  S(shady);
7309  S(force);
7310 #if RGENGC_ESTIMATE_OLDMALLOC
7311  S(oldmalloc);
7312 #endif
7313  S(newobj);
7314  S(malloc);
7315  S(method);
7316  S(capi);
7317 
7318  S(none);
7319  S(marking);
7320  S(sweeping);
7321 
7322  S(weak_references_count);
7323  S(retained_weak_references_count);
7324 #undef S
7325  }
7326 
7327 #define SET(name, attr) \
7328  if (key == sym_##name) \
7329  return (attr); \
7330  else if (hash != Qnil) \
7331  rb_hash_aset(hash, sym_##name, (attr));
7332 
7333  major_by =
7334  (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7335  (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7336  (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7337  (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7338 #if RGENGC_ESTIMATE_OLDMALLOC
7339  (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7340 #endif
7341  Qnil;
7342  SET(major_by, major_by);
7343 
7344  if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
7345  unsigned int need_major_flags = gc_needs_major_flags;
7346  need_major_by =
7347  (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7348  (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7349  (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7350  (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7351 #if RGENGC_ESTIMATE_OLDMALLOC
7352  (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7353 #endif
7354  Qnil;
7355  SET(need_major_by, need_major_by);
7356  }
7357 
7358  SET(gc_by,
7359  (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
7360  (flags & GPR_FLAG_MALLOC) ? sym_malloc :
7361  (flags & GPR_FLAG_METHOD) ? sym_method :
7362  (flags & GPR_FLAG_CAPI) ? sym_capi :
7363  (flags & GPR_FLAG_STRESS) ? sym_stress :
7364  Qnil
7365  );
7366 
7367  SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
7368  SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
7369 
7370  if (orig_flags == 0) {
7371  SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
7372  gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
7373  }
7374 
7375  SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
7376  SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));
7377 #undef SET
7378 
7379  if (!NIL_P(key)) {
7380  // Matched key should return above
7381  return Qundef;
7382  }
7383 
7384  return hash;
7385 }
7386 
7387 VALUE
7388 rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE key)
7389 {
7390  rb_objspace_t *objspace = objspace_ptr;
7391 
7392  return gc_info_decode(objspace, key, 0);
7393 }
7394 
7395 
7396 enum gc_stat_sym {
7397  gc_stat_sym_count,
7398  gc_stat_sym_time,
7399  gc_stat_sym_marking_time,
7400  gc_stat_sym_sweeping_time,
7401  gc_stat_sym_heap_allocated_pages,
7402  gc_stat_sym_heap_empty_pages,
7403  gc_stat_sym_heap_allocatable_slots,
7404  gc_stat_sym_heap_available_slots,
7405  gc_stat_sym_heap_live_slots,
7406  gc_stat_sym_heap_free_slots,
7407  gc_stat_sym_heap_final_slots,
7408  gc_stat_sym_heap_marked_slots,
7409  gc_stat_sym_heap_eden_pages,
7410  gc_stat_sym_total_allocated_pages,
7411  gc_stat_sym_total_freed_pages,
7412  gc_stat_sym_total_allocated_objects,
7413  gc_stat_sym_total_freed_objects,
7414  gc_stat_sym_malloc_increase_bytes,
7415  gc_stat_sym_malloc_increase_bytes_limit,
7416  gc_stat_sym_minor_gc_count,
7417  gc_stat_sym_major_gc_count,
7418  gc_stat_sym_compact_count,
7419  gc_stat_sym_read_barrier_faults,
7420  gc_stat_sym_total_moved_objects,
7421  gc_stat_sym_remembered_wb_unprotected_objects,
7422  gc_stat_sym_remembered_wb_unprotected_objects_limit,
7423  gc_stat_sym_old_objects,
7424  gc_stat_sym_old_objects_limit,
7425 #if RGENGC_ESTIMATE_OLDMALLOC
7426  gc_stat_sym_oldmalloc_increase_bytes,
7427  gc_stat_sym_oldmalloc_increase_bytes_limit,
7428 #endif
7429  gc_stat_sym_weak_references_count,
7430 #if RGENGC_PROFILE
7431  gc_stat_sym_total_generated_normal_object_count,
7432  gc_stat_sym_total_generated_shady_object_count,
7433  gc_stat_sym_total_shade_operation_count,
7434  gc_stat_sym_total_promoted_count,
7435  gc_stat_sym_total_remembered_normal_object_count,
7436  gc_stat_sym_total_remembered_shady_object_count,
7437 #endif
7438  gc_stat_sym_last
7439 };
7440 
7441 static VALUE gc_stat_symbols[gc_stat_sym_last];
7442 
7443 static void
7444 setup_gc_stat_symbols(void)
7445 {
7446  if (gc_stat_symbols[0] == 0) {
7447 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
7448  S(count);
7449  S(time);
7451  S(marking_time);
7452  S(sweeping_time);
7452  S(heap_allocated_pages);
7453  S(heap_empty_pages);
7454  S(heap_allocatable_slots);
7455  S(heap_available_slots);
7456  S(heap_live_slots);
7457  S(heap_free_slots);
7458  S(heap_final_slots);
7459  S(heap_marked_slots);
7460  S(heap_eden_pages);
7461  S(total_allocated_pages);
7462  S(total_freed_pages);
7463  S(total_allocated_objects);
7464  S(total_freed_objects);
7465  S(malloc_increase_bytes);
7466  S(malloc_increase_bytes_limit);
7467  S(minor_gc_count);
7468  S(major_gc_count);
7469  S(compact_count);
7470  S(read_barrier_faults);
7471  S(total_moved_objects);
7472  S(remembered_wb_unprotected_objects);
7473  S(remembered_wb_unprotected_objects_limit);
7474  S(old_objects);
7475  S(old_objects_limit);
7476 #if RGENGC_ESTIMATE_OLDMALLOC
7477  S(oldmalloc_increase_bytes);
7478  S(oldmalloc_increase_bytes_limit);
7479 #endif
7480  S(weak_references_count);
7481 #if RGENGC_PROFILE
7482  S(total_generated_normal_object_count);
7483  S(total_generated_shady_object_count);
7484  S(total_shade_operation_count);
7485  S(total_promoted_count);
7486  S(total_remembered_normal_object_count);
7487  S(total_remembered_shady_object_count);
7488 #endif /* RGENGC_PROFILE */
7489 #undef S
7490  }
7491 }
7492 
7493 static uint64_t
7494 ns_to_ms(uint64_t ns)
7495 {
7496  return ns / (1000 * 1000);
7497 }
7498 
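/*
 * Implementation-side counterpart of GC.stat.  Accepts either a Hash to be
 * filled with every counter, or a Symbol naming a single counter (e.g.
 * :heap_live_slots), in which case only that value is computed and returned.
 * Times are accumulated internally in nanoseconds and reported here in
 * milliseconds via ns_to_ms().
 */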
7499 VALUE
7500 rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
7501 {
7502  rb_objspace_t *objspace = objspace_ptr;
7503  VALUE hash = Qnil, key = Qnil;
7504 
7505  setup_gc_stat_symbols();
7506 
7507  if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7508  hash = hash_or_sym;
7509  }
7510  else if (SYMBOL_P(hash_or_sym)) {
7511  key = hash_or_sym;
7512  }
7513  else {
7514  rb_bug("non-hash or symbol given");
7515  }
7516 
7517 #define SET(name, attr) \
7518  if (key == gc_stat_symbols[gc_stat_sym_##name]) \
7519  return SIZET2NUM(attr); \
7520  else if (hash != Qnil) \
7521  rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
7522 
7523  SET(count, objspace->profile.count);
7524  SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns)); // TODO: UINT64T2NUM
7525  SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
7526  SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
7527 
7528  /* implementation dependent counters */
7529  SET(heap_allocated_pages, rb_darray_size(objspace->heap_pages.sorted));
7530  SET(heap_empty_pages, objspace->empty_pages_count);
7531  SET(heap_allocatable_slots, objspace->heap_pages.allocatable_slots);
7532  SET(heap_available_slots, objspace_available_slots(objspace));
7533  SET(heap_live_slots, objspace_live_slots(objspace));
7534  SET(heap_free_slots, objspace_free_slots(objspace));
7535  SET(heap_final_slots, total_final_slots_count(objspace));
7536  SET(heap_marked_slots, objspace->marked_slots);
7537  SET(heap_eden_pages, heap_eden_total_pages(objspace));
7538  SET(total_allocated_pages, objspace->heap_pages.allocated_pages);
7539  SET(total_freed_pages, objspace->heap_pages.freed_pages);
7540  SET(total_allocated_objects, total_allocated_objects(objspace));
7541  SET(total_freed_objects, total_freed_objects(objspace));
7542  SET(malloc_increase_bytes, malloc_increase);
7543  SET(malloc_increase_bytes_limit, malloc_limit);
7544  SET(minor_gc_count, objspace->profile.minor_gc_count);
7545  SET(major_gc_count, objspace->profile.major_gc_count);
7546  SET(compact_count, objspace->profile.compact_count);
7547  SET(read_barrier_faults, objspace->profile.read_barrier_faults);
7548  SET(total_moved_objects, objspace->rcompactor.total_moved);
7549  SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
7550  SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
7551  SET(old_objects, objspace->rgengc.old_objects);
7552  SET(old_objects_limit, objspace->rgengc.old_objects_limit);
7553 #if RGENGC_ESTIMATE_OLDMALLOC
7554  SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
7555  SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
7556 #endif
7557 
7558 #if RGENGC_PROFILE
7559  SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
7560  SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
7561  SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
7562  SET(total_promoted_count, objspace->profile.total_promoted_count);
7563  SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
7564  SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
7565 #endif /* RGENGC_PROFILE */
7566 #undef SET
7567 
7568  if (!NIL_P(key)) {
7569  // Matched key should return above
7570  return Qundef;
7571  }
7572 
7573 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
7574  if (hash != Qnil) {
7575  gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
7576  gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
7577  gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
7578  gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
7579  gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
7580  gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
7581  }
7582 #endif
7583 
7584  return hash;
7585 }
7586 
7587 enum gc_stat_heap_sym {
7588  gc_stat_heap_sym_slot_size,
7589  gc_stat_heap_sym_heap_eden_pages,
7590  gc_stat_heap_sym_heap_eden_slots,
7591  gc_stat_heap_sym_total_allocated_pages,
7592  gc_stat_heap_sym_force_major_gc_count,
7593  gc_stat_heap_sym_force_incremental_marking_finish_count,
7594  gc_stat_heap_sym_total_allocated_objects,
7595  gc_stat_heap_sym_total_freed_objects,
7596  gc_stat_heap_sym_last
7597 };
7598 
7599 static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
7600 
7601 static void
7602 setup_gc_stat_heap_symbols(void)
7603 {
7604  if (gc_stat_heap_symbols[0] == 0) {
7605 #define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
7606  S(slot_size);
7607  S(heap_eden_pages);
7608  S(heap_eden_slots);
7609  S(total_allocated_pages);
7610  S(force_major_gc_count);
7611  S(force_incremental_marking_finish_count);
7612  S(total_allocated_objects);
7613  S(total_freed_objects);
7614 #undef S
7615  }
7616 }
7617 
7618 static VALUE
7619 stat_one_heap(rb_heap_t *heap, VALUE hash, VALUE key)
7620 {
7621 #define SET(name, attr) \
7622  if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
7623  return SIZET2NUM(attr); \
7624  else if (hash != Qnil) \
7625  rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
7626 
7627  SET(slot_size, heap->slot_size);
7628  SET(heap_eden_pages, heap->total_pages);
7629  SET(heap_eden_slots, heap->total_slots);
7630  SET(total_allocated_pages, heap->total_allocated_pages);
7631  SET(force_major_gc_count, heap->force_major_gc_count);
7632  SET(force_incremental_marking_finish_count, heap->force_incremental_marking_finish_count);
7633  SET(total_allocated_objects, heap->total_allocated_objects);
7634  SET(total_freed_objects, heap->total_freed_objects);
7635 #undef SET
7636 
7637  if (!NIL_P(key)) {
7638  // Matched key should return above
7639  return Qundef;
7640  }
7641 
7642  return hash;
7643 }
7644 
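/*
 * Per-heap counterpart of rb_gc_impl_stat.  heap_name may be nil, in which
 * case hash_or_sym must be a Hash and is filled with one per-heap Hash keyed
 * by heap index; or an Integer heap index, in which case hash_or_sym may be
 * either a Hash to fill or a Symbol naming a single per-heap counter.
 */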
7645 VALUE
7646 rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
7647 {
7648  rb_objspace_t *objspace = objspace_ptr;
7649 
7650  setup_gc_stat_heap_symbols();
7651 
7652  if (NIL_P(heap_name)) {
7653  if (!RB_TYPE_P(hash_or_sym, T_HASH)) {
7654  rb_bug("non-hash given");
7655  }
7656 
7657  for (int i = 0; i < HEAP_COUNT; i++) {
7658  VALUE hash = rb_hash_aref(hash_or_sym, INT2FIX(i));
7659  if (NIL_P(hash)) {
7660  hash = rb_hash_new();
7661  rb_hash_aset(hash_or_sym, INT2FIX(i), hash);
7662  }
7663 
7664  stat_one_heap(&heaps[i], hash, Qnil);
7665  }
7666  }
7667  else if (FIXNUM_P(heap_name)) {
7668  int heap_idx = FIX2INT(heap_name);
7669 
7670  if (heap_idx < 0 || heap_idx >= HEAP_COUNT) {
7671  rb_raise(rb_eArgError, "size pool index out of range");
7672  }
7673 
7674  if (SYMBOL_P(hash_or_sym)) {
7675  return stat_one_heap(&heaps[heap_idx], Qnil, hash_or_sym);
7676  }
7677  else if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7678  return stat_one_heap(&heaps[heap_idx], hash_or_sym, Qnil);
7679  }
7680  else {
7681  rb_bug("non-hash or symbol given");
7682  }
7683  }
7684  else {
7685  rb_bug("heap_name must be nil or an Integer");
7686  }
7687 
7688  return hash_or_sym;
7689 }
7690 
7691 /* I could include internal.h for this, but doing so undefines some Array macros
7692  * necessary for initialising objects, and I don't want to include all the array
7693  * headers to get them back.
7694  * TODO: Investigate why RARRAY_AREF gets undefined in internal.h
7695  */
7696 #ifndef RBOOL
7697 #define RBOOL(v) (v ? Qtrue : Qfalse)
7698 #endif
7699 
7700 VALUE
7701 rb_gc_impl_config_get(void *objspace_ptr)
7702 {
7703 #define sym(name) ID2SYM(rb_intern_const(name))
7704  rb_objspace_t *objspace = objspace_ptr;
7705  VALUE hash = rb_hash_new();
7706 
7707  rb_hash_aset(hash, sym("rgengc_allow_full_mark"), RBOOL(gc_config_full_mark_val));
7708 
7709  return hash;
7710 }
7711 
7712 static int
7713 gc_config_set_key(st_data_t key, st_data_t value, st_data_t data)
7714 {
7715  rb_objspace_t *objspace = (rb_objspace_t *)data;
7716  if (rb_sym2id(key) == rb_intern("rgengc_allow_full_mark")) {
7717  gc_rest(objspace);
7718  gc_config_full_mark_set(RTEST(value));
7719  }
7720  return ST_CONTINUE;
7721 }
7722 
7723 void
7724 rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
7725 {
7726  rb_objspace_t *objspace = objspace_ptr;
7727 
7728  if (!RB_TYPE_P(hash, T_HASH)) {
7729  rb_raise(rb_eArgError, "expected keyword arguments");
7730  }
7731 
7732  rb_hash_stlike_foreach(hash, gc_config_set_key, (st_data_t)objspace);
7733 }
7734 
7735 VALUE
7736 rb_gc_impl_stress_get(void *objspace_ptr)
7737 {
7738  rb_objspace_t *objspace = objspace_ptr;
7739  return ruby_gc_stress_mode;
7740 }
7741 
7742 void
7743 rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
7744 {
7745  rb_objspace_t *objspace = objspace_ptr;
7746 
7747  objspace->flags.gc_stressful = RTEST(flag);
7748  objspace->gc_stress_mode = flag;
7749 }
7750 
7751 static int
7752 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
7753 {
7754  const char *ptr = getenv(name);
7755  ssize_t val;
7756 
7757  if (ptr != NULL && *ptr) {
7758  size_t unit = 0;
7759  char *end;
7760 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
7761  val = strtoll(ptr, &end, 0);
7762 #else
7763  val = strtol(ptr, &end, 0);
7764 #endif
7765  switch (*end) {
7766  case 'k': case 'K':
7767  unit = 1024;
7768  ++end;
7769  break;
7770  case 'm': case 'M':
7771  unit = 1024*1024;
7772  ++end;
7773  break;
7774  case 'g': case 'G':
7775  unit = 1024*1024*1024;
7776  ++end;
7777  break;
7778  }
7779  while (*end && isspace((unsigned char)*end)) end++;
7780  if (*end) {
7781  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7782  return 0;
7783  }
7784  if (unit > 0) {
7785  if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
7786  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
7787  return 0;
7788  }
7789  val *= unit;
7790  }
7791  if (val > 0 && (size_t)val > lower_bound) {
7792  if (RTEST(ruby_verbose)) {
7793  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
7794  }
7795  *default_value = (size_t)val;
7796  return 1;
7797  }
7798  else {
7799  if (RTEST(ruby_verbose)) {
7800  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
7801  name, val, *default_value, lower_bound);
7802  }
7803  return 0;
7804  }
7805  }
7806  return 0;
7807 }
7808 
7809 static int
7810 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
7811 {
7812  const char *ptr = getenv(name);
7813  double val;
7814 
7815  if (ptr != NULL && *ptr) {
7816  char *end;
7817  val = strtod(ptr, &end);
7818  if (!*ptr || *end) {
7819  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7820  return 0;
7821  }
7822 
7823  if (accept_zero && val == 0.0) {
7824  goto accept;
7825  }
7826  else if (val <= lower_bound) {
7827  if (RTEST(ruby_verbose)) {
7828  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
7829  name, val, *default_value, lower_bound);
7830  }
7831  }
7832  else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
7833  val > upper_bound) {
7834  if (RTEST(ruby_verbose)) {
7835  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
7836  name, val, *default_value, upper_bound);
7837  }
7838  }
7839  else {
7840  goto accept;
7841  }
7842  }
7843  return 0;
7844 
7845  accept:
7846  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
7847  *default_value = val;
7848  return 1;
7849 }
7850 
7851 /*
7852  * GC tuning environment variables
7853  *
7854  * * RUBY_GC_HEAP_FREE_SLOTS
7855  * - Prepare at least this many slots after GC.
7856  * - Allocate slots if there are not enough slots.
7857  * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
7858  * - Allocate slots by this factor.
7859  * - (next slots number) = (current slots number) * (this factor)
7860  * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
7861  * - Allocation rate is limited to this number of slots.
7862  * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
7863  * - Allocate additional pages when the number of free slots is
7864  * lower than the value (total_slots * (this ratio)).
7865  * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
7866  * - Allocate slots to satisfy this formula:
7867  * free_slots = total_slots * goal_ratio
7868  * - In other words, prepare (total_slots * goal_ratio) free slots.
7869  * - if this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
7870  * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
7871  * - Allow pages to be freed when the number of free slots is
7872  * greater than (total_slots * (this ratio)).
7873  * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
7874  * - Do a full GC when the number of old objects exceeds R * N,
7875  * where R is this factor and
7876  * N is the number of old objects just after the last full GC.
7877  *
7878  * * obsolete
7879  * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
7880  * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
7881  *
7882  * * RUBY_GC_MALLOC_LIMIT
7883  * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
7884  * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7885  *
7886  * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
7887  * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
7888  * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7889  */
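/*
 * Example (size values accept the k/K, m/M and g/G suffixes handled by
 * get_envparam_size above; "script.rb" is just a placeholder):
 *
 *   RUBY_GC_MALLOC_LIMIT=16m RUBY_GC_HEAP_GROWTH_FACTOR=1.2 ruby script.rb
 *
 * These variables are read once at startup by rb_gc_impl_set_params below.
 */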
7890 
7891 void
7892 rb_gc_impl_set_params(void *objspace_ptr)
7893 {
7894  rb_objspace_t *objspace = objspace_ptr;
7895  /* RUBY_GC_HEAP_FREE_SLOTS */
7896  if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
7897  /* ok */
7898  }
7899 
7900  for (int i = 0; i < HEAP_COUNT; i++) {
7901  char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
7902  snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);
7903 
7904  get_envparam_size(env_key, &gc_params.heap_init_slots[i], 0);
7905  }
7906 
7907  get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
7908  get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
7909  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
7910  0.0, 1.0, FALSE);
7911  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
7912  gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
7913  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
7914  gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
7915  get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
7916  get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
7917 
7918  if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
7919  malloc_limit = gc_params.malloc_limit_min;
7920  }
7921  get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
7922  if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
7923  gc_params.malloc_limit_max = SIZE_MAX;
7924  }
7925  get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
7926 
7927 #if RGENGC_ESTIMATE_OLDMALLOC
7928  if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
7929  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
7930  }
7931  get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
7932  get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
7933 #endif
7934 }
7935 
7936 static inline size_t
7937 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
7938 {
7939 #ifdef HAVE_MALLOC_USABLE_SIZE
7940  return malloc_usable_size(ptr);
7941 #else
7942  return hint;
7943 #endif
7944 }
7945 
7946 enum memop_type {
7947  MEMOP_TYPE_MALLOC = 0,
7948  MEMOP_TYPE_FREE,
7949  MEMOP_TYPE_REALLOC
7950 };
7951 
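/*
 * Subtract without underflowing: the CAS loop clamps the subtrahend to the
 * current value, so *var never wraps below zero even when several threads
 * race to decrement it.
 */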
7952 static inline void
7953 atomic_sub_nounderflow(size_t *var, size_t sub)
7954 {
7955  if (sub == 0) return;
7956 
7957  while (1) {
7958  size_t val = *var;
7959  if (val < sub) sub = val;
7960  if (RUBY_ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
7961  }
7962 }
7963 
7964 #define gc_stress_full_mark_after_malloc_p() \
7965  (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7966 
7967 static void
7968 objspace_malloc_gc_stress(rb_objspace_t *objspace)
7969 {
7970  if (ruby_gc_stressful && ruby_native_thread_p()) {
7971  unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
7972  GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
7973 
7974  if (gc_stress_full_mark_after_malloc_p()) {
7975  reason |= GPR_FLAG_FULL_MARK;
7976  }
7977  garbage_collect_with_gvl(objspace, reason);
7978  }
7979 }
7980 
7981 static inline bool
7982 objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
7983 {
7984  if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
7985  mem,
7986  type == MEMOP_TYPE_MALLOC ? "malloc" :
7987  type == MEMOP_TYPE_FREE ? "free " :
7988  type == MEMOP_TYPE_REALLOC ? "realloc": "error",
7989  new_size, old_size);
7990  return false;
7991 }
7992 
7993 static bool
7994 objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
7995 {
7996  if (new_size > old_size) {
7997  RUBY_ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
7998 #if RGENGC_ESTIMATE_OLDMALLOC
7999  RUBY_ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
8000 #endif
8001  }
8002  else {
8003  atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
8004 #if RGENGC_ESTIMATE_OLDMALLOC
8005  atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
8006 #endif
8007  }
8008 
8009  if (type == MEMOP_TYPE_MALLOC) {
8010  retry:
8011  if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
8012  if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
8013  gc_rest(objspace); /* gc_rest can reduce malloc_increase */
8014  goto retry;
8015  }
8016  garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
8017  }
8018  }
8019 
8020 #if MALLOC_ALLOCATED_SIZE
8021  if (new_size >= old_size) {
8022  RUBY_ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
8023  }
8024  else {
8025  size_t dec_size = old_size - new_size;
8026  size_t allocated_size = objspace->malloc_params.allocated_size;
8027 
8028 #if MALLOC_ALLOCATED_SIZE_CHECK
8029  if (allocated_size < dec_size) {
8030  rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
8031  }
8032 #endif
8033  atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
8034  }
8035 
8036  switch (type) {
8037  case MEMOP_TYPE_MALLOC:
8038  RUBY_ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
8039  break;
8040  case MEMOP_TYPE_FREE:
8041  {
8042  size_t allocations = objspace->malloc_params.allocations;
8043  if (allocations > 0) {
8044  atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
8045  }
8046 #if MALLOC_ALLOCATED_SIZE_CHECK
8047  else {
8048  GC_ASSERT(objspace->malloc_params.allocations > 0);
8049  }
8050 #endif
8051  }
8052  break;
8053  case MEMOP_TYPE_REALLOC: /* ignore */ break;
8054  }
8055 #endif
8056  return true;
8057 }
8058 
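/*
 * objspace_malloc_increase(...) { block } first calls the _report helper
 * (which only prints when debugging and always returns false), then runs the
 * attached block exactly once, and finally calls _body to do the actual
 * accounting -- so the counters are updated only after the block (e.g. the
 * free() in rb_gc_impl_free) has executed.
 */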
8059 #define objspace_malloc_increase(...) \
8060  for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
8061  !malloc_increase_done; \
8062  malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
8063 
8064 struct malloc_obj_info { /* 4 words */
8065  size_t size;
8066 };
8067 
8068 static inline size_t
8069 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
8070 {
8071  if (size == 0) size = 1;
8072 
8073 #if CALC_EXACT_MALLOC_SIZE
8074  size += sizeof(struct malloc_obj_info);
8075 #endif
8076 
8077  return size;
8078 }
8079 
8080 static bool
8081 malloc_during_gc_p(rb_objspace_t *objspace)
8082 {
8083  /* malloc is not allowed during GC when we're not using multiple ractors
8084  * (since ractors can run while another thread is sweeping) and when we
8085  * have the GVL (since if we don't have the GVL, we'll try to acquire the
8086  * GVL which will block and ensure the other thread finishes GC). */
8087  return during_gc && !dont_gc_val() && !rb_gc_multi_ractor_p() && ruby_thread_has_gvl_p();
8088 }
8089 
8090 static inline void *
8091 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
8092 {
8093  size = objspace_malloc_size(objspace, mem, size);
8094  objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}
8095 
8096 #if CALC_EXACT_MALLOC_SIZE
8097  {
8098  struct malloc_obj_info *info = mem;
8099  info->size = size;
8100  mem = info + 1;
8101  }
8102 #endif
8103 
8104  return mem;
8105 }
8106 
8107 #if defined(__GNUC__) && RUBY_DEBUG
8108 #define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
8109 #endif
8110 
8111 #ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
8112 # define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
8113 #endif
8114 
8115 #define GC_MEMERROR(...) \
8116  ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
8117 
8118 #define TRY_WITH_GC(siz, expr) do { \
8119  const gc_profile_record_flag gpr = \
8120  GPR_FLAG_FULL_MARK | \
8121  GPR_FLAG_IMMEDIATE_MARK | \
8122  GPR_FLAG_IMMEDIATE_SWEEP | \
8123  GPR_FLAG_MALLOC; \
8124  objspace_malloc_gc_stress(objspace); \
8125  \
8126  if (RB_LIKELY((expr))) { \
8127  /* Success on 1st try */ \
8128  } \
8129  else if (!garbage_collect_with_gvl(objspace, gpr)) { \
8130  /* @shyouhei thinks this doesn't happen */ \
8131  GC_MEMERROR("TRY_WITH_GC: could not GC"); \
8132  } \
8133  else if ((expr)) { \
8134  /* Success on 2nd try */ \
8135  } \
8136  else { \
8137  GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
8138  "%"PRIdSIZE" bytes for %s", \
8139  siz, # expr); \
8140  } \
8141  } while (0)
8142 
8143 static void
8144 check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
8145 {
8146  if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
8147  dont_gc_on();
8148  during_gc = false;
8149  rb_bug("Cannot %s during GC", msg);
8150  }
8151 }
8152 
8153 void
8154 rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
8155 {
8156  rb_objspace_t *objspace = objspace_ptr;
8157 
8158  if (!ptr) {
8159  /*
8160  * ISO/IEC 9899 has said "If ptr is a null pointer, no action occurs" since
8161  * its first version, so we follow suit.
8162  */
8163  return;
8164  }
8165 #if CALC_EXACT_MALLOC_SIZE
8166  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8167  ptr = info;
8168  old_size = info->size;
8169 #endif
8170  old_size = objspace_malloc_size(objspace, ptr, old_size);
8171 
8172  objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
8173  free(ptr);
8174  ptr = NULL;
8175  RB_DEBUG_COUNTER_INC(heap_xfree);
8176  }
8177 }
8178 
8179 void *
8180 rb_gc_impl_malloc(void *objspace_ptr, size_t size)
8181 {
8182  rb_objspace_t *objspace = objspace_ptr;
8183  check_malloc_not_in_gc(objspace, "malloc");
8184 
8185  void *mem;
8186 
8187  size = objspace_malloc_prepare(objspace, size);
8188  TRY_WITH_GC(size, mem = malloc(size));
8189  RB_DEBUG_COUNTER_INC(heap_xmalloc);
8190  return objspace_malloc_fixup(objspace, mem, size);
8191 }
8192 
8193 void *
8194 rb_gc_impl_calloc(void *objspace_ptr, size_t size)
8195 {
8196  rb_objspace_t *objspace = objspace_ptr;
8197 
8198  if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
8199  rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
8200 #if RGENGC_CHECK_MODE || RUBY_DEBUG
8201  rb_bug("Cannot calloc during GC");
8202 #endif
8203  }
8204 
8205  void *mem;
8206 
8207  size = objspace_malloc_prepare(objspace, size);
8208  TRY_WITH_GC(size, mem = calloc1(size));
8209  return objspace_malloc_fixup(objspace, mem, size);
8210 }
8211 
8212 void *
8213 rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)
8214 {
8215  rb_objspace_t *objspace = objspace_ptr;
8216 
8217  check_malloc_not_in_gc(objspace, "realloc");
8218 
8219  void *mem;
8220 
8221  if (!ptr) return rb_gc_impl_malloc(objspace, new_size);
8222 
8223  /*
8224  * The behavior of realloc(ptr, 0) is implementation defined.
8225  * Therefore we don't use realloc(ptr, 0), for portability reasons.
8226  * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
8227  */
8228  if (new_size == 0) {
8229  if ((mem = rb_gc_impl_malloc(objspace, 0)) != NULL) {
8230  /*
8231  * - OpenBSD's malloc(3) man page says that when 0 is passed, it
8232  * returns a non-NULL pointer to an access-protected memory page.
8233  * The returned pointer cannot be read or written at all, but it is
8234  * still a valid argument to free().
8235  *
8236  * https://man.openbsd.org/malloc.3
8237  *
8238  * - Linux's malloc(3) man page says that it may return
8239  * a non-NULL pointer when its argument is 0. That return value
8240  * is safe (and is expected) to be passed to free().
8241  *
8242  * https://man7.org/linux/man-pages/man3/malloc.3.html
8243  *
8244  * - As I read the implementation, jemalloc's malloc() returns a fully
8245  * normal 16-byte memory region when its argument is 0.
8246  *
8247  * - As I read the implementation, musl libc's malloc() returns a
8248  * fully normal 32-byte memory region when its argument is 0.
8249  *
8250  * - Other malloc implementations can also return non-NULL.
8251  */
8252  rb_gc_impl_free(objspace, ptr, old_size);
8253  return mem;
8254  }
8255  else {
8256  /*
8257  * It is dangerous to return NULL here, because that could lead to
8258  * RCE. Fall back to 1 byte instead of zero.
8259  *
8260  * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
8261  */
8262  new_size = 1;
8263  }
8264  }
8265 
8266 #if CALC_EXACT_MALLOC_SIZE
8267  {
8268  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8269  new_size += sizeof(struct malloc_obj_info);
8270  ptr = info;
8271  old_size = info->size;
8272  }
8273 #endif
8274 
8275  old_size = objspace_malloc_size(objspace, ptr, old_size);
8276  TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
8277  new_size = objspace_malloc_size(objspace, mem, new_size);
8278 
8279 #if CALC_EXACT_MALLOC_SIZE
8280  {
8281  struct malloc_obj_info *info = mem;
8282  info->size = new_size;
8283  mem = info + 1;
8284  }
8285 #endif
8286 
8287  objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
8288 
8289  RB_DEBUG_COUNTER_INC(heap_xrealloc);
8290  return mem;
8291 }
8292 
8293 void
8294 rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)
8295 {
8296  rb_objspace_t *objspace = objspace_ptr;
8297 
8298  if (diff > 0) {
8299  objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
8300  }
8301  else if (diff < 0) {
8302  objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
8303  }
8304 }
8305 
8306 // TODO: move GC profiler stuff back into gc.c
8307 /*
8308  ------------------------------ GC profiler ------------------------------
8309 */
8310 
8311 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
8312 
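/*
 * Fetch per-process CPU time, trying clock_gettime(CLOCK_PROCESS_CPUTIME_ID)
 * first, then getrusage(RUSAGE_SELF) user time, then GetProcessTimes() on
 * Windows.  Returns false when none of these are available.
 */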
8313 static bool
8314 current_process_time(struct timespec *ts)
8315 {
8316 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
8317  {
8318  static int try_clock_gettime = 1;
8319  if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
8320  return true;
8321  }
8322  else {
8323  try_clock_gettime = 0;
8324  }
8325  }
8326 #endif
8327 
8328 #ifdef RUSAGE_SELF
8329  {
8330  struct rusage usage;
8331  struct timeval time;
8332  if (getrusage(RUSAGE_SELF, &usage) == 0) {
8333  time = usage.ru_utime;
8334  ts->tv_sec = time.tv_sec;
8335  ts->tv_nsec = (int32_t)time.tv_usec * 1000;
8336  return true;
8337  }
8338  }
8339 #endif
8340 
8341 #ifdef _WIN32
8342  {
8343  FILETIME creation_time, exit_time, kernel_time, user_time;
8344  ULARGE_INTEGER ui;
8345 
8346  if (GetProcessTimes(GetCurrentProcess(),
8347  &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8348  memcpy(&ui, &user_time, sizeof(FILETIME));
8349 #define PER100NSEC (uint64_t)(1000 * 1000 * 10)
8350  ts->tv_nsec = (long)(ui.QuadPart % PER100NSEC);
8351  ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
8352  return true;
8353  }
8354  }
8355 #endif
8356 
8357  return false;
8358 }
8359 
8360 static double
8361 getrusage_time(void)
8362 {
8363  struct timespec ts;
8364  if (current_process_time(&ts)) {
8365  return ts.tv_sec + ts.tv_nsec * 1e-9;
8366  }
8367  else {
8368  return 0.0;
8369  }
8370 }
8371 
8372 
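/*
 * Lazily (re)allocates the profile record array -- GC_PROFILE_RECORD_DEFAULT_SIZE
 * records to start with, growing by 1000 entries at a time -- using plain
 * malloc/realloc rather than the GC-tracked allocators, then zero-fills and
 * initializes the record for the GC run that is about to start.
 */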
8373 static inline void
8374 gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
8375 {
8376  if (objspace->profile.run) {
8377  size_t index = objspace->profile.next_index;
8378  gc_profile_record *record;
8379 
8380  /* create new record */
8381  objspace->profile.next_index++;
8382 
8383  if (!objspace->profile.records) {
8384  objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
8385  objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
8386  }
8387  if (index >= objspace->profile.size) {
8388  void *ptr;
8389  objspace->profile.size += 1000;
8390  ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
8391  if (!ptr) rb_memerror();
8392  objspace->profile.records = ptr;
8393  }
8394  if (!objspace->profile.records) {
8395  rb_bug("gc_profile malloc or realloc miss");
8396  }
8397  record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
8398  MEMZERO(record, gc_profile_record, 1);
8399 
8400  /* setup before-GC parameter */
8401  record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
8402 #if MALLOC_ALLOCATED_SIZE
8403  record->allocated_size = malloc_allocated_size;
8404 #endif
8405 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
8406 #ifdef RUSAGE_SELF
8407  {
8408  struct rusage usage;
8409  if (getrusage(RUSAGE_SELF, &usage) == 0) {
8410  record->maxrss = usage.ru_maxrss;
8411  record->minflt = usage.ru_minflt;
8412  record->majflt = usage.ru_majflt;
8413  }
8414  }
8415 #endif
8416 #endif
8417  }
8418 }
8419 
8420 static inline void
8421 gc_prof_timer_start(rb_objspace_t *objspace)
8422 {
8423  if (gc_prof_enabled(objspace)) {
8424  gc_profile_record *record = gc_prof_record(objspace);
8425 #if GC_PROFILE_MORE_DETAIL
8426  record->prepare_time = objspace->profile.prepare_time;
8427 #endif
8428  record->gc_time = 0;
8429  record->gc_invoke_time = getrusage_time();
8430  }
8431 }
8432 
8433 static double
8434 elapsed_time_from(double time)
8435 {
8436  double now = getrusage_time();
8437  if (now > time) {
8438  return now - time;
8439  }
8440  else {
8441  return 0;
8442  }
8443 }
8444 
8445 static inline void
8446 gc_prof_timer_stop(rb_objspace_t *objspace)
8447 {
8448  if (gc_prof_enabled(objspace)) {
8449  gc_profile_record *record = gc_prof_record(objspace);
8450  record->gc_time = elapsed_time_from(record->gc_invoke_time);
8451  record->gc_invoke_time -= objspace->profile.invoke_time;
8452  }
8453 }
8454 
8455 #ifdef BUILDING_SHARED_GC
8456 # define RUBY_DTRACE_GC_HOOK(name)
8457 #else
8458 # define RUBY_DTRACE_GC_HOOK(name) \
8459  do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
8460 #endif
8461 
8462 static inline void
8463 gc_prof_mark_timer_start(rb_objspace_t *objspace)
8464 {
8465  RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
8466 #if GC_PROFILE_MORE_DETAIL
8467  if (gc_prof_enabled(objspace)) {
8468  gc_prof_record(objspace)->gc_mark_time = getrusage_time();
8469  }
8470 #endif
8471 }
8472 
8473 static inline void
8474 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
8475 {
8476  RUBY_DTRACE_GC_HOOK(MARK_END);
8477 #if GC_PROFILE_MORE_DETAIL
8478  if (gc_prof_enabled(objspace)) {
8479  gc_profile_record *record = gc_prof_record(objspace);
8480  record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
8481  }
8482 #endif
8483 }
8484 
8485 static inline void
8486 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
8487 {
8488  RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
8489  if (gc_prof_enabled(objspace)) {
8490  gc_profile_record *record = gc_prof_record(objspace);
8491 
8492  if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
8493  objspace->profile.gc_sweep_start_time = getrusage_time();
8494  }
8495  }
8496 }
8497 
8498 static inline void
8499 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
8500 {
8501  RUBY_DTRACE_GC_HOOK(SWEEP_END);
8502 
8503  if (gc_prof_enabled(objspace)) {
8504  double sweep_time;
8505  gc_profile_record *record = gc_prof_record(objspace);
8506 
8507  if (record->gc_time > 0) {
8508  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
8509  /* need to accumulate GC time for lazy sweep after gc() */
8510  record->gc_time += sweep_time;
8511  }
8512  else if (GC_PROFILE_MORE_DETAIL) {
8513  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
8514  }
8515 
8516 #if GC_PROFILE_MORE_DETAIL
8517  record->gc_sweep_time += sweep_time;
8518  if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
8519 #endif
8520  if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
8521  }
8522 }
8523 
8524 static inline void
8525 gc_prof_set_malloc_info(rb_objspace_t *objspace)
8526 {
8527 #if GC_PROFILE_MORE_DETAIL
8528  if (gc_prof_enabled(objspace)) {
8529  gc_profile_record *record = gc_prof_record(objspace);
8530  record->allocate_increase = malloc_increase;
8531  record->allocate_limit = malloc_limit;
8532  }
8533 #endif
8534 }
8535 
8536 static inline void
8537 gc_prof_set_heap_info(rb_objspace_t *objspace)
8538 {
8539  if (gc_prof_enabled(objspace)) {
8540  gc_profile_record *record = gc_prof_record(objspace);
8541  size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
8542  size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
8543 
8544 #if GC_PROFILE_MORE_DETAIL
8545  record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
8546  record->heap_live_objects = live;
8547  record->heap_free_objects = total - live;
8548 #endif
8549 
8550  record->heap_total_objects = total;
8551  record->heap_use_size = live * BASE_SLOT_SIZE;
8552  record->heap_total_size = total * BASE_SLOT_SIZE;
8553  }
8554 }
8555 
8556 /*
8557  * call-seq:
8558  * GC::Profiler.clear -> nil
8559  *
8560  * Clears the \GC profiler data.
8561  *
8562  */
8563 
8564 static VALUE
8565 gc_profile_clear(VALUE _)
8566 {
8567  rb_objspace_t *objspace = rb_gc_get_objspace();
8568  void *p = objspace->profile.records;
8569  objspace->profile.records = NULL;
8570  objspace->profile.size = 0;
8571  objspace->profile.next_index = 0;
8572  objspace->profile.current_record = 0;
8573  free(p);
8574  return Qnil;
8575 }
8576 
8577 /*
8578  * call-seq:
8579  * GC::Profiler.raw_data -> [Hash, ...]
8580  *
8581  * Returns an Array of individual raw profile data Hashes ordered
8582  * from earliest to latest by +:GC_INVOKE_TIME+.
8583  *
8584  * For example:
8585  *
8586  * [
8587  * {
8588  * :GC_TIME=>1.3000000000000858e-05,
8589  * :GC_INVOKE_TIME=>0.010634999999999999,
8590  * :HEAP_USE_SIZE=>289640,
8591  * :HEAP_TOTAL_SIZE=>588960,
8592  * :HEAP_TOTAL_OBJECTS=>14724,
8593  * :GC_IS_MARKED=>false
8594  * },
8595  * # ...
8596  * ]
8597  *
8598  * The keys mean:
8599  *
8600  * +:GC_TIME+::
8601  * Time elapsed in seconds for this GC run
8602  * +:GC_INVOKE_TIME+::
8603  * Time elapsed in seconds from startup to when the GC was invoked
8604  * +:HEAP_USE_SIZE+::
8605  * Total bytes of heap used
8606  * +:HEAP_TOTAL_SIZE+::
8607  * Total size of heap in bytes
8608  * +:HEAP_TOTAL_OBJECTS+::
8609  * Total number of objects
8610  * +:GC_IS_MARKED+::
8611  * Returns +true+ if the GC is in mark phase
8612  *
8613  * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
8614  * to the following hash keys:
8615  *
8616  * +:GC_MARK_TIME+::
8617  * +:GC_SWEEP_TIME+::
8618  * +:ALLOCATE_INCREASE+::
8619  * +:ALLOCATE_LIMIT+::
8620  * +:HEAP_USE_PAGES+::
8621  * +:HEAP_LIVE_OBJECTS+::
8622  * +:HEAP_FREE_OBJECTS+::
8623  * +:HAVE_FINALIZE+::
8624  *
8625  */
8626 
8627 static VALUE
8628 gc_profile_record_get(VALUE _)
8629 {
8630  VALUE prof;
8631  VALUE gc_profile = rb_ary_new();
8632  size_t i;
8633  rb_objspace_t *objspace = rb_gc_get_objspace();
8634 
8635  if (!objspace->profile.run) {
8636  return Qnil;
8637  }
8638 
8639  for (i =0; i < objspace->profile.next_index; i++) {
8640  gc_profile_record *record = &objspace->profile.records[i];
8641 
8642  prof = rb_hash_new();
8643  rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
8644  rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
8645  rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
8646  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
8647  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
8648  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
8649  rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
8650  rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
8651 #if GC_PROFILE_MORE_DETAIL
8652  rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
8653  rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
8654  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
8655  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
8656  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
8657  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
8658  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
8659 
8660  rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
8661  rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
8662 
8663  rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
8664 #endif
8665 
8666 #if RGENGC_PROFILE > 0
8667  rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
8668  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
8669  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
8670 #endif
8671  rb_ary_push(gc_profile, prof);
8672  }
8673 
8674  return gc_profile;
8675 }
8676 
8677 #if GC_PROFILE_MORE_DETAIL
8678 #define MAJOR_REASON_MAX 0x10
8679 
8680 static char *
8681 gc_profile_dump_major_reason(unsigned int flags, char *buff)
8682 {
8683  unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
8684  int i = 0;
8685 
8686  if (reason == GPR_FLAG_NONE) {
8687  buff[0] = '-';
8688  buff[1] = 0;
8689  }
8690  else {
8691 #define C(x, s) \
8692  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
8693  buff[i++] = #x[0]; \
8694  if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
8695  buff[i] = 0; \
8696  }
8697  C(NOFREE, N);
8698  C(OLDGEN, O);
8699  C(SHADY, S);
8700 #if RGENGC_ESTIMATE_OLDMALLOC
8701  C(OLDMALLOC, M);
8702 #endif
8703 #undef C
8704  }
8705  return buff;
8706 }
8707 #endif
8708 
8709 
8710 
8711 static void
8712 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
8713 {
8714  rb_objspace_t *objspace = rb_gc_get_objspace();
8715  size_t count = objspace->profile.next_index;
8716 #ifdef MAJOR_REASON_MAX
8717  char reason_str[MAJOR_REASON_MAX];
8718 #endif
8719 
8720  if (objspace->profile.run && count /* > 1 */) {
8721  size_t i;
8722  const gc_profile_record *record;
8723 
8724  append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
8725  append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
8726 
8727  for (i = 0; i < count; i++) {
8728  record = &objspace->profile.records[i];
8729  append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
8730  i+1, record->gc_invoke_time, record->heap_use_size,
8731  record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
8732  }
8733 
8734 #if GC_PROFILE_MORE_DETAIL
8735  const char *str = "\n\n" \
8736  "More detail.\n" \
8737  "Prepare Time = Previous GC's rest sweep time\n"
8738  "Index Flags Allocate Inc. Allocate Limit"
8739 #if CALC_EXACT_MALLOC_SIZE
8740  " Allocated Size"
8741 #endif
8742  " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
8743 #if RGENGC_PROFILE
8744  " OldgenObj RemNormObj RemShadObj"
8745 #endif
8746 #if GC_PROFILE_DETAIL_MEMORY
8747  " MaxRSS(KB) MinorFLT MajorFLT"
8748 #endif
8749  "\n";
8750  append(out, rb_str_new_cstr(str));
8751 
8752  for (i = 0; i < count; i++) {
8753  record = &objspace->profile.records[i];
8754  append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
8755 #if CALC_EXACT_MALLOC_SIZE
8756  " %15"PRIuSIZE
8757 #endif
8758  " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
8759 #if RGENGC_PROFILE
8760  "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
8761 #endif
8762 #if GC_PROFILE_DETAIL_MEMORY
8763  "%11ld %8ld %8ld"
8764 #endif
8765 
8766  "\n",
8767  i+1,
8768  gc_profile_dump_major_reason(record->flags, reason_str),
8769  (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
8770  (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
8771  (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
8772  (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
8773  (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
8774  (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
8775  record->allocate_increase, record->allocate_limit,
8776 #if CALC_EXACT_MALLOC_SIZE
8777  record->allocated_size,
8778 #endif
8779  record->heap_use_pages,
8780  record->gc_mark_time*1000,
8781  record->gc_sweep_time*1000,
8782  record->prepare_time*1000,
8783 
8784  record->heap_live_objects,
8785  record->heap_free_objects,
8786  record->removing_objects,
8787  record->empty_objects
8788 #if RGENGC_PROFILE
8789  ,
8790  record->old_objects,
8791  record->remembered_normal_objects,
8792  record->remembered_shady_objects
8793 #endif
8794 #if GC_PROFILE_DETAIL_MEMORY
8795  ,
8796  record->maxrss / 1024,
8797  record->minflt,
8798  record->majflt
8799 #endif
8800 
8801  ));
8802  }
8803 #endif
8804  }
8805 }
8806 
8807 /*
8808  * call-seq:
8809  * GC::Profiler.result -> String
8810  *
8811  * Returns a profile data report such as:
8812  *
8813  * GC 1 invokes.
8814  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
8815  * 1 0.012 159240 212940 10647 0.00000000000001530000
8816  */
8817 
8818 static VALUE
8819 gc_profile_result(VALUE _)
8820 {
8821  VALUE str = rb_str_buf_new(0);
8822  gc_profile_dump_on(str, rb_str_buf_append);
8823  return str;
8824 }
8825 
8826 /*
8827  * call-seq:
8828  * GC::Profiler.report
8829  * GC::Profiler.report(io)
8830  *
8831  * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
8832  *
8833  */
8834 
8835 static VALUE
8836 gc_profile_report(int argc, VALUE *argv, VALUE self)
8837 {
8838  VALUE out;
8839 
8840  out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
8841  gc_profile_dump_on(out, rb_io_write);
8842 
8843  return Qnil;
8844 }
8845 
8846 /*
8847  * call-seq:
8848  * GC::Profiler.total_time -> float
8849  *
8850  * The total time used for garbage collection in seconds
8851  */
8852 
8853 static VALUE
8854 gc_profile_total_time(VALUE self)
8855 {
8856  double time = 0;
8857  rb_objspace_t *objspace = rb_gc_get_objspace();
8858 
8859  if (objspace->profile.run && objspace->profile.next_index > 0) {
8860  size_t i;
8861  size_t count = objspace->profile.next_index;
8862 
8863  for (i = 0; i < count; i++) {
8864  time += objspace->profile.records[i].gc_time;
8865  }
8866  }
8867  return DBL2NUM(time);
8868 }
8869 
8870 /*
8871  * call-seq:
8872  * GC::Profiler.enabled? -> true or false
8873  *
8874  * The current status of \GC profile mode.
8875  */
8876 
8877 static VALUE
8878 gc_profile_enable_get(VALUE self)
8879 {
8880  rb_objspace_t *objspace = rb_gc_get_objspace();
8881  return objspace->profile.run ? Qtrue : Qfalse;
8882 }
8883 
8884 /*
8885  * call-seq:
8886  * GC::Profiler.enable -> nil
8887  *
8888  * Starts the \GC profiler.
8889  *
8890  */
8891 
8892 static VALUE
8893 gc_profile_enable(VALUE _)
8894 {
8895  rb_objspace_t *objspace = rb_gc_get_objspace();
8896  objspace->profile.run = TRUE;
8897  objspace->profile.current_record = 0;
8898  return Qnil;
8899 }
8900 
8901 /*
8902  * call-seq:
8903  * GC::Profiler.disable -> nil
8904  *
8905  * Stops the \GC profiler.
8906  *
8907  */
8908 
8909 static VALUE
8910 gc_profile_disable(VALUE _)
8911 {
8912  rb_objspace_t *objspace = rb_gc_get_objspace();
8913 
8914  objspace->profile.run = FALSE;
8915  objspace->profile.current_record = 0;
8916  return Qnil;
8917 }
8918 
8919 /*
8920  * call-seq:
8921  * GC.verify_internal_consistency -> nil
8922  *
8923  * Verify internal consistency.
8924  *
8925  * This method is implementation specific.
8926  * Currently, this method checks generational consistency
8927  * if RGenGC is supported.
8928  */
8929 static VALUE
8930 gc_verify_internal_consistency_m(VALUE dummy)
8931 {
8932  gc_verify_internal_consistency(rb_gc_get_objspace());
8933  return Qnil;
8934 }
8935 
8936 #if GC_CAN_COMPILE_COMPACTION
8937 /*
8938  * call-seq:
8939  * GC.auto_compact = flag
8940  *
8941  * Updates automatic compaction mode.
8942  *
8943  * When enabled, the compactor will execute on every major collection.
8944  *
8945  * Enabling compaction will degrade performance on major collections.
8946  */
8947 static VALUE
8948 gc_set_auto_compact(VALUE _, VALUE v)
8949 {
8950  GC_ASSERT(GC_COMPACTION_SUPPORTED);
8951 
8952  ruby_enable_autocompact = RTEST(v);
8953 
8954 #if RGENGC_CHECK_MODE
8955  ruby_autocompact_compare_func = NULL;
8956 
8957  if (SYMBOL_P(v)) {
8958  ID id = RB_SYM2ID(v);
8959  if (id == rb_intern("empty")) {
8960  ruby_autocompact_compare_func = compare_free_slots;
8961  }
8962  }
8963 #endif
8964 
8965  return v;
8966 }
8967 #else
8968 # define gc_set_auto_compact rb_f_notimplement
8969 #endif
8970 
8971 #if GC_CAN_COMPILE_COMPACTION
8972 /*
8973  * call-seq:
8974  * GC.auto_compact -> true or false
8975  *
8976  * Returns whether or not automatic compaction has been enabled.
8977  */
8978 static VALUE
8979 gc_get_auto_compact(VALUE _)
8980 {
8981  return ruby_enable_autocompact ? Qtrue : Qfalse;
8982 }
8983 #else
8984 # define gc_get_auto_compact rb_f_notimplement
8985 #endif
8986 
8987 #if GC_CAN_COMPILE_COMPACTION
8988 /*
8989  * call-seq:
8990  * GC.latest_compact_info -> hash
8991  *
8992  * Returns information about objects moved in the most recent \GC compaction.
8993  *
8994  * The returned +hash+ contains the following keys:
8995  *
8996  * [considered]
8997  * Hash containing the type of the object as the key and the number of
8998  * objects of that type that were considered for movement.
8999  * [moved]
9000  * Hash containing the type of the object as the key and the number of
9001  * objects of that type that were actually moved.
9002  * [moved_up]
9003  * Hash containing the type of the object as the key and the number of
9004  * objects of that type that were increased in size.
9005  * [moved_down]
9006  * Hash containing the type of the object as the key and the number of
9007  * objects of that type that were decreased in size.
9008  *
9009  * Some objects can't be moved (due to pinning) so these numbers can be used to
9010  * calculate compaction efficiency.
9011  */
9012 static VALUE
9013 gc_compact_stats(VALUE self)
9014 {
9015  rb_objspace_t *objspace = rb_gc_get_objspace();
9016  VALUE h = rb_hash_new();
9017  VALUE considered = rb_hash_new();
9018  VALUE moved = rb_hash_new();
9019  VALUE moved_up = rb_hash_new();
9020  VALUE moved_down = rb_hash_new();
9021 
9022  for (size_t i = 0; i < T_MASK; i++) {
9023  if (objspace->rcompactor.considered_count_table[i]) {
9024  rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
9025  }
9026 
9027  if (objspace->rcompactor.moved_count_table[i]) {
9028  rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
9029  }
9030 
9031  if (objspace->rcompactor.moved_up_count_table[i]) {
9032  rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
9033  }
9034 
9035  if (objspace->rcompactor.moved_down_count_table[i]) {
9036  rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
9037  }
9038  }
9039 
9040  rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
9041  rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
9042  rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
9043  rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
9044 
9045  return h;
9046 }
9047 #else
9048 # define gc_compact_stats rb_f_notimplement
9049 #endif
9050 
9051 #if GC_CAN_COMPILE_COMPACTION
9052 /*
9053  * call-seq:
9054  * GC.compact -> hash
9055  *
9056  * This function compacts objects together in Ruby's heap. It eliminates
9057  * unused space (or fragmentation) in the heap by moving objects into that
9058  * unused space.
9059  *
9060  * The returned +hash+ contains statistics about the objects that were moved;
9061  * see GC.latest_compact_info.
9062  *
9063  * This method is only expected to work on CRuby.
9064  *
9065  * To test whether \GC compaction is supported, use the idiom:
9066  *
9067  * GC.respond_to?(:compact)
9068  */
9069 static VALUE
9070 gc_compact(VALUE self)
9071 {
9072  rb_objspace_t *objspace = rb_gc_get_objspace();
9073  int full_marking_p = gc_config_full_mark_val;
9074  gc_config_full_mark_set(TRUE);
9075 
9076  /* Run GC with compaction enabled */
9077  rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
9078  gc_config_full_mark_set(full_marking_p);
9079 
9080  return gc_compact_stats(self);
9081 }
9082 #else
9083 # define gc_compact rb_f_notimplement
9084 #endif
9085 
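/*
 * Illustrative sketch, not part of the upstream source: invoking the
 * Ruby-level GC.compact defined above from C, using the documented
 * GC.respond_to?(:compact) idiom so the caller stays portable to builds
 * without compaction support.  Guarded out with #if 0.
 */
#if 0
static VALUE
example_trigger_compaction(void)
{
    if (!rb_respond_to(rb_mGC, rb_intern("compact"))) {
        return Qnil;  /* compaction not compiled in on this platform */
    }

    VALUE stats = rb_funcall(rb_mGC, rb_intern("compact"), 0);
    /* e.g. the per-type table of objects that actually moved */
    return rb_hash_aref(stats, ID2SYM(rb_intern("moved")));
}
#endif
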
9086 #if GC_CAN_COMPILE_COMPACTION
9087 struct desired_compaction_pages_i_data {
9088  rb_objspace_t *objspace;
9089  size_t required_slots[HEAP_COUNT];
9090 };
9091 
9092 static int
9093 desired_compaction_pages_i(struct heap_page *page, void *data)
9094 {
9095  struct desired_compaction_pages_i_data *tdata = data;
9096  rb_objspace_t *objspace = tdata->objspace;
9097  VALUE vstart = (VALUE)page->start;
9098  VALUE vend = vstart + (VALUE)(page->total_slots * page->heap->slot_size);
9099 
9100 
9101  for (VALUE v = vstart; v != vend; v += page->heap->slot_size) {
9102  asan_unpoisoning_object(v) {
9103  /* skip T_NONEs; they won't be moved */
9104  if (BUILTIN_TYPE(v) != T_NONE) {
9105  rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, page->heap, v);
9106  size_t dest_pool_idx = dest_pool - heaps;
9107  tdata->required_slots[dest_pool_idx]++;
9108  }
9109  }
9110  }
9111 
9112  return 0;
9113 }
9114 
9115 /* call-seq:
9116  * GC.verify_compaction_references(toward: nil, double_heap: false, expand_heap: false) -> hash
9117  *
9118  * Verify compaction reference consistency.
9119  *
9120  * This method is implementation specific. During compaction, objects that
9121  * were moved are replaced with T_MOVED objects. No object should have a
9122  * reference to a T_MOVED object after compaction.
9123  *
9124  * This function expands the heap to ensure room to move all objects,
9125  * compacts the heap to make sure everything moves, updates all references,
9126  * then performs a full \GC. If any object contains a reference to a T_MOVED
9127  * object, that object should be pushed on the mark stack, and will
9128  * trigger a SEGV.
9129  */
9130 static VALUE
9131 gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
9132 {
9133  static ID keywords[3] = {0};
9134  if (!keywords[0]) {
9135  keywords[0] = rb_intern("toward");
9136  keywords[1] = rb_intern("double_heap");
9137  keywords[2] = rb_intern("expand_heap");
9138  }
9139 
9140  VALUE options;
9141  rb_scan_args_kw(rb_keyword_given_p(), argc, argv, ":", &options);
9142 
9143  VALUE arguments[3] = { Qnil, Qfalse, Qfalse };
9144  int kwarg_count = rb_get_kwargs(options, keywords, 0, 3, arguments);
9145  bool toward_empty = kwarg_count > 0 && SYMBOL_P(arguments[0]) && SYM2ID(arguments[0]) == rb_intern("empty");
9146  bool expand_heap = (kwarg_count > 1 && RTEST(arguments[1])) || (kwarg_count > 2 && RTEST(arguments[2]));
9147 
9148  rb_objspace_t *objspace = rb_gc_get_objspace();
9149 
9150  /* Clear the heap. */
9151  rb_gc_impl_start(objspace, true, true, true, false);
9152 
9153  unsigned int lev = rb_gc_vm_lock();
9154  {
9155  gc_rest(objspace);
9156 
9157  /* if both double_heap and expand_heap are set, expand_heap takes precedence */
9158  if (expand_heap) {
9159  struct desired_compaction_pages_i_data desired_compaction = {
9160  .objspace = objspace,
9161  .required_slots = {0},
9162  };
9163  /* Work out how many objects want to be in each size pool, taking account of moves */
9164  objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
9165 
9166  /* Find out which pool has the most pages */
9167  size_t max_existing_pages = 0;
9168  for (int i = 0; i < HEAP_COUNT; i++) {
9169  rb_heap_t *heap = &heaps[i];
9170  max_existing_pages = MAX(max_existing_pages, heap->total_pages);
9171  }
9172 
9173  /* Add pages to each size pool so that compaction is guaranteed to move every object */
9174  for (int i = 0; i < HEAP_COUNT; i++) {
9175  rb_heap_t *heap = &heaps[i];
9176 
9177  size_t pages_to_add = 0;
9178  /*
9179  * Step 1: Make sure every pool has the same number of pages, by adding empty pages
9180  * to smaller pools. This is required to make sure the compact cursor can advance
9181  * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
9182  * compact cursors met" condition on some pools before fully compacting others
9183  */
9184  pages_to_add += max_existing_pages - heap->total_pages;
9185  /*
9186  * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
9187  * that want to be in that size pool, whether moved into it or moved within it
9188  */
9189  objspace->heap_pages.allocatable_slots = desired_compaction.required_slots[i];
9190  while (objspace->heap_pages.allocatable_slots > 0) {
9191  heap_page_allocate_and_initialize(objspace, heap);
9192  }
9193  /*
9194  * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
9195  * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
9196  */
9197  pages_to_add += 2;
9198 
9199  for (; pages_to_add > 0; pages_to_add--) {
9200  heap_page_allocate_and_initialize_force(objspace, heap);
9201  }
9202  }
9203  }
9204 
9205  if (toward_empty) {
9206  objspace->rcompactor.compare_func = compare_free_slots;
9207  }
9208  }
9209  rb_gc_vm_unlock(lev);
9210 
9211  rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
9212 
9213  rb_objspace_reachable_objects_from_root(root_obj_check_moved_i, objspace);
9214  objspace_each_objects(objspace, heap_check_moved_i, objspace, TRUE);
9215 
9216  objspace->rcompactor.compare_func = NULL;
9217 
9218  return gc_compact_stats(self);
9219 }
9220 #else
9221 # define gc_verify_compaction_references rb_f_notimplement
9222 #endif
9223 
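/*
 * Illustrative sketch, not part of the upstream source: calling the
 * verification entry point above from C with keyword arguments, equivalent
 * to GC.verify_compaction_references(expand_heap: true, toward: :empty) at
 * the Ruby level.  Guarded out with #if 0.
 */
#if 0
static VALUE
example_verify_compaction(void)
{
    VALUE kwargs = rb_hash_new();
    rb_hash_aset(kwargs, ID2SYM(rb_intern("expand_heap")), Qtrue);
    rb_hash_aset(kwargs, ID2SYM(rb_intern("toward")), ID2SYM(rb_intern("empty")));

    /* RB_PASS_KEYWORDS tells the callee to treat the trailing hash as kwargs. */
    return rb_funcallv_kw(rb_mGC, rb_intern("verify_compaction_references"),
                          1, &kwargs, RB_PASS_KEYWORDS);
}
#endif
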
9224 void
9225 rb_gc_impl_objspace_free(void *objspace_ptr)
9226 {
9227  rb_objspace_t *objspace = objspace_ptr;
9228 
9229  if (is_lazy_sweeping(objspace))
9230  rb_bug("lazy sweeping underway when freeing object space");
9231 
9232  free(objspace->profile.records);
9233  objspace->profile.records = NULL;
9234 
9235  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
9236  heap_page_free(objspace, rb_darray_get(objspace->heap_pages.sorted, i));
9237  }
9238  rb_darray_free(objspace->heap_pages.sorted);
9239  heap_pages_lomem = 0;
9240  heap_pages_himem = 0;
9241 
9242  for (int i = 0; i < HEAP_COUNT; i++) {
9243  rb_heap_t *heap = &heaps[i];
9244  heap->total_pages = 0;
9245  heap->total_slots = 0;
9246  }
9247 
9248  st_free_table(objspace->id_to_obj_tbl);
9249  st_free_table(objspace->obj_to_id_tbl);
9250 
9251  free_stack_chunks(&objspace->mark_stack);
9252  mark_stack_free_cache(&objspace->mark_stack);
9253 
9254  rb_darray_free(objspace->weak_references);
9255 
9256  free(objspace);
9257 }
9258 
9259 #if MALLOC_ALLOCATED_SIZE
9260 /*
9261  * call-seq:
9262  * GC.malloc_allocated_size -> Integer
9263  *
9264  * Returns the size of memory allocated by malloc().
9265  *
9266  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
9267  */
9268 
9269 static VALUE
9270 gc_malloc_allocated_size(VALUE self)
9271 {
9272  return UINT2NUM(rb_objspace.malloc_params.allocated_size);
9273 }
9274 
9275 /*
9276  * call-seq:
9277  * GC.malloc_allocations -> Integer
9278  *
9279  * Returns the number of malloc() allocations.
9280  *
9281  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
9282  */
9283 
9284 static VALUE
9285 gc_malloc_allocations(VALUE self)
9286 {
9287  return UINT2NUM(rb_objspace.malloc_params.allocations);
9288 }
9289 #endif
9290 
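/*
 * Illustrative sketch, not part of the upstream source: the two counters
 * above only exist when Ruby was built with CALC_EXACT_MALLOC_SIZE, so a
 * caller should probe for the method before using it.  Guarded out with
 * #if 0.
 */
#if 0
static VALUE
example_malloc_allocated_size(void)
{
    /* GC.malloc_allocated_size is undefined unless CALC_EXACT_MALLOC_SIZE was set. */
    if (!rb_respond_to(rb_mGC, rb_intern("malloc_allocated_size"))) {
        return Qnil;
    }
    return rb_funcall(rb_mGC, rb_intern("malloc_allocated_size"), 0);
}
#endif
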
9291 void *
9292 rb_gc_impl_objspace_alloc(void)
9293 {
9294  rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
9295 
9296  return objspace;
9297 }
9298 
9299 void
9300 rb_gc_impl_objspace_init(void *objspace_ptr)
9301 {
9302  rb_objspace_t *objspace = objspace_ptr;
9303 
9304  gc_config_full_mark_set(TRUE);
9305 
9306  objspace->flags.measure_gc = true;
9307  malloc_limit = gc_params.malloc_limit_min;
9308  objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
9309  if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
9310  rb_bug("Could not preregister postponed job for GC");
9311  }
9312 
9313  for (int i = 0; i < HEAP_COUNT; i++) {
9314  rb_heap_t *heap = &heaps[i];
9315 
9316  heap->slot_size = (1 << i) * BASE_SLOT_SIZE;
9317 
9318  ccan_list_head_init(&heap->pages);
9319  }
9320 
9321  rb_darray_make(&objspace->heap_pages.sorted, 0);
9322  rb_darray_make(&objspace->weak_references, 0);
9323 
9324  // TODO: debug why on Windows Ruby crashes on boot when GC is on.
9325 #ifdef _WIN32
9326  dont_gc_on();
9327 #endif
9328 
9329 #if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
9330  /* Need to determine if we can use mmap at runtime. */
9331  heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
9332 #endif
9333  objspace->next_object_id = OBJ_ID_INITIAL;
9334  objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
9335  objspace->obj_to_id_tbl = st_init_numtable();
9336 #if RGENGC_ESTIMATE_OLDMALLOC
9337  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9338 #endif
9339  /* Set size pools allocatable pages. */
9340  for (int i = 0; i < HEAP_COUNT; i++) {
9341  /* Set the default value of heap_init_slots. */
9342  gc_params.heap_init_slots[i] = GC_HEAP_INIT_SLOTS;
9343  }
9344 
9345  init_mark_stack(&objspace->mark_stack);
9346 
9347  objspace->profile.invoke_time = getrusage_time();
9348  finalizer_table = st_init_numtable();
9349 }
9350 
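/*
 * Illustrative sketch, not part of the upstream source: the order in which
 * an embedder of this GC implementation is expected to drive the objspace
 * lifecycle entry points defined above.  The wrapping function is
 * hypothetical; only the rb_gc_impl_* calls are real.  Guarded out with
 * #if 0.
 */
#if 0
static void
example_objspace_lifecycle(void)
{
    void *objspace = rb_gc_impl_objspace_alloc();  /* zero-initialized objspace */
    rb_gc_impl_objspace_init(objspace);            /* heaps, mark stack, ID tables */
    /* ... the VM allocates objects and runs GC cycles here ... */
    rb_gc_impl_objspace_free(objspace);            /* must not be lazy sweeping */
}
#endif
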
9351 void
9352 rb_gc_impl_init(void)
9353 {
9354  VALUE gc_constants = rb_hash_new();
9355  rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), GC_DEBUG ? Qtrue : Qfalse);
9356  rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
9357  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
9358  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
9359  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
9360  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
9361  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(HEAP_COUNT));
9362  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(heap_slot_size(HEAP_COUNT - 1)));
9363  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
9364  if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
9365  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
9366  }
9367  OBJ_FREEZE(gc_constants);
9368  /* Internal constants in the garbage collector. */
9369  rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
9370 
9371  if (GC_COMPACTION_SUPPORTED) {
9372  rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
9373  rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
9374  rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
9375  rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
9376  rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
9377  }
9378  else {
9379  rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
9380  rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
9381  rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
9382  rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
9383  rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
9384  }
9385 
9386  /* internal methods */
9387  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
9388 
9389 #if MALLOC_ALLOCATED_SIZE
9390  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
9391  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
9392 #endif
9393 
9394  VALUE rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
9395  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
9396  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
9397  rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
9398  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
9399  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
9400  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
9401  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
9402  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
9403 
9404  {
9405  VALUE opts;
9406  /* \GC build options */
9407  rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
9408 #define OPT(o) if (o) rb_ary_push(opts, rb_interned_str(#o, sizeof(#o) - 1))
9409  OPT(GC_DEBUG);
9410  OPT(USE_RGENGC);
9411  OPT(RGENGC_DEBUG);
9412  OPT(RGENGC_CHECK_MODE);
9413  OPT(RGENGC_PROFILE);
9414  OPT(RGENGC_ESTIMATE_OLDMALLOC);
9415  OPT(GC_PROFILE_MORE_DETAIL);
9416  OPT(GC_ENABLE_LAZY_SWEEP);
9417  OPT(CALC_EXACT_MALLOC_SIZE);
9418  OPT(MALLOC_ALLOCATED_SIZE);
9419  OPT(MALLOC_ALLOCATED_SIZE_CHECK);
9420  OPT(GC_PROFILE_DETAIL_MEMORY);
9421  OPT(GC_COMPACTION_SUPPORTED);
9422 #undef OPT
9423  OBJ_FREEZE(opts);
9424  }
9425 }
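
/*
 * Illustrative sketch, not part of the upstream source: the constants
 * registered by rb_gc_impl_init() above can be read back from C with the
 * ordinary constant and hash APIs.  Guarded out with #if 0.
 */
#if 0
static VALUE
example_read_internal_constants(void)
{
    VALUE constants = rb_const_get(rb_mGC, rb_intern("INTERNAL_CONSTANTS"));
    /* e.g. the usable slot size of the smallest heap, as exposed to Ruby */
    return rb_hash_aref(constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")));
}
#endif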