Ruby 3.4.0dev (2024-11-26 revision 7be0d505146deb4a8a641da3b1c0e4d2aa174784)
default.c
1 #include "ruby/internal/config.h"
2 
3 #include <signal.h>
4 
5 #ifndef _WIN32
6 # include <sys/mman.h>
7 # include <unistd.h>
8 # ifdef HAVE_SYS_PRCTL_H
9 # include <sys/prctl.h>
10 # endif
11 #endif
12 
13 #if !defined(PAGE_SIZE) && defined(HAVE_SYS_USER_H)
14 /* LIST_HEAD conflicts with sys/queue.h on macOS */
15 # include <sys/user.h>
16 #endif
17 
18 #include "internal/hash.h"
19 
20 #include "ruby/ruby.h"
21 #include "ruby/atomic.h"
22 #include "ruby/debug.h"
23 #include "ruby/thread.h"
24 #include "ruby/util.h"
25 #include "ruby/vm.h"
27 #include "ccan/list/list.h"
28 #include "darray.h"
29 #include "gc/gc.h"
30 #include "gc/gc_impl.h"
31 
32 #ifndef BUILDING_SHARED_GC
33 # include "probes.h"
34 #endif
35 
36 #include "debug_counter.h"
37 #include "internal/sanitizers.h"
38 
39 /* MALLOC_HEADERS_BEGIN */
40 #ifndef HAVE_MALLOC_USABLE_SIZE
41 # ifdef _WIN32
42 # define HAVE_MALLOC_USABLE_SIZE
43 # define malloc_usable_size(a) _msize(a)
44 # elif defined HAVE_MALLOC_SIZE
45 # define HAVE_MALLOC_USABLE_SIZE
46 # define malloc_usable_size(a) malloc_size(a)
47 # endif
48 #endif
49 
50 #ifdef HAVE_MALLOC_USABLE_SIZE
51 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
52 /* Alternative malloc header is included in ruby/missing.h */
53 # elif defined(HAVE_MALLOC_H)
54 # include <malloc.h>
55 # elif defined(HAVE_MALLOC_NP_H)
56 # include <malloc_np.h>
57 # elif defined(HAVE_MALLOC_MALLOC_H)
58 # include <malloc/malloc.h>
59 # endif
60 #endif
61 
62 #ifdef HAVE_MALLOC_TRIM
63 # include <malloc.h>
64 
65 # ifdef __EMSCRIPTEN__
66 /* malloc_trim is defined in emscripten/emmalloc.h on emscripten. */
67 # include <emscripten/emmalloc.h>
68 # endif
69 #endif
70 
71 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
72 # include <mach/task.h>
73 # include <mach/mach_init.h>
74 # include <mach/mach_port.h>
75 #endif
76 
77 #ifndef VM_CHECK_MODE
78 # define VM_CHECK_MODE RUBY_DEBUG
79 #endif
80 
81 // From ractor_core.h
82 #ifndef RACTOR_CHECK_MODE
83 # define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)
84 #endif
85 
86 #ifndef RUBY_DEBUG_LOG
87 # define RUBY_DEBUG_LOG(...)
88 #endif
89 
90 #ifndef GC_HEAP_INIT_SLOTS
91 #define GC_HEAP_INIT_SLOTS 10000
92 #endif
93 #ifndef GC_HEAP_FREE_SLOTS
94 #define GC_HEAP_FREE_SLOTS 4096
95 #endif
96 #ifndef GC_HEAP_GROWTH_FACTOR
97 #define GC_HEAP_GROWTH_FACTOR 1.8
98 #endif
99 #ifndef GC_HEAP_GROWTH_MAX_SLOTS
100 #define GC_HEAP_GROWTH_MAX_SLOTS 0 /* 0 disables the cap */
101 #endif
102 #ifndef GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO
103 # define GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO 0.01
104 #endif
105 #ifndef GC_HEAP_OLDOBJECT_LIMIT_FACTOR
106 #define GC_HEAP_OLDOBJECT_LIMIT_FACTOR 2.0
107 #endif
108 
109 #ifndef GC_HEAP_FREE_SLOTS_MIN_RATIO
110 #define GC_HEAP_FREE_SLOTS_MIN_RATIO 0.20
111 #endif
112 #ifndef GC_HEAP_FREE_SLOTS_GOAL_RATIO
113 #define GC_HEAP_FREE_SLOTS_GOAL_RATIO 0.40
114 #endif
115 #ifndef GC_HEAP_FREE_SLOTS_MAX_RATIO
116 #define GC_HEAP_FREE_SLOTS_MAX_RATIO 0.65
117 #endif
118 
119 #ifndef GC_MALLOC_LIMIT_MIN
120 #define GC_MALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
121 #endif
122 #ifndef GC_MALLOC_LIMIT_MAX
123 #define GC_MALLOC_LIMIT_MAX (32 * 1024 * 1024 /* 32MB */)
124 #endif
125 #ifndef GC_MALLOC_LIMIT_GROWTH_FACTOR
126 #define GC_MALLOC_LIMIT_GROWTH_FACTOR 1.4
127 #endif
128 
129 #ifndef GC_OLDMALLOC_LIMIT_MIN
130 #define GC_OLDMALLOC_LIMIT_MIN (16 * 1024 * 1024 /* 16MB */)
131 #endif
132 #ifndef GC_OLDMALLOC_LIMIT_GROWTH_FACTOR
133 #define GC_OLDMALLOC_LIMIT_GROWTH_FACTOR 1.2
134 #endif
135 #ifndef GC_OLDMALLOC_LIMIT_MAX
136 #define GC_OLDMALLOC_LIMIT_MAX (128 * 1024 * 1024 /* 128MB */)
137 #endif
138 
139 #ifndef GC_CAN_COMPILE_COMPACTION
140 #if defined(__wasi__) /* WebAssembly doesn't support signals */
141 # define GC_CAN_COMPILE_COMPACTION 0
142 #else
143 # define GC_CAN_COMPILE_COMPACTION 1
144 #endif
145 #endif
146 
147 #ifndef PRINT_ENTER_EXIT_TICK
148 # define PRINT_ENTER_EXIT_TICK 0
149 #endif
150 #ifndef PRINT_ROOT_TICKS
151 #define PRINT_ROOT_TICKS 0
152 #endif
153 
154 #define USE_TICK_T (PRINT_ENTER_EXIT_TICK || PRINT_ROOT_TICKS)
155 
156 #ifndef HEAP_COUNT
157 # define HEAP_COUNT 5
158 #endif
159 
160 typedef struct ractor_newobj_heap_cache {
161  struct free_slot *freelist;
162  struct heap_page *using_page;
163 } rb_ractor_newobj_heap_cache_t;
164 
165 typedef struct ractor_newobj_cache {
166  size_t incremental_mark_step_allocated_slots;
167  rb_ractor_newobj_heap_cache_t heap_caches[HEAP_COUNT];
168 } rb_ractor_newobj_cache_t;
169 
170 typedef struct {
171  size_t heap_init_slots[HEAP_COUNT];
172  size_t heap_free_slots;
173  double growth_factor;
174  size_t growth_max_slots;
175 
176  double heap_free_slots_min_ratio;
177  double heap_free_slots_goal_ratio;
178  double heap_free_slots_max_ratio;
179  double uncollectible_wb_unprotected_objects_limit_ratio;
180  double oldobject_limit_factor;
181 
182  size_t malloc_limit_min;
183  size_t malloc_limit_max;
184  double malloc_limit_growth_factor;
185 
186  size_t oldmalloc_limit_min;
187  size_t oldmalloc_limit_max;
188  double oldmalloc_limit_growth_factor;
189 } ruby_gc_params_t;
190 
191 static ruby_gc_params_t gc_params = {
192  { GC_HEAP_INIT_SLOTS },
193  GC_HEAP_FREE_SLOTS,
194  GC_HEAP_GROWTH_FACTOR,
195  GC_HEAP_GROWTH_MAX_SLOTS,
196 
197  GC_HEAP_FREE_SLOTS_MIN_RATIO,
198  GC_HEAP_FREE_SLOTS_GOAL_RATIO,
199  GC_HEAP_FREE_SLOTS_MAX_RATIO,
200  GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO,
201  GC_HEAP_OLDOBJECT_LIMIT_FACTOR,
202 
203  GC_MALLOC_LIMIT_MIN,
204  GC_MALLOC_LIMIT_MAX,
205  GC_MALLOC_LIMIT_GROWTH_FACTOR,
206 
207  GC_OLDMALLOC_LIMIT_MIN,
208  GC_OLDMALLOC_LIMIT_MAX,
209  GC_OLDMALLOC_LIMIT_GROWTH_FACTOR,
210 };
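/* These compile-time defaults can typically be overridden at boot through the
 * corresponding RUBY_GC_* environment variables (e.g. RUBY_GC_HEAP_GROWTH_FACTOR,
 * RUBY_GC_MALLOC_LIMIT_MAX), which are parsed later in this file. */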
211 
212 /* GC_DEBUG:
214  * Set to 1 to embed GC debugging information (allocation file and line) in each slot.
214  */
215 #ifndef GC_DEBUG
216 #define GC_DEBUG 0
217 #endif
218 
219 /* RGENGC_DEBUG:
220  * 1: basic information
221  * 2: remember set operation
222  * 3: mark
223  * 4:
224  * 5: sweep
225  */
226 #ifndef RGENGC_DEBUG
227 #ifdef RUBY_DEVEL
228 #define RGENGC_DEBUG -1
229 #else
230 #define RGENGC_DEBUG 0
231 #endif
232 #endif
233 #if RGENGC_DEBUG < 0 && !defined(_MSC_VER)
234 # define RGENGC_DEBUG_ENABLED(level) (-(RGENGC_DEBUG) >= (level) && ruby_rgengc_debug >= (level))
235 #elif defined(HAVE_VA_ARGS_MACRO)
236 # define RGENGC_DEBUG_ENABLED(level) ((RGENGC_DEBUG) >= (level))
237 #else
238 # define RGENGC_DEBUG_ENABLED(level) 0
239 #endif
240 int ruby_rgengc_debug;
241 
242 /* RGENGC_PROFILE
243  * 0: disable RGenGC profiling
244  * 1: enable profiling for basic information
245  * 2: enable profiling for each types
246  */
247 #ifndef RGENGC_PROFILE
248 # define RGENGC_PROFILE 0
249 #endif
250 
251 /* RGENGC_ESTIMATE_OLDMALLOC
252  * Enable/disable estimating the increase in malloc'ed memory held by old objects.
253  * If the estimate exceeds the threshold, a full GC is invoked.
254  * 0: disable estimation.
255  * 1: enable estimation.
256  */
257 #ifndef RGENGC_ESTIMATE_OLDMALLOC
258 # define RGENGC_ESTIMATE_OLDMALLOC 1
259 #endif
260 
261 #ifndef GC_PROFILE_MORE_DETAIL
262 # define GC_PROFILE_MORE_DETAIL 0
263 #endif
264 #ifndef GC_PROFILE_DETAIL_MEMORY
265 # define GC_PROFILE_DETAIL_MEMORY 0
266 #endif
267 #ifndef GC_ENABLE_LAZY_SWEEP
268 # define GC_ENABLE_LAZY_SWEEP 1
269 #endif
270 #ifndef CALC_EXACT_MALLOC_SIZE
271 # define CALC_EXACT_MALLOC_SIZE 0
272 #endif
273 #if defined(HAVE_MALLOC_USABLE_SIZE) || CALC_EXACT_MALLOC_SIZE > 0
274 # ifndef MALLOC_ALLOCATED_SIZE
275 # define MALLOC_ALLOCATED_SIZE 0
276 # endif
277 #else
278 # define MALLOC_ALLOCATED_SIZE 0
279 #endif
280 #ifndef MALLOC_ALLOCATED_SIZE_CHECK
281 # define MALLOC_ALLOCATED_SIZE_CHECK 0
282 #endif
283 
284 #ifndef GC_DEBUG_STRESS_TO_CLASS
285 # define GC_DEBUG_STRESS_TO_CLASS RUBY_DEBUG
286 #endif
287 
288 typedef enum {
289  GPR_FLAG_NONE = 0x000,
290  /* major reason */
291  GPR_FLAG_MAJOR_BY_NOFREE = 0x001,
292  GPR_FLAG_MAJOR_BY_OLDGEN = 0x002,
293  GPR_FLAG_MAJOR_BY_SHADY = 0x004,
294  GPR_FLAG_MAJOR_BY_FORCE = 0x008,
295 #if RGENGC_ESTIMATE_OLDMALLOC
296  GPR_FLAG_MAJOR_BY_OLDMALLOC = 0x020,
297 #endif
298  GPR_FLAG_MAJOR_MASK = 0x0ff,
299 
300  /* gc reason */
301  GPR_FLAG_NEWOBJ = 0x100,
302  GPR_FLAG_MALLOC = 0x200,
303  GPR_FLAG_METHOD = 0x400,
304  GPR_FLAG_CAPI = 0x800,
305  GPR_FLAG_STRESS = 0x1000,
306 
307  /* others */
308  GPR_FLAG_IMMEDIATE_SWEEP = 0x2000,
309  GPR_FLAG_HAVE_FINALIZE = 0x4000,
310  GPR_FLAG_IMMEDIATE_MARK = 0x8000,
311  GPR_FLAG_FULL_MARK = 0x10000,
312  GPR_FLAG_COMPACT = 0x20000,
313 
314  GPR_DEFAULT_REASON =
315  (GPR_FLAG_FULL_MARK | GPR_FLAG_IMMEDIATE_MARK |
316  GPR_FLAG_IMMEDIATE_SWEEP | GPR_FLAG_CAPI),
317 } gc_profile_record_flag;
318 
319 typedef struct gc_profile_record {
320  unsigned int flags;
321 
322  double gc_time;
323  double gc_invoke_time;
324 
325  size_t heap_total_objects;
326  size_t heap_use_size;
327  size_t heap_total_size;
328  size_t moved_objects;
329 
330 #if GC_PROFILE_MORE_DETAIL
331  double gc_mark_time;
332  double gc_sweep_time;
333 
334  size_t heap_use_pages;
335  size_t heap_live_objects;
336  size_t heap_free_objects;
337 
338  size_t allocate_increase;
339  size_t allocate_limit;
340 
341  double prepare_time;
342  size_t removing_objects;
343  size_t empty_objects;
344 #if GC_PROFILE_DETAIL_MEMORY
345  long maxrss;
346  long minflt;
347  long majflt;
348 #endif
349 #endif
350 #if MALLOC_ALLOCATED_SIZE
351  size_t allocated_size;
352 #endif
353 
354 #if RGENGC_PROFILE > 0
355  size_t old_objects;
356  size_t remembered_normal_objects;
357  size_t remembered_shady_objects;
358 #endif
359 } gc_profile_record;
360 
361 struct RMoved {
362  VALUE flags;
363  VALUE dummy;
364  VALUE destination;
365  uint32_t original_shape_id;
366 };
367 
368 #define RMOVED(obj) ((struct RMoved *)(obj))
369 
370 typedef uintptr_t bits_t;
371 enum {
372  BITS_SIZE = sizeof(bits_t),
373  BITS_BITLENGTH = ( BITS_SIZE * CHAR_BIT )
374 };
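/* On an LP64 build bits_t (uintptr_t) is 8 bytes, so BITS_BITLENGTH is 64 and
 * each bitmap word tracks 64 slots. */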
375 
376 struct heap_page_header {
377  struct heap_page *page;
378 };
379 
380 struct heap_page_body {
381  struct heap_page_header header;
382  /* char gap[]; */
383  /* RVALUE values[]; */
384 };
385 
386 #define STACK_CHUNK_SIZE 500
387 
388 typedef struct stack_chunk {
389  VALUE data[STACK_CHUNK_SIZE];
390  struct stack_chunk *next;
391 } stack_chunk_t;
392 
393 typedef struct mark_stack {
394  stack_chunk_t *chunk;
395  stack_chunk_t *cache;
396  int index;
397  int limit;
398  size_t cache_size;
399  size_t unused_cache_size;
400 } mark_stack_t;
401 
402 typedef int (*gc_compact_compare_func)(const void *l, const void *r, void *d);
403 
404 typedef struct rb_heap_struct {
405  short slot_size;
406 
407  /* Basic statistics */
408  size_t total_allocated_pages;
409  size_t force_major_gc_count;
410  size_t force_incremental_marking_finish_count;
411  size_t total_allocated_objects;
412  size_t total_freed_objects;
413  size_t final_slots_count;
414 
415  /* Sweeping statistics */
416  size_t freed_slots;
417  size_t empty_slots;
418 
419  struct heap_page *free_pages;
420  struct ccan_list_head pages;
421  struct heap_page *sweeping_page; /* iterator for .pages */
422  struct heap_page *compact_cursor;
423  uintptr_t compact_cursor_index;
424  struct heap_page *pooled_pages;
425  size_t total_pages; /* total page count in a heap */
426  size_t total_slots; /* total slot count (about total_pages * HEAP_PAGE_OBJ_LIMIT) */
427 
428 } rb_heap_t;
429 
430 enum {
431  gc_stress_no_major,
432  gc_stress_no_immediate_sweep,
433  gc_stress_full_mark_after_malloc,
434  gc_stress_max
435 };
436 
437 enum gc_mode {
438  gc_mode_none,
439  gc_mode_marking,
440  gc_mode_sweeping,
441  gc_mode_compacting,
442 };
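/* gc_mode_none holds between collections; an ordinary cycle moves through
 * gc_mode_marking and gc_mode_sweeping, and gc_mode_compacting is used while
 * objects are being moved during compaction. */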
443 
444 typedef struct rb_objspace {
445  struct {
446  size_t limit;
447  size_t increase;
448 #if MALLOC_ALLOCATED_SIZE
449  size_t allocated_size;
450  size_t allocations;
451 #endif
452  } malloc_params;
453 
454  struct rb_gc_config {
455  bool full_mark;
456  } gc_config;
457 
458  struct {
459  unsigned int mode : 2;
460  unsigned int immediate_sweep : 1;
461  unsigned int dont_gc : 1;
462  unsigned int dont_incremental : 1;
463  unsigned int during_gc : 1;
464  unsigned int during_compacting : 1;
465  unsigned int during_reference_updating : 1;
466  unsigned int gc_stressful: 1;
467  unsigned int has_newobj_hook: 1;
468  unsigned int during_minor_gc : 1;
469  unsigned int during_incremental_marking : 1;
470  unsigned int measure_gc : 1;
471  } flags;
472 
473  rb_event_flag_t hook_events;
474  unsigned long long next_object_id;
475 
476  rb_heap_t heaps[HEAP_COUNT];
477  size_t empty_pages_count;
478  struct heap_page *empty_pages;
479 
480  struct {
481  rb_atomic_t finalizing;
482  } atomic_flags;
483 
484  mark_stack_t mark_stack;
485  size_t marked_slots;
486 
487  struct {
488  rb_darray(struct heap_page *) sorted;
489 
490  size_t allocated_pages;
491  size_t freed_pages;
492  uintptr_t range[2];
493  size_t freeable_pages;
494 
495  size_t allocatable_slots;
496 
497  /* final */
498  VALUE deferred_final;
499  } heap_pages;
500 
501  st_table *finalizer_table;
502 
503  struct {
504  int run;
505  unsigned int latest_gc_info;
506  gc_profile_record *records;
507  gc_profile_record *current_record;
508  size_t next_index;
509  size_t size;
510 
511 #if GC_PROFILE_MORE_DETAIL
512  double prepare_time;
513 #endif
514  double invoke_time;
515 
516  size_t minor_gc_count;
517  size_t major_gc_count;
518  size_t compact_count;
519  size_t read_barrier_faults;
520 #if RGENGC_PROFILE > 0
521  size_t total_generated_normal_object_count;
522  size_t total_generated_shady_object_count;
523  size_t total_shade_operation_count;
524  size_t total_promoted_count;
525  size_t total_remembered_normal_object_count;
526  size_t total_remembered_shady_object_count;
527 
528 #if RGENGC_PROFILE >= 2
529  size_t generated_normal_object_count_types[RUBY_T_MASK];
530  size_t generated_shady_object_count_types[RUBY_T_MASK];
531  size_t shade_operation_count_types[RUBY_T_MASK];
532  size_t promoted_types[RUBY_T_MASK];
533  size_t remembered_normal_object_count_types[RUBY_T_MASK];
534  size_t remembered_shady_object_count_types[RUBY_T_MASK];
535 #endif
536 #endif /* RGENGC_PROFILE */
537 
538  /* temporary profiling space */
539  double gc_sweep_start_time;
540  size_t total_allocated_objects_at_gc_start;
541  size_t heap_used_at_gc_start;
542 
543  /* basic statistics */
544  size_t count;
545  unsigned long long marking_time_ns;
546  struct timespec marking_start_time;
547  unsigned long long sweeping_time_ns;
548  struct timespec sweeping_start_time;
549 
550  /* Weak references */
551  size_t weak_references_count;
552  size_t retained_weak_references_count;
553  } profile;
554 
555  VALUE gc_stress_mode;
556 
557  struct {
558  VALUE parent_object;
559  int need_major_gc;
560  size_t last_major_gc;
561  size_t uncollectible_wb_unprotected_objects;
562  size_t uncollectible_wb_unprotected_objects_limit;
563  size_t old_objects;
564  size_t old_objects_limit;
565 
566 #if RGENGC_ESTIMATE_OLDMALLOC
567  size_t oldmalloc_increase;
568  size_t oldmalloc_increase_limit;
569 #endif
570 
571 #if RGENGC_CHECK_MODE >= 2
572  struct st_table *allrefs_table;
573  size_t error_count;
574 #endif
575  } rgengc;
576 
577  struct {
578  size_t considered_count_table[T_MASK];
579  size_t moved_count_table[T_MASK];
580  size_t moved_up_count_table[T_MASK];
581  size_t moved_down_count_table[T_MASK];
582  size_t total_moved;
583 
584  /* This function will be used, if set, to sort the heap prior to compaction */
585  gc_compact_compare_func compare_func;
586  } rcompactor;
587 
588  struct {
589  size_t pooled_slots;
590  size_t step_slots;
591  } rincgc;
592 
593  st_table *id_to_obj_tbl;
594  st_table *obj_to_id_tbl;
595 
596 #if GC_DEBUG_STRESS_TO_CLASS
597  VALUE stress_to_class;
598 #endif
599 
600  rb_darray(VALUE *) weak_references;
601  rb_postponed_job_handle_t finalize_deferred_pjob;
602 
603  unsigned long live_ractor_cache_count;
604 } rb_objspace_t;
605 
606 #ifndef HEAP_PAGE_ALIGN_LOG
607 /* default tiny heap size: 64KiB */
608 #define HEAP_PAGE_ALIGN_LOG 16
609 #endif
610 
611 #if RACTOR_CHECK_MODE || GC_DEBUG
612 struct rvalue_overhead {
613 # if RACTOR_CHECK_MODE
614  uint32_t _ractor_belonging_id;
615 # endif
616 # if GC_DEBUG
617  const char *file;
618  int line;
619 # endif
620 };
621 
622 // Make sure that RVALUE_OVERHEAD aligns to sizeof(VALUE)
623 # define RVALUE_OVERHEAD (sizeof(struct { \
624  union { \
625  struct rvalue_overhead overhead; \
626  VALUE value; \
627  }; \
628 }))
629 size_t rb_gc_impl_obj_slot_size(VALUE obj);
630 # define GET_RVALUE_OVERHEAD(obj) ((struct rvalue_overhead *)((uintptr_t)obj + rb_gc_impl_obj_slot_size(obj)))
631 #else
632 # define RVALUE_OVERHEAD 0
633 #endif
634 
635 #define BASE_SLOT_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]) + RVALUE_OVERHEAD)
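/* On a typical 64-bit build with RVALUE_OVERHEAD == 0 this works out to
 * 16 + 3 * 8 == 40 bytes, the smallest slot size. */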
636 
637 #ifndef MAX
638 # define MAX(a, b) (((a) > (b)) ? (a) : (b))
639 #endif
640 #ifndef MIN
641 # define MIN(a, b) (((a) < (b)) ? (a) : (b))
642 #endif
643 #define roomof(x, y) (((x) + (y) - 1) / (y))
644 #define CEILDIV(i, mod) roomof(i, mod)
645 enum {
646  HEAP_PAGE_ALIGN = (1UL << HEAP_PAGE_ALIGN_LOG),
647  HEAP_PAGE_ALIGN_MASK = (~(~0UL << HEAP_PAGE_ALIGN_LOG)),
648  HEAP_PAGE_SIZE = HEAP_PAGE_ALIGN,
649  HEAP_PAGE_OBJ_LIMIT = (unsigned int)((HEAP_PAGE_SIZE - sizeof(struct heap_page_header)) / BASE_SLOT_SIZE),
650  HEAP_PAGE_BITMAP_LIMIT = CEILDIV(CEILDIV(HEAP_PAGE_SIZE, BASE_SLOT_SIZE), BITS_BITLENGTH),
651  HEAP_PAGE_BITMAP_SIZE = (BITS_SIZE * HEAP_PAGE_BITMAP_LIMIT),
652 };
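/* With the default 64 KiB alignment and a 40-byte BASE_SLOT_SIZE (typical
 * 64-bit build), HEAP_PAGE_OBJ_LIMIT comes to 1638 slots and
 * HEAP_PAGE_BITMAP_LIMIT to CEILDIV(1639, 64) == 26 words, i.e. 208 bytes per
 * bitmap. */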
653 #define HEAP_PAGE_ALIGN (1 << HEAP_PAGE_ALIGN_LOG)
654 #define HEAP_PAGE_SIZE HEAP_PAGE_ALIGN
655 
656 #if !defined(INCREMENTAL_MARK_STEP_ALLOCATIONS)
657 # define INCREMENTAL_MARK_STEP_ALLOCATIONS 500
658 #endif
659 
660 #undef INIT_HEAP_PAGE_ALLOC_USE_MMAP
661 /* Must define either HEAP_PAGE_ALLOC_USE_MMAP or
662  * INIT_HEAP_PAGE_ALLOC_USE_MMAP. */
663 
664 #ifndef HAVE_MMAP
665 /* We can't use mmap, of course, if it is not available. */
666 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
667 
668 #elif defined(__wasm__)
669 /* wasmtime does not have proper support for mmap.
670  * See https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-rationale.md#why-no-mmap-and-friends
671  */
672 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
673 
674 #elif HAVE_CONST_PAGE_SIZE
675 /* If we have the PAGE_SIZE and it is a constant, then we can directly use it. */
676 static const bool HEAP_PAGE_ALLOC_USE_MMAP = (PAGE_SIZE <= HEAP_PAGE_SIZE);
677 
678 #elif defined(PAGE_MAX_SIZE) && (PAGE_MAX_SIZE <= HEAP_PAGE_SIZE)
679 /* If we can use the maximum page size. */
680 static const bool HEAP_PAGE_ALLOC_USE_MMAP = true;
681 
682 #elif defined(PAGE_SIZE)
683 /* If the PAGE_SIZE macro can be used dynamically. */
684 # define INIT_HEAP_PAGE_ALLOC_USE_MMAP (PAGE_SIZE <= HEAP_PAGE_SIZE)
685 
686 #elif defined(HAVE_SYSCONF) && defined(_SC_PAGE_SIZE)
687 /* If we can use sysconf to determine the page size. */
688 # define INIT_HEAP_PAGE_ALLOC_USE_MMAP (sysconf(_SC_PAGE_SIZE) <= HEAP_PAGE_SIZE)
689 
690 #else
691 /* Otherwise we can't determine the system page size, so don't use mmap. */
692 static const bool HEAP_PAGE_ALLOC_USE_MMAP = false;
693 #endif
694 
695 #ifdef INIT_HEAP_PAGE_ALLOC_USE_MMAP
696 /* We can determine the system page size at runtime. */
697 # define HEAP_PAGE_ALLOC_USE_MMAP (heap_page_alloc_use_mmap != false)
698 
699 static bool heap_page_alloc_use_mmap;
700 #endif
701 
702 #define RVALUE_AGE_BIT_COUNT 2
703 #define RVALUE_AGE_BIT_MASK (((bits_t)1 << RVALUE_AGE_BIT_COUNT) - 1)
704 #define RVALUE_OLD_AGE 3
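/* Ages run from 0 to RVALUE_OLD_AGE; an object that reaches age 3 is treated
 * as old (promoted), and two bits per slot are enough to store the value. */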
705 
706 struct free_slot {
707  VALUE flags; /* always 0 for freed obj */
708  struct free_slot *next;
709 };
710 
711 struct heap_page {
712  unsigned short slot_size;
713  unsigned short total_slots;
714  unsigned short free_slots;
715  unsigned short final_slots;
716  unsigned short pinned_slots;
717  struct {
718  unsigned int before_sweep : 1;
719  unsigned int has_remembered_objects : 1;
720  unsigned int has_uncollectible_wb_unprotected_objects : 1;
721  } flags;
722 
723  rb_heap_t *heap;
724 
725  struct heap_page *free_next;
726  struct heap_page_body *body;
727  uintptr_t start;
728  struct free_slot *freelist;
729  struct ccan_list_node page_node;
730 
731  bits_t wb_unprotected_bits[HEAP_PAGE_BITMAP_LIMIT];
732  /* the following three bitmaps are cleared at the beginning of full GC */
733  bits_t mark_bits[HEAP_PAGE_BITMAP_LIMIT];
734  bits_t uncollectible_bits[HEAP_PAGE_BITMAP_LIMIT];
735  bits_t marking_bits[HEAP_PAGE_BITMAP_LIMIT];
736 
737  bits_t remembered_bits[HEAP_PAGE_BITMAP_LIMIT];
738 
739  /* If set, the object is not movable */
740  bits_t pinned_bits[HEAP_PAGE_BITMAP_LIMIT];
741  bits_t age_bits[HEAP_PAGE_BITMAP_LIMIT * RVALUE_AGE_BIT_COUNT];
742 };
743 
744 /*
745  * When ASAN is enabled, this poisons the freelist pointer so it cannot be written to until it is unlocked
746  */
747 static void
748 asan_lock_freelist(struct heap_page *page)
749 {
750  asan_poison_memory_region(&page->freelist, sizeof(struct free_slot *));
751 }
752 
753 /*
754  * When ASAN is enabled, this unpoisons the freelist pointer so it can be written to again
755  */
756 static void
757 asan_unlock_freelist(struct heap_page *page)
758 {
759  asan_unpoison_memory_region(&page->freelist, sizeof(struct free_slot *), false);
760 }
761 
762 static inline bool
763 heap_page_in_global_empty_pages_pool(rb_objspace_t *objspace, struct heap_page *page)
764 {
765  if (page->total_slots == 0) {
766  GC_ASSERT(page->start == 0);
767  GC_ASSERT(page->slot_size == 0);
768  GC_ASSERT(page->heap == NULL);
769  GC_ASSERT(page->free_slots == 0);
770  asan_unpoisoning_memory_region(&page->freelist, sizeof(&page->freelist)) {
771  GC_ASSERT(page->freelist == NULL);
772  }
773 
774  return true;
775  }
776  else {
777  GC_ASSERT(page->start != 0);
778  GC_ASSERT(page->slot_size != 0);
779  GC_ASSERT(page->heap != NULL);
780 
781  return false;
782  }
783 }
784 
785 #define GET_PAGE_BODY(x) ((struct heap_page_body *)((bits_t)(x) & ~(HEAP_PAGE_ALIGN_MASK)))
786 #define GET_PAGE_HEADER(x) (&GET_PAGE_BODY(x)->header)
787 #define GET_HEAP_PAGE(x) (GET_PAGE_HEADER(x)->page)
788 
789 #define NUM_IN_PAGE(p) (((bits_t)(p) & HEAP_PAGE_ALIGN_MASK) / BASE_SLOT_SIZE)
790 #define BITMAP_INDEX(p) (NUM_IN_PAGE(p) / BITS_BITLENGTH )
791 #define BITMAP_OFFSET(p) (NUM_IN_PAGE(p) & (BITS_BITLENGTH-1))
792 #define BITMAP_BIT(p) ((bits_t)1 << BITMAP_OFFSET(p))
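/* Example: a slot with NUM_IN_PAGE(p) == 70 maps to bitmap word 1 (70 / 64)
 * at bit offset 6, so BITMAP_BIT(p) == (bits_t)1 << 6 on a 64-bit build. */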
793 
794 /* Bitmap Operations */
795 #define MARKED_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] & BITMAP_BIT(p))
796 #define MARK_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] | BITMAP_BIT(p))
797 #define CLEAR_IN_BITMAP(bits, p) ((bits)[BITMAP_INDEX(p)] = (bits)[BITMAP_INDEX(p)] & ~BITMAP_BIT(p))
798 
799 /* getting bitmap */
800 #define GET_HEAP_MARK_BITS(x) (&GET_HEAP_PAGE(x)->mark_bits[0])
801 #define GET_HEAP_PINNED_BITS(x) (&GET_HEAP_PAGE(x)->pinned_bits[0])
802 #define GET_HEAP_UNCOLLECTIBLE_BITS(x) (&GET_HEAP_PAGE(x)->uncollectible_bits[0])
803 #define GET_HEAP_WB_UNPROTECTED_BITS(x) (&GET_HEAP_PAGE(x)->wb_unprotected_bits[0])
804 #define GET_HEAP_MARKING_BITS(x) (&GET_HEAP_PAGE(x)->marking_bits[0])
805 
806 #define GC_SWEEP_PAGES_FREEABLE_PER_STEP 3
807 
808 #define RVALUE_AGE_BITMAP_INDEX(n) (NUM_IN_PAGE(n) / (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT))
809 #define RVALUE_AGE_BITMAP_OFFSET(n) ((NUM_IN_PAGE(n) % (BITS_BITLENGTH / RVALUE_AGE_BIT_COUNT)) * RVALUE_AGE_BIT_COUNT)
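/* With 64-bit bits_t each age_bits word packs the 2-bit ages of 32 slots:
 * slot n uses word n / 32, bits (n % 32) * 2 and (n % 32) * 2 + 1. */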
810 
811 static int
812 RVALUE_AGE_GET(VALUE obj)
813 {
814  bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
815  return (int)(age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] >> RVALUE_AGE_BITMAP_OFFSET(obj)) & RVALUE_AGE_BIT_MASK;
816 }
817 
818 static void
819 RVALUE_AGE_SET(VALUE obj, int age)
820 {
821  RUBY_ASSERT(age <= RVALUE_OLD_AGE);
822  bits_t *age_bits = GET_HEAP_PAGE(obj)->age_bits;
823  // clear the bits
824  age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] &= ~(RVALUE_AGE_BIT_MASK << (RVALUE_AGE_BITMAP_OFFSET(obj)));
825  // shift the correct value in
826  age_bits[RVALUE_AGE_BITMAP_INDEX(obj)] |= ((bits_t)age << RVALUE_AGE_BITMAP_OFFSET(obj));
827  if (age == RVALUE_OLD_AGE) {
828  RB_FL_SET_RAW(obj, RUBY_FL_PROMOTED);
829  }
830  else {
831  RB_FL_UNSET_RAW(obj, RUBY_FL_PROMOTED);
832  }
833 }
834 
835 #define malloc_limit objspace->malloc_params.limit
836 #define malloc_increase objspace->malloc_params.increase
837 #define malloc_allocated_size objspace->malloc_params.allocated_size
838 #define heap_pages_lomem objspace->heap_pages.range[0]
839 #define heap_pages_himem objspace->heap_pages.range[1]
840 #define heap_pages_freeable_pages objspace->heap_pages.freeable_pages
841 #define heap_pages_deferred_final objspace->heap_pages.deferred_final
842 #define heaps objspace->heaps
843 #define during_gc objspace->flags.during_gc
844 #define finalizing objspace->atomic_flags.finalizing
845 #define finalizer_table objspace->finalizer_table
846 #define ruby_gc_stressful objspace->flags.gc_stressful
847 #define ruby_gc_stress_mode objspace->gc_stress_mode
848 #if GC_DEBUG_STRESS_TO_CLASS
849 #define stress_to_class objspace->stress_to_class
850 #define set_stress_to_class(c) (stress_to_class = (c))
851 #else
852 #define stress_to_class (objspace, 0)
853 #define set_stress_to_class(c) (objspace, (c))
854 #endif
855 
856 #if 0
857 #define dont_gc_on() (fprintf(stderr, "dont_gc_on@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 1)
858 #define dont_gc_off() (fprintf(stderr, "dont_gc_off@%s:%d\n", __FILE__, __LINE__), objspace->flags.dont_gc = 0)
859 #define dont_gc_set(b) (fprintf(stderr, "dont_gc_set(%d)@%s:%d\n", (int)(b), __FILE__, __LINE__), objspace->flags.dont_gc = (int)(b))
860 #define dont_gc_val() (objspace->flags.dont_gc)
861 #else
862 #define dont_gc_on() (objspace->flags.dont_gc = 1)
863 #define dont_gc_off() (objspace->flags.dont_gc = 0)
864 #define dont_gc_set(b) (objspace->flags.dont_gc = (int)(b))
865 #define dont_gc_val() (objspace->flags.dont_gc)
866 #endif
867 
868 #define gc_config_full_mark_set(b) (objspace->gc_config.full_mark = (int)(b))
869 #define gc_config_full_mark_val (objspace->gc_config.full_mark)
870 
871 #ifndef DURING_GC_COULD_MALLOC_REGION_START
872 # define DURING_GC_COULD_MALLOC_REGION_START() \
873  assert(rb_during_gc()); \
874  bool _prev_enabled = rb_gc_impl_gc_enabled_p(objspace); \
875  rb_gc_impl_gc_disable(objspace, false)
876 #endif
877 
878 #ifndef DURING_GC_COULD_MALLOC_REGION_END
879 # define DURING_GC_COULD_MALLOC_REGION_END() \
880  if (_prev_enabled) rb_gc_impl_gc_enable(objspace)
881 #endif
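/* Intended usage pattern (illustrative sketch only): wrap any allocation that
 * may happen while the collector is running, so a malloc-triggered GC cannot
 * start re-entrantly:
 *
 *   DURING_GC_COULD_MALLOC_REGION_START();
 *   {
 *       table = st_init_numtable();
 *   }
 *   DURING_GC_COULD_MALLOC_REGION_END();
 */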
882 
883 static inline enum gc_mode
884 gc_mode_verify(enum gc_mode mode)
885 {
886 #if RGENGC_CHECK_MODE > 0
887  switch (mode) {
888  case gc_mode_none:
889  case gc_mode_marking:
890  case gc_mode_sweeping:
891  case gc_mode_compacting:
892  break;
893  default:
894  rb_bug("gc_mode_verify: unreachable (%d)", (int)mode);
895  }
896 #endif
897  return mode;
898 }
899 
900 static inline bool
901 has_sweeping_pages(rb_objspace_t *objspace)
902 {
903  for (int i = 0; i < HEAP_COUNT; i++) {
904  if ((&heaps[i])->sweeping_page) {
905  return TRUE;
906  }
907  }
908  return FALSE;
909 }
910 
911 static inline size_t
912 heap_eden_total_pages(rb_objspace_t *objspace)
913 {
914  size_t count = 0;
915  for (int i = 0; i < HEAP_COUNT; i++) {
916  count += (&heaps[i])->total_pages;
917  }
918  return count;
919 }
920 
921 static inline size_t
922 total_allocated_objects(rb_objspace_t *objspace)
923 {
924  size_t count = 0;
925  for (int i = 0; i < HEAP_COUNT; i++) {
926  rb_heap_t *heap = &heaps[i];
927  count += heap->total_allocated_objects;
928  }
929  return count;
930 }
931 
932 static inline size_t
933 total_freed_objects(rb_objspace_t *objspace)
934 {
935  size_t count = 0;
936  for (int i = 0; i < HEAP_COUNT; i++) {
937  rb_heap_t *heap = &heaps[i];
938  count += heap->total_freed_objects;
939  }
940  return count;
941 }
942 
943 static inline size_t
944 total_final_slots_count(rb_objspace_t *objspace)
945 {
946  size_t count = 0;
947  for (int i = 0; i < HEAP_COUNT; i++) {
948  rb_heap_t *heap = &heaps[i];
949  count += heap->final_slots_count;
950  }
951  return count;
952 }
953 
954 #define gc_mode(objspace) gc_mode_verify((enum gc_mode)(objspace)->flags.mode)
955 #define gc_mode_set(objspace, m) ((objspace)->flags.mode = (unsigned int)gc_mode_verify(m))
956 #define gc_needs_major_flags objspace->rgengc.need_major_gc
957 
958 #define is_marking(objspace) (gc_mode(objspace) == gc_mode_marking)
959 #define is_sweeping(objspace) (gc_mode(objspace) == gc_mode_sweeping)
960 #define is_full_marking(objspace) ((objspace)->flags.during_minor_gc == FALSE)
961 #define is_incremental_marking(objspace) ((objspace)->flags.during_incremental_marking != FALSE)
962 #define will_be_incremental_marking(objspace) ((objspace)->rgengc.need_major_gc != GPR_FLAG_NONE)
963 #define GC_INCREMENTAL_SWEEP_SLOT_COUNT 2048
964 #define GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT 1024
965 #define is_lazy_sweeping(objspace) (GC_ENABLE_LAZY_SWEEP && has_sweeping_pages(objspace))
966 
967 #if SIZEOF_LONG == SIZEOF_VOIDP
968 # define obj_id_to_ref(objid) ((objid) ^ FIXNUM_FLAG) /* unset FIXNUM_FLAG */
969 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
970 # define obj_id_to_ref(objid) (FIXNUM_P(objid) ? \
971  ((objid) ^ FIXNUM_FLAG) : (NUM2PTR(objid) << 1))
972 #else
973 # error not supported
974 #endif
975 
976 struct RZombie {
977  struct RBasic basic;
978  VALUE next;
979  void (*dfree)(void *);
980  void *data;
981 };
982 
983 #define RZOMBIE(o) ((struct RZombie *)(o))
984 
985 int ruby_disable_gc = 0;
986 int ruby_enable_autocompact = 0;
987 #if RGENGC_CHECK_MODE
988 gc_compact_compare_func ruby_autocompact_compare_func;
989 #endif
990 
991 static void init_mark_stack(mark_stack_t *stack);
992 static int garbage_collect(rb_objspace_t *, unsigned int reason);
993 
994 static int gc_start(rb_objspace_t *objspace, unsigned int reason);
995 static void gc_rest(rb_objspace_t *objspace);
996 
997 enum gc_enter_event {
998  gc_enter_event_start,
999  gc_enter_event_continue,
1000  gc_enter_event_rest,
1001  gc_enter_event_finalizer,
1002 };
1003 
1004 static inline void gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1005 static inline void gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev);
1006 static void gc_marking_enter(rb_objspace_t *objspace);
1007 static void gc_marking_exit(rb_objspace_t *objspace);
1008 static void gc_sweeping_enter(rb_objspace_t *objspace);
1009 static void gc_sweeping_exit(rb_objspace_t *objspace);
1010 static bool gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1011 
1012 static void gc_sweep(rb_objspace_t *objspace);
1013 static void gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap);
1014 static void gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *heap);
1015 
1016 static inline void gc_mark(rb_objspace_t *objspace, VALUE ptr);
1017 static inline void gc_pin(rb_objspace_t *objspace, VALUE ptr);
1018 static inline void gc_mark_and_pin(rb_objspace_t *objspace, VALUE ptr);
1019 
1020 static int gc_mark_stacked_objects_incremental(rb_objspace_t *, size_t count);
1021 NO_SANITIZE("memory", static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr));
1022 
1023 static void gc_verify_internal_consistency(void *objspace_ptr);
1024 
1025 static double getrusage_time(void);
1026 static inline void gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason);
1027 static inline void gc_prof_timer_start(rb_objspace_t *);
1028 static inline void gc_prof_timer_stop(rb_objspace_t *);
1029 static inline void gc_prof_mark_timer_start(rb_objspace_t *);
1030 static inline void gc_prof_mark_timer_stop(rb_objspace_t *);
1031 static inline void gc_prof_sweep_timer_start(rb_objspace_t *);
1032 static inline void gc_prof_sweep_timer_stop(rb_objspace_t *);
1033 static inline void gc_prof_set_malloc_info(rb_objspace_t *);
1034 static inline void gc_prof_set_heap_info(rb_objspace_t *);
1035 
1036 #define gc_prof_record(objspace) (objspace)->profile.current_record
1037 #define gc_prof_enabled(objspace) ((objspace)->profile.run && (objspace)->profile.current_record)
1038 
1039 #ifdef HAVE_VA_ARGS_MACRO
1040 # define gc_report(level, objspace, ...) \
1041  if (!RGENGC_DEBUG_ENABLED(level)) {} else gc_report_body(level, objspace, __VA_ARGS__)
1042 #else
1043 # define gc_report if (!RGENGC_DEBUG_ENABLED(0)) {} else gc_report_body
1044 #endif
1045 PRINTF_ARGS(static void gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...), 3, 4);
1046 
1047 static void gc_finalize_deferred(void *dmy);
1048 
1049 #if USE_TICK_T
1050 
1051 /* the following code is only for internal tuning. */
1052 
1053 /* Source code to use RDTSC is quoted and modified from
1054  * https://www.mcs.anl.gov/~kazutomo/rdtsc.html
1055  * written by Kazutomo Yoshii <kazutomo@mcs.anl.gov>
1056  */
1057 
1058 #if defined(__GNUC__) && defined(__i386__)
1059 typedef unsigned long long tick_t;
1060 #define PRItick "llu"
1061 static inline tick_t
1062 tick(void)
1063 {
1064  unsigned long long int x;
1065  __asm__ __volatile__ ("rdtsc" : "=A" (x));
1066  return x;
1067 }
1068 
1069 #elif defined(__GNUC__) && defined(__x86_64__)
1070 typedef unsigned long long tick_t;
1071 #define PRItick "llu"
1072 
1073 static __inline__ tick_t
1074 tick(void)
1075 {
1076  unsigned long hi, lo;
1077  __asm__ __volatile__ ("rdtsc" : "=a"(lo), "=d"(hi));
1078  return ((unsigned long long)lo)|( ((unsigned long long)hi)<<32);
1079 }
1080 
1081 #elif defined(__powerpc64__) && (GCC_VERSION_SINCE(4,8,0) || defined(__clang__))
1082 typedef unsigned long long tick_t;
1083 #define PRItick "llu"
1084 
1085 static __inline__ tick_t
1086 tick(void)
1087 {
1088  unsigned long long val = __builtin_ppc_get_timebase();
1089  return val;
1090 }
1091 
1092 /* Implementation for macOS PPC by @nobu
1093  * See: https://github.com/ruby/ruby/pull/5975#discussion_r890045558
1094  */
1095 #elif defined(__POWERPC__) && defined(__APPLE__)
1096 typedef unsigned long long tick_t;
1097 #define PRItick "llu"
1098 
1099 static __inline__ tick_t
1100 tick(void)
1101 {
1102  unsigned long int upper, lower, tmp;
1103  # define mftbu(r) __asm__ volatile("mftbu %0" : "=r"(r))
1104  # define mftb(r) __asm__ volatile("mftb %0" : "=r"(r))
1105  do {
1106  mftbu(upper);
1107  mftb(lower);
1108  mftbu(tmp);
1109  } while (tmp != upper);
1110  return ((tick_t)upper << 32) | lower;
1111 }
1112 
1113 #elif defined(__aarch64__) && defined(__GNUC__)
1114 typedef unsigned long tick_t;
1115 #define PRItick "lu"
1116 
1117 static __inline__ tick_t
1118 tick(void)
1119 {
1120  unsigned long val;
1121  __asm__ __volatile__ ("mrs %0, cntvct_el0" : "=r" (val));
1122  return val;
1123 }
1124 
1125 
1126 #elif defined(_WIN32) && defined(_MSC_VER)
1127 #include <intrin.h>
1128 typedef unsigned __int64 tick_t;
1129 #define PRItick "llu"
1130 
1131 static inline tick_t
1132 tick(void)
1133 {
1134  return __rdtsc();
1135 }
1136 
1137 #else /* use clock */
1138 typedef clock_t tick_t;
1139 #define PRItick "llu"
1140 
1141 static inline tick_t
1142 tick(void)
1143 {
1144  return clock();
1145 }
1146 #endif /* TSC */
1147 #else /* USE_TICK_T */
1148 #define MEASURE_LINE(expr) expr
1149 #endif /* USE_TICK_T */
1150 
1151 static inline VALUE check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj);
1152 
1153 #define RVALUE_MARKED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(obj), (obj))
1154 #define RVALUE_WB_UNPROTECTED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), (obj))
1155 #define RVALUE_MARKING_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), (obj))
1156 #define RVALUE_UNCOLLECTIBLE_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), (obj))
1157 #define RVALUE_PINNED_BITMAP(obj) MARKED_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), (obj))
1158 
1159 static inline int
1160 RVALUE_MARKED(rb_objspace_t *objspace, VALUE obj)
1161 {
1162  check_rvalue_consistency(objspace, obj);
1163  return RVALUE_MARKED_BITMAP(obj) != 0;
1164 }
1165 
1166 static inline int
1167 RVALUE_PINNED(rb_objspace_t *objspace, VALUE obj)
1168 {
1169  check_rvalue_consistency(objspace, obj);
1170  return RVALUE_PINNED_BITMAP(obj) != 0;
1171 }
1172 
1173 static inline int
1174 RVALUE_WB_UNPROTECTED(rb_objspace_t *objspace, VALUE obj)
1175 {
1176  check_rvalue_consistency(objspace, obj);
1177  return RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1178 }
1179 
1180 static inline int
1181 RVALUE_MARKING(rb_objspace_t *objspace, VALUE obj)
1182 {
1183  check_rvalue_consistency(objspace, obj);
1184  return RVALUE_MARKING_BITMAP(obj) != 0;
1185 }
1186 
1187 static inline int
1188 RVALUE_REMEMBERED(rb_objspace_t *objspace, VALUE obj)
1189 {
1190  check_rvalue_consistency(objspace, obj);
1191  return MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1192 }
1193 
1194 static inline int
1195 RVALUE_UNCOLLECTIBLE(rb_objspace_t *objspace, VALUE obj)
1196 {
1197  check_rvalue_consistency(objspace, obj);
1198  return RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1199 }
1200 
1201 #define RVALUE_PAGE_WB_UNPROTECTED(page, obj) MARKED_IN_BITMAP((page)->wb_unprotected_bits, (obj))
1202 #define RVALUE_PAGE_UNCOLLECTIBLE(page, obj) MARKED_IN_BITMAP((page)->uncollectible_bits, (obj))
1203 #define RVALUE_PAGE_MARKING(page, obj) MARKED_IN_BITMAP((page)->marking_bits, (obj))
1204 
1205 static int rgengc_remember(rb_objspace_t *objspace, VALUE obj);
1206 static void rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap);
1207 static void rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap);
1208 
1209 static int
1210 check_rvalue_consistency_force(rb_objspace_t *objspace, const VALUE obj, int terminate)
1211 {
1212  int err = 0;
1213 
1214  int lev = rb_gc_vm_lock_no_barrier();
1215  {
1216  if (SPECIAL_CONST_P(obj)) {
1217  fprintf(stderr, "check_rvalue_consistency: %p is a special const.\n", (void *)obj);
1218  err++;
1219  }
1220  else if (!is_pointer_to_heap(objspace, (void *)obj)) {
1221  struct heap_page *empty_page = objspace->empty_pages;
1222  while (empty_page) {
1223  if ((uintptr_t)empty_page->body <= (uintptr_t)obj &&
1224  (uintptr_t)obj < (uintptr_t)empty_page->body + HEAP_PAGE_SIZE) {
1225  GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, empty_page));
1226  fprintf(stderr, "check_rvalue_consistency: %p is in an empty page (%p).\n",
1227  (void *)obj, (void *)empty_page);
1228  err++;
1229  goto skip;
1230  }
 empty_page = empty_page->free_next;
1231  }
1232  fprintf(stderr, "check_rvalue_consistency: %p is not a Ruby object.\n", (void *)obj);
1233  err++;
1234  skip:
1235  ;
1236  }
1237  else {
1238  const int wb_unprotected_bit = RVALUE_WB_UNPROTECTED_BITMAP(obj) != 0;
1239  const int uncollectible_bit = RVALUE_UNCOLLECTIBLE_BITMAP(obj) != 0;
1240  const int mark_bit = RVALUE_MARKED_BITMAP(obj) != 0;
1241  const int marking_bit = RVALUE_MARKING_BITMAP(obj) != 0;
1242  const int remembered_bit = MARKED_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj) != 0;
1243  const int age = RVALUE_AGE_GET((VALUE)obj);
1244 
1245  if (heap_page_in_global_empty_pages_pool(objspace, GET_HEAP_PAGE(obj))) {
1246  fprintf(stderr, "check_rvalue_consistency: %s is in tomb page.\n", rb_obj_info(obj));
1247  err++;
1248  }
1249  if (BUILTIN_TYPE(obj) == T_NONE) {
1250  fprintf(stderr, "check_rvalue_consistency: %s is T_NONE.\n", rb_obj_info(obj));
1251  err++;
1252  }
1253  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
1254  fprintf(stderr, "check_rvalue_consistency: %s is T_ZOMBIE.\n", rb_obj_info(obj));
1255  err++;
1256  }
1257 
1258  if (BUILTIN_TYPE(obj) != T_DATA) {
1259  rb_obj_memsize_of((VALUE)obj);
1260  }
1261 
1262  /* check generation
1263  *
1264  * OLD == age == 3 && old-bitmap && mark-bit (except incremental marking)
1265  */
1266  if (age > 0 && wb_unprotected_bit) {
1267  fprintf(stderr, "check_rvalue_consistency: %s is not WB protected, but age is %d > 0.\n", rb_obj_info(obj), age);
1268  err++;
1269  }
1270 
1271  if (!is_marking(objspace) && uncollectible_bit && !mark_bit) {
1272  fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but is not marked while !gc.\n", rb_obj_info(obj));
1273  err++;
1274  }
1275 
1276  if (!is_full_marking(objspace)) {
1277  if (uncollectible_bit && age != RVALUE_OLD_AGE && !wb_unprotected_bit) {
1278  fprintf(stderr, "check_rvalue_consistency: %s is uncollectible, but not old (age: %d) and not WB unprotected.\n",
1279  rb_obj_info(obj), age);
1280  err++;
1281  }
1282  if (remembered_bit && age != RVALUE_OLD_AGE) {
1283  fprintf(stderr, "check_rvalue_consistency: %s is remembered, but not old (age: %d).\n",
1284  rb_obj_info(obj), age);
1285  err++;
1286  }
1287  }
1288 
1289  /*
1290  * check coloring
1291  *
1292  * marking:false marking:true
1293  * marked:false white *invalid*
1294  * marked:true black grey
1295  */
1296  if (is_incremental_marking(objspace) && marking_bit) {
1297  if (!is_marking(objspace) && !mark_bit) {
1298  fprintf(stderr, "check_rvalue_consistency: %s is marking, but not marked.\n", rb_obj_info(obj));
1299  err++;
1300  }
1301  }
1302  }
1303  }
1304  rb_gc_vm_unlock_no_barrier(lev);
1305 
1306  if (err > 0 && terminate) {
1307  rb_bug("check_rvalue_consistency_force: there are %d errors.", err);
1308  }
1309  return err;
1310 }
1311 
1312 #if RGENGC_CHECK_MODE == 0
1313 static inline VALUE
1314 check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj)
1315 {
1316  return obj;
1317 }
1318 #else
1319 static VALUE
1320 check_rvalue_consistency(rb_objspace_t *objspace, const VALUE obj)
1321 {
1322  check_rvalue_consistency_force(objspace, obj, TRUE);
1323  return obj;
1324 }
1325 #endif
1326 
1327 static inline bool
1328 gc_object_moved_p(rb_objspace_t *objspace, VALUE obj)
1329 {
1330  if (RB_SPECIAL_CONST_P(obj)) {
1331  return FALSE;
1332  }
1333  else {
1334  int ret;
1335  asan_unpoisoning_object(obj) {
1336  ret = BUILTIN_TYPE(obj) == T_MOVED;
1337  }
1338  return ret;
1339  }
1340 }
1341 
1342 static inline int
1343 RVALUE_OLD_P(rb_objspace_t *objspace, VALUE obj)
1344 {
1345  GC_ASSERT(!RB_SPECIAL_CONST_P(obj));
1346  check_rvalue_consistency(objspace, obj);
1347  // Because this will only ever be called on GC controlled objects,
1348  // we can use the faster _RAW function here
1349  return RB_OBJ_PROMOTED_RAW(obj);
1350 }
1351 
1352 static inline void
1353 RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1354 {
1355  MARK_IN_BITMAP(&page->uncollectible_bits[0], obj);
1356  objspace->rgengc.old_objects++;
1357 
1358 #if RGENGC_PROFILE >= 2
1359  objspace->profile.total_promoted_count++;
1360  objspace->profile.promoted_types[BUILTIN_TYPE(obj)]++;
1361 #endif
1362 }
1363 
1364 static inline void
1365 RVALUE_OLD_UNCOLLECTIBLE_SET(rb_objspace_t *objspace, VALUE obj)
1366 {
1367  RB_DEBUG_COUNTER_INC(obj_promote);
1368  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, GET_HEAP_PAGE(obj), obj);
1369 }
1370 
1371 /* set age to age+1 */
1372 static inline void
1373 RVALUE_AGE_INC(rb_objspace_t *objspace, VALUE obj)
1374 {
1375  int age = RVALUE_AGE_GET((VALUE)obj);
1376 
1377  if (RGENGC_CHECK_MODE && age == RVALUE_OLD_AGE) {
1378  rb_bug("RVALUE_AGE_INC: can not increment age of OLD object %s.", rb_obj_info(obj));
1379  }
1380 
1381  age++;
1382  RVALUE_AGE_SET(obj, age);
1383 
1384  if (age == RVALUE_OLD_AGE) {
1385  RVALUE_OLD_UNCOLLECTIBLE_SET(objspace, obj);
1386  }
1387 
1388  check_rvalue_consistency(objspace, obj);
1389 }
1390 
1391 static inline void
1392 RVALUE_AGE_SET_CANDIDATE(rb_objspace_t *objspace, VALUE obj)
1393 {
1394  check_rvalue_consistency(objspace, obj);
1395  GC_ASSERT(!RVALUE_OLD_P(objspace, obj));
1396  RVALUE_AGE_SET(obj, RVALUE_OLD_AGE - 1);
1397  check_rvalue_consistency(objspace, obj);
1398 }
1399 
1400 static inline void
1401 RVALUE_AGE_RESET(VALUE obj)
1402 {
1403  RVALUE_AGE_SET(obj, 0);
1404 }
1405 
1406 static inline void
1407 RVALUE_DEMOTE(rb_objspace_t *objspace, VALUE obj)
1408 {
1409  check_rvalue_consistency(objspace, obj);
1410  GC_ASSERT(RVALUE_OLD_P(objspace, obj));
1411 
1412  if (!is_incremental_marking(objspace) && RVALUE_REMEMBERED(objspace, obj)) {
1413  CLEAR_IN_BITMAP(GET_HEAP_PAGE(obj)->remembered_bits, obj);
1414  }
1415 
1416  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(obj), obj);
1417  RVALUE_AGE_RESET(obj);
1418 
1419  if (RVALUE_MARKED(objspace, obj)) {
1420  objspace->rgengc.old_objects--;
1421  }
1422 
1423  check_rvalue_consistency(objspace, obj);
1424 }
1425 
1426 static inline int
1427 RVALUE_BLACK_P(rb_objspace_t *objspace, VALUE obj)
1428 {
1429  return RVALUE_MARKED(objspace, obj) && !RVALUE_MARKING(objspace, obj);
1430 }
1431 
1432 static inline int
1433 RVALUE_WHITE_P(rb_objspace_t *objspace, VALUE obj)
1434 {
1435  return !RVALUE_MARKED(objspace, obj);
1436 }
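/* In tri-color terms: white = not yet marked, grey = marked with the marking
 * bit still set (children not fully traversed), black = marked with no
 * pending work (see the coloring table in check_rvalue_consistency_force). */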
1437 
1438 bool
1439 rb_gc_impl_gc_enabled_p(void *objspace_ptr)
1440 {
1441  rb_objspace_t *objspace = objspace_ptr;
1442  return !dont_gc_val();
1443 }
1444 
1445 void
1446 rb_gc_impl_gc_enable(void *objspace_ptr)
1447 {
1448  rb_objspace_t *objspace = objspace_ptr;
1449 
1450  dont_gc_off();
1451 }
1452 
1453 void
1454 rb_gc_impl_gc_disable(void *objspace_ptr, bool finish_current_gc)
1455 {
1456  rb_objspace_t *objspace = objspace_ptr;
1457 
1458  if (finish_current_gc) {
1459  gc_rest(objspace);
1460  }
1461 
1462  dont_gc_on();
1463 }
1464 
1465 /*
1466  --------------------------- ObjectSpace -----------------------------
1467 */
1468 
1469 static inline void *
1470 calloc1(size_t n)
1471 {
1472  return calloc(1, n);
1473 }
1474 
1475 void
1476 rb_gc_impl_set_event_hook(void *objspace_ptr, const rb_event_flag_t event)
1477 {
1478  rb_objspace_t *objspace = objspace_ptr;
1479  objspace->hook_events = event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK;
1480  objspace->flags.has_newobj_hook = !!(objspace->hook_events & RUBY_INTERNAL_EVENT_NEWOBJ);
1481 }
1482 
1483 unsigned long long
1484 rb_gc_impl_get_total_time(void *objspace_ptr)
1485 {
1486  rb_objspace_t *objspace = objspace_ptr;
1487 
1488  unsigned long long marking_time = objspace->profile.marking_time_ns;
1489  unsigned long long sweeping_time = objspace->profile.sweeping_time_ns;
1490 
1491  return marking_time + sweeping_time;
1492 }
1493 
1494 void
1495 rb_gc_impl_set_measure_total_time(void *objspace_ptr, VALUE flag)
1496 {
1497  rb_objspace_t *objspace = objspace_ptr;
1498 
1499  objspace->flags.measure_gc = RTEST(flag) ? TRUE : FALSE;
1500 }
1501 
1502 bool
1503 rb_gc_impl_get_measure_total_time(void *objspace_ptr)
1504 {
1505  rb_objspace_t *objspace = objspace_ptr;
1506 
1507  return objspace->flags.measure_gc;
1508 }
1509 
1510 static size_t
1511 minimum_slots_for_heap(rb_objspace_t *objspace, rb_heap_t *heap)
1512 {
1513  size_t heap_idx = heap - heaps;
1514  return gc_params.heap_init_slots[heap_idx];
1515 }
1516 
1517 static int
1518 object_id_cmp(st_data_t x, st_data_t y)
1519 {
1520  if (RB_TYPE_P(x, T_BIGNUM)) {
1521  return !rb_big_eql(x, y);
1522  }
1523  else {
1524  return x != y;
1525  }
1526 }
1527 
1528 static st_index_t
1529 object_id_hash(st_data_t n)
1530 {
1531  return FIX2LONG(rb_hash((VALUE)n));
1532 }
1533 
1534 #define OBJ_ID_INCREMENT (RUBY_IMMEDIATE_MASK + 1)
1535 #define OBJ_ID_INITIAL (OBJ_ID_INCREMENT)
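/* On a 64-bit build with flonums RUBY_IMMEDIATE_MASK is 0x07, so object ids
 * start at 8 and advance in steps of 8. */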
1536 
1537 static const struct st_hash_type object_id_hash_type = {
1538  object_id_cmp,
1539  object_id_hash,
1540 };
1541 
1542 /* garbage objects will be collected soon. */
1543 bool
1544 rb_gc_impl_garbage_object_p(void *objspace_ptr, VALUE ptr)
1545 {
1546  rb_objspace_t *objspace = objspace_ptr;
1547 
1548  bool dead = false;
1549 
1550  asan_unpoisoning_object(ptr) {
1551  switch (BUILTIN_TYPE(ptr)) {
1552  case T_NONE:
1553  case T_MOVED:
1554  case T_ZOMBIE:
1555  dead = true;
1556  break;
1557  default:
1558  break;
1559  }
1560  }
1561 
1562  if (dead) return true;
1563  return is_lazy_sweeping(objspace) && GET_HEAP_PAGE(ptr)->flags.before_sweep &&
1564  !RVALUE_MARKED(objspace, ptr);
1565 }
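/* Besides freed/zombie/moved slots, an object counts as garbage when lazy
 * sweeping is in progress, its page has not been swept yet, and it was left
 * unmarked by the last mark phase: the sweeper is guaranteed to reclaim it. */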
1566 
1567 VALUE
1568 rb_gc_impl_object_id_to_ref(void *objspace_ptr, VALUE object_id)
1569 {
1570  rb_objspace_t *objspace = objspace_ptr;
1571 
1572  VALUE obj;
1573  if (st_lookup(objspace->id_to_obj_tbl, object_id, &obj) &&
1574  !rb_gc_impl_garbage_object_p(objspace, obj)) {
1575  return obj;
1576  }
1577 
1578  if (rb_funcall(object_id, rb_intern(">="), 1, ULL2NUM(objspace->next_object_id))) {
1579  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not id value", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
1580  }
1581  else {
1582  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is recycled object", rb_funcall(object_id, rb_intern("to_s"), 1, INT2FIX(10)));
1583  }
1584 }
1585 
1586 VALUE
1587 rb_gc_impl_object_id(void *objspace_ptr, VALUE obj)
1588 {
1589  VALUE id;
1590  rb_objspace_t *objspace = objspace_ptr;
1591 
1592  unsigned int lev = rb_gc_vm_lock();
1593  if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
1594  st_data_t val;
1595  if (st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, &val)) {
1596  id = (VALUE)val;
1597  }
1598  else {
1599  rb_bug("rb_gc_impl_object_id: FL_SEEN_OBJ_ID flag set but not found in table");
1600  }
1601  }
1602  else {
1603  GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)obj, NULL));
1604 
1605  id = ULL2NUM(objspace->next_object_id);
1606  objspace->next_object_id += OBJ_ID_INCREMENT;
1607 
1608  st_insert(objspace->obj_to_id_tbl, (st_data_t)obj, (st_data_t)id);
1609  st_insert(objspace->id_to_obj_tbl, (st_data_t)id, (st_data_t)obj);
1610  FL_SET(obj, FL_SEEN_OBJ_ID);
1611  }
1612  rb_gc_vm_unlock(lev);
1613 
1614  return id;
1615 }
1616 
1617 static void free_stack_chunks(mark_stack_t *);
1618 static void mark_stack_free_cache(mark_stack_t *);
1619 static void heap_page_free(rb_objspace_t *objspace, struct heap_page *page);
1620 
1621 static inline void
1622 heap_page_add_freeobj(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
1623 {
1624  asan_unpoison_object(obj, false);
1625 
1626  asan_unlock_freelist(page);
1627 
1628  struct free_slot *slot = (struct free_slot *)obj;
1629  slot->flags = 0;
1630  slot->next = page->freelist;
1631  page->freelist = slot;
1632  asan_lock_freelist(page);
1633 
1634  RVALUE_AGE_RESET(obj);
1635 
1636  if (RGENGC_CHECK_MODE &&
1637  /* obj should belong to page */
1638  !(page->start <= (uintptr_t)obj &&
1639  (uintptr_t)obj < ((uintptr_t)page->start + (page->total_slots * page->slot_size)) &&
1640  obj % BASE_SLOT_SIZE == 0)) {
1641  rb_bug("heap_page_add_freeobj: %p is not rvalue.", (void *)obj);
1642  }
1643 
1644  asan_poison_object(obj);
1645  gc_report(3, objspace, "heap_page_add_freeobj: add %p to freelist\n", (void *)obj);
1646 }
1647 
1648 static void
1649 heap_allocatable_slots_expand(rb_objspace_t *objspace,
1650  rb_heap_t *heap, size_t free_slots, size_t total_slots)
1651 {
1652  double goal_ratio = gc_params.heap_free_slots_goal_ratio;
1653  size_t target_total_slots;
1654 
1655  if (goal_ratio == 0.0) {
1656  target_total_slots = (size_t)(total_slots * gc_params.growth_factor);
1657  }
1658  else if (total_slots == 0) {
1659  target_total_slots = minimum_slots_for_heap(objspace, heap);
1660  }
1661  else {
1662  /* Find `f' such that growing to f * total_slots slots makes the free-slot ratio equal goal_ratio:
1663  * (free_slots + (f - 1) * total_slots) / (f * total_slots) = goal_ratio => f = (total_slots - free_slots) / ((1 - goal_ratio) * total_slots)
1664  */
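 /* For example, with the default goal ratio of 0.40, total_slots == 10000 and
  * free_slots == 2000: f = 8000 / (0.6 * 10000) ~= 1.33, so the target grows
  * to about 13333 slots (still subject to the growth_factor and
  * growth_max_slots caps below). */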
1665  double f = (double)(total_slots - free_slots) / ((1 - goal_ratio) * total_slots);
1666 
1667  if (f > gc_params.growth_factor) f = gc_params.growth_factor;
1668  if (f < 1.0) f = 1.1;
1669 
1670  target_total_slots = (size_t)(f * total_slots);
1671 
1672  if (0) {
1673  fprintf(stderr,
1674  "free_slots(%8"PRIuSIZE")/total_slots(%8"PRIuSIZE")=%1.2f,"
1675  " G(%1.2f), f(%1.2f),"
1676  " total_slots(%8"PRIuSIZE") => target_total_slots(%8"PRIuSIZE")\n",
1677  free_slots, total_slots, free_slots/(double)total_slots,
1678  goal_ratio, f, total_slots, target_total_slots);
1679  }
1680  }
1681 
1682  if (gc_params.growth_max_slots > 0) {
1683  size_t max_total_slots = (size_t)(total_slots + gc_params.growth_max_slots);
1684  if (target_total_slots > max_total_slots) target_total_slots = max_total_slots;
1685  }
1686 
1687  size_t extend_slot_count = target_total_slots - total_slots;
1688  /* Extend by at least 1 page. */
1689  if (extend_slot_count == 0) extend_slot_count = 1;
1690 
1691  objspace->heap_pages.allocatable_slots += extend_slot_count;
1692 }
1693 
1694 static inline void
1695 heap_add_freepage(rb_heap_t *heap, struct heap_page *page)
1696 {
1697  asan_unlock_freelist(page);
1698  GC_ASSERT(page->free_slots != 0);
1699  GC_ASSERT(page->freelist != NULL);
1700 
1701  page->free_next = heap->free_pages;
1702  heap->free_pages = page;
1703 
1704  RUBY_DEBUG_LOG("page:%p freelist:%p", (void *)page, (void *)page->freelist);
1705 
1706  asan_lock_freelist(page);
1707 }
1708 
1709 static inline void
1710 heap_add_poolpage(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1711 {
1712  asan_unlock_freelist(page);
1713  GC_ASSERT(page->free_slots != 0);
1714  GC_ASSERT(page->freelist != NULL);
1715 
1716  page->free_next = heap->pooled_pages;
1717  heap->pooled_pages = page;
1718  objspace->rincgc.pooled_slots += page->free_slots;
1719 
1720  asan_lock_freelist(page);
1721 }
1722 
1723 static void
1724 heap_unlink_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1725 {
1726  ccan_list_del(&page->page_node);
1727  heap->total_pages--;
1728  heap->total_slots -= page->total_slots;
1729 }
1730 
1731 static void
1732 gc_aligned_free(void *ptr, size_t size)
1733 {
1734 #if defined __MINGW32__
1735  __mingw_aligned_free(ptr);
1736 #elif defined _WIN32
1737  _aligned_free(ptr);
1738 #elif defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN)
1739  free(ptr);
1740 #else
1741  free(((void**)ptr)[-1]);
1742 #endif
1743 }
1744 
1745 static void
1746 heap_page_body_free(struct heap_page_body *page_body)
1747 {
1748  GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1749 
1750  if (HEAP_PAGE_ALLOC_USE_MMAP) {
1751 #ifdef HAVE_MMAP
1752  GC_ASSERT(HEAP_PAGE_SIZE % sysconf(_SC_PAGE_SIZE) == 0);
1753  if (munmap(page_body, HEAP_PAGE_SIZE)) {
1754  rb_bug("heap_page_body_free: munmap failed");
1755  }
1756 #endif
1757  }
1758  else {
1759  gc_aligned_free(page_body, HEAP_PAGE_SIZE);
1760  }
1761 }
1762 
1763 static void
1764 heap_page_free(rb_objspace_t *objspace, struct heap_page *page)
1765 {
1766  objspace->heap_pages.freed_pages++;
1767  heap_page_body_free(page->body);
1768  free(page);
1769 }
1770 
1771 static void
1772 heap_pages_free_unused_pages(rb_objspace_t *objspace)
1773 {
1774  size_t pages_to_keep_count =
1775  // Get number of pages estimated for the smallest size pool
1776  CEILDIV(objspace->heap_pages.allocatable_slots, HEAP_PAGE_OBJ_LIMIT) *
1777  // Estimate the average slot size multiple
1778  (1 << (HEAP_COUNT / 2));
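 /* With HEAP_COUNT == 5 the multiplier is 1 << 2 == 4, i.e. keep roughly four
  * times the page estimate computed for the smallest slot size. */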
1779 
1780  if (objspace->empty_pages != NULL && objspace->empty_pages_count > pages_to_keep_count) {
1781  GC_ASSERT(objspace->empty_pages_count > 0);
1782  objspace->empty_pages = NULL;
1783  objspace->empty_pages_count = 0;
1784 
1785  size_t i, j;
1786  for (i = j = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
1787  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
1788 
1789  if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count == 0) {
1790  heap_page_free(objspace, page);
1791  }
1792  else {
1793  if (heap_page_in_global_empty_pages_pool(objspace, page) && pages_to_keep_count > 0) {
1794  page->free_next = objspace->empty_pages;
1795  objspace->empty_pages = page;
1796  objspace->empty_pages_count++;
1797  pages_to_keep_count--;
1798  }
1799 
1800  if (i != j) {
1801  rb_darray_set(objspace->heap_pages.sorted, j, page);
1802  }
1803  j++;
1804  }
1805  }
1806 
1807  rb_darray_pop(objspace->heap_pages.sorted, i - j);
1808  GC_ASSERT(rb_darray_size(objspace->heap_pages.sorted) == j);
1809 
1810  struct heap_page *hipage = rb_darray_get(objspace->heap_pages.sorted, rb_darray_size(objspace->heap_pages.sorted) - 1);
1811  uintptr_t himem = (uintptr_t)hipage->body + HEAP_PAGE_SIZE;
1812  GC_ASSERT(himem <= heap_pages_himem);
1813  heap_pages_himem = himem;
1814 
1815  struct heap_page *lopage = rb_darray_get(objspace->heap_pages.sorted, 0);
1816  uintptr_t lomem = (uintptr_t)lopage->body + sizeof(struct heap_page_header);
1817  GC_ASSERT(lomem >= heap_pages_lomem);
1818  heap_pages_lomem = lomem;
1819  }
1820 }
1821 
1822 static void *
1823 gc_aligned_malloc(size_t alignment, size_t size)
1824 {
1825  /* alignment must be a power of 2 */
1826  GC_ASSERT(((alignment - 1) & alignment) == 0);
1827  GC_ASSERT(alignment % sizeof(void*) == 0);
1828 
1829  void *res;
1830 
1831 #if defined __MINGW32__
1832  res = __mingw_aligned_malloc(size, alignment);
1833 #elif defined _WIN32
1834  void *_aligned_malloc(size_t, size_t);
1835  res = _aligned_malloc(size, alignment);
1836 #elif defined(HAVE_POSIX_MEMALIGN)
1837  if (posix_memalign(&res, alignment, size) != 0) {
1838  return NULL;
1839  }
1840 #elif defined(HAVE_MEMALIGN)
1841  res = memalign(alignment, size);
1842 #else
1843  char* aligned;
1844  res = malloc(alignment + size + sizeof(void*));
1845  aligned = (char*)res + alignment + sizeof(void*);
1846  aligned -= ((VALUE)aligned & (alignment - 1));
1847  ((void**)aligned)[-1] = res;
1848  res = (void*)aligned;
1849 #endif
1850 
1851  GC_ASSERT((uintptr_t)res % alignment == 0);
1852 
1853  return res;
1854 }
1855 
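/* Allocate one HEAP_PAGE_SIZE page body aligned to HEAP_PAGE_ALIGN. The mmap
 * path maps HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE bytes and munmaps the unaligned
 * head and tail; otherwise gc_aligned_malloc is used. Returns NULL when the
 * allocation fails. */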
1856 static struct heap_page_body *
1857 heap_page_body_allocate(void)
1858 {
1859  struct heap_page_body *page_body;
1860 
1861  if (HEAP_PAGE_ALLOC_USE_MMAP) {
1862 #ifdef HAVE_MMAP
1863  GC_ASSERT(HEAP_PAGE_ALIGN % sysconf(_SC_PAGE_SIZE) == 0);
1864 
1865  size_t mmap_size = HEAP_PAGE_ALIGN + HEAP_PAGE_SIZE;
1866  char *ptr = mmap(NULL, mmap_size,
1867  PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1868  if (ptr == MAP_FAILED) {
1869  return NULL;
1870  }
1871 
1872  // If we are building `default.c` as part of the ruby executable, we
1873  // may just call `ruby_annotate_mmap`. But if we are building
1874  // `default.c` as a shared library, we will not have access to private
1875  // symbols, and we have to either call prctl directly or make our own
1876  // wrapper.
1877 #if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
1878  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ptr, mmap_size, "Ruby:GC:default:heap_page_body_allocate");
1879  errno = 0;
1880 #endif
1881 
1882  char *aligned = ptr + HEAP_PAGE_ALIGN;
1883  aligned -= ((VALUE)aligned & (HEAP_PAGE_ALIGN - 1));
1884  GC_ASSERT(aligned > ptr);
1885  GC_ASSERT(aligned <= ptr + HEAP_PAGE_ALIGN);
1886 
1887  size_t start_out_of_range_size = aligned - ptr;
1888  GC_ASSERT(start_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1889  if (start_out_of_range_size > 0) {
1890  if (munmap(ptr, start_out_of_range_size)) {
1891  rb_bug("heap_page_body_allocate: munmap failed for start");
1892  }
1893  }
1894 
1895  size_t end_out_of_range_size = HEAP_PAGE_ALIGN - start_out_of_range_size;
1896  GC_ASSERT(end_out_of_range_size % sysconf(_SC_PAGE_SIZE) == 0);
1897  if (end_out_of_range_size > 0) {
1898  if (munmap(aligned + HEAP_PAGE_SIZE, end_out_of_range_size)) {
1899  rb_bug("heap_page_body_allocate: munmap failed for end");
1900  }
1901  }
1902 
1903  page_body = (struct heap_page_body *)aligned;
1904 #endif
1905  }
1906  else {
1907  page_body = gc_aligned_malloc(HEAP_PAGE_ALIGN, HEAP_PAGE_SIZE);
1908  }
1909 
1910  GC_ASSERT((uintptr_t)page_body % HEAP_PAGE_ALIGN == 0);
1911 
1912  return page_body;
1913 }
1914 
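/* Pop one page from the global empty-pages pool, or return NULL if the pool
 * is empty. */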
1915 static struct heap_page *
1916 heap_page_resurrect(rb_objspace_t *objspace)
1917 {
1918  struct heap_page *page = NULL;
1919  if (objspace->empty_pages != NULL) {
1920  GC_ASSERT(objspace->empty_pages_count > 0);
1921  objspace->empty_pages_count--;
1922  page = objspace->empty_pages;
1923  objspace->empty_pages = page->free_next;
1924  }
1925 
1926  return page;
1927 }
1928 
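/* Allocate a brand-new page (body plus metadata), insert it into
 * heap_pages.sorted at the position found by binary search on the start
 * address, and widen heap_pages_lomem/himem if needed. Raises rb_memerror
 * when the body or the metadata cannot be allocated. */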
1929 static struct heap_page *
1930 heap_page_allocate(rb_objspace_t *objspace)
1931 {
1932  struct heap_page_body *page_body = heap_page_body_allocate();
1933  if (page_body == 0) {
1934  rb_memerror();
1935  }
1936 
1937  struct heap_page *page = calloc1(sizeof(struct heap_page));
1938  if (page == 0) {
1939  heap_page_body_free(page_body);
1940  rb_memerror();
1941  }
1942 
1943  uintptr_t start = (uintptr_t)page_body + sizeof(struct heap_page_header);
1944  uintptr_t end = (uintptr_t)page_body + HEAP_PAGE_SIZE;
1945 
1946  size_t lo = 0;
1947  size_t hi = rb_darray_size(objspace->heap_pages.sorted);
1948  while (lo < hi) {
1949  struct heap_page *mid_page;
1950 
1951  size_t mid = (lo + hi) / 2;
1952  mid_page = rb_darray_get(objspace->heap_pages.sorted, mid);
1953  if ((uintptr_t)mid_page->start < start) {
1954  lo = mid + 1;
1955  }
1956  else if ((uintptr_t)mid_page->start > start) {
1957  hi = mid;
1958  }
1959  else {
1960  rb_bug("same heap page is allocated: %p at %"PRIuVALUE, (void *)page_body, (VALUE)mid);
1961  }
1962  }
1963 
1964  rb_darray_insert(&objspace->heap_pages.sorted, hi, page);
1965 
1966  if (heap_pages_lomem == 0 || heap_pages_lomem > start) heap_pages_lomem = start;
1967  if (heap_pages_himem < end) heap_pages_himem = end;
1968 
1969  page->body = page_body;
1970  page_body->header.page = page;
1971 
1972  objspace->heap_pages.allocated_pages++;
1973 
1974  return page;
1975 }
1976 
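/* Attach an empty page to `heap`: align the first slot so that objects line
 * up with the bitmap planes, compute how many slots of heap->slot_size fit,
 * push every slot onto the page freelist, and append the page to the heap's
 * page list, updating the totals. */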
1977 static void
1978 heap_add_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
1979 {
1980  /* Adding to eden heap during incremental sweeping is forbidden */
1981  GC_ASSERT(!heap->sweeping_page);
1982  GC_ASSERT(heap_page_in_global_empty_pages_pool(objspace, page));
1983 
1984  /* adjust obj_limit (object number available in this page) */
1985  uintptr_t start = (uintptr_t)page->body + sizeof(struct heap_page_header);
1986  if (start % BASE_SLOT_SIZE != 0) {
1987  int delta = BASE_SLOT_SIZE - (start % BASE_SLOT_SIZE);
1988  start = start + delta;
1989  GC_ASSERT(NUM_IN_PAGE(start) == 0 || NUM_IN_PAGE(start) == 1);
1990 
1991  /* Find a num in page that is evenly divisible by `stride`.
1992  * This is to ensure that objects are aligned with bit planes.
1993  * In other words, ensure there are an even number of objects
1994  * per bit plane. */
1995  if (NUM_IN_PAGE(start) == 1) {
1996  start += heap->slot_size - BASE_SLOT_SIZE;
1997  }
1998 
1999  GC_ASSERT(NUM_IN_PAGE(start) * BASE_SLOT_SIZE % heap->slot_size == 0);
2000  }
2001 
2002  int slot_count = (int)((HEAP_PAGE_SIZE - (start - (uintptr_t)page->body))/heap->slot_size);
2003 
2004  page->start = start;
2005  page->total_slots = slot_count;
2006  page->slot_size = heap->slot_size;
2007  page->heap = heap;
2008 
2009  asan_unlock_freelist(page);
2010  page->freelist = NULL;
2011  asan_unpoison_memory_region(page->body, HEAP_PAGE_SIZE, false);
2012  for (VALUE p = (VALUE)start; p < start + (slot_count * heap->slot_size); p += heap->slot_size) {
2013  heap_page_add_freeobj(objspace, page, p);
2014  }
2015  asan_lock_freelist(page);
2016 
2017  page->free_slots = slot_count;
2018 
2019  heap->total_allocated_pages++;
2020 
2021  ccan_list_add_tail(&heap->pages, &page->page_node);
2022  heap->total_pages++;
2023  heap->total_slots += page->total_slots;
2024 }
2025 
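/* If allocatable_slots permits, add one page to `heap` (reusing a pooled
 * empty page when possible), register it as a free page, and deduct its
 * slots from allocatable_slots. Returns true when a page was added. */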
2026 static int
2027 heap_page_allocate_and_initialize(rb_objspace_t *objspace, rb_heap_t *heap)
2028 {
2029  if (objspace->heap_pages.allocatable_slots > 0) {
2030  gc_report(1, objspace, "heap_page_allocate_and_initialize: rb_darray_size(objspace->heap_pages.sorted): %"PRIdSIZE", "
2031  "allocatable_slots: %"PRIdSIZE", heap->total_pages: %"PRIdSIZE"\n",
2032  rb_darray_size(objspace->heap_pages.sorted), objspace->heap_pages.allocatable_slots, heap->total_pages);
2033 
2034  struct heap_page *page = heap_page_resurrect(objspace);
2035  if (page == NULL) {
2036  page = heap_page_allocate(objspace);
2037  }
2038  heap_add_page(objspace, heap, page);
2039  heap_add_freepage(heap, page);
2040 
2041  if (objspace->heap_pages.allocatable_slots > (size_t)page->total_slots) {
2042  objspace->heap_pages.allocatable_slots -= page->total_slots;
2043  }
2044  else {
2045  objspace->heap_pages.allocatable_slots = 0;
2046  }
2047 
2048  return true;
2049  }
2050 
2051  return false;
2052 }
2053 
2054 static void
2055 heap_page_allocate_and_initialize_force(rb_objspace_t *objspace, rb_heap_t *heap)
2056 {
2057  size_t prev_allocatable_slots = objspace->heap_pages.allocatable_slots;
2058  // Set allocatable slots to 1 to force a page to be created.
2059  objspace->heap_pages.allocatable_slots = 1;
2060  heap_page_allocate_and_initialize(objspace, heap);
2061  GC_ASSERT(heap->free_pages != NULL);
2062  objspace->heap_pages.allocatable_slots = prev_allocatable_slots;
2063 }
2064 
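/* Make progress on an in-flight GC: run an incremental marking step (and
 * start sweeping once marking finishes), then run a lazy sweeping step if
 * the heap still has no free page. */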
2065 static void
2066 gc_continue(rb_objspace_t *objspace, rb_heap_t *heap)
2067 {
2068  unsigned int lock_lev;
2069  gc_enter(objspace, gc_enter_event_continue, &lock_lev);
2070 
2071  /* Continue marking if in incremental marking. */
2072  if (is_incremental_marking(objspace)) {
2073  if (gc_marks_continue(objspace, heap)) {
2074  gc_sweep(objspace);
2075  }
2076  }
2077 
2078  /* Continue sweeping if in lazy sweeping or the previous incremental
2079  * marking finished and did not yield a free page. */
2080  if (heap->free_pages == NULL && is_lazy_sweeping(objspace)) {
2081  gc_sweep_continue(objspace, heap);
2082  }
2083 
2084  gc_exit(objspace, gc_enter_event_continue, &lock_lev);
2085 }
2086 
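/* Ensure `heap` ends up with at least one free page: grow the heap up to its
 * configured initial slots, continue any in-progress GC, allocate a page if
 * allocatable_slots allows it, and otherwise trigger a (possibly major) GC
 * before giving up with rb_bug or rb_memerror. */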
2087 static void
2088 heap_prepare(rb_objspace_t *objspace, rb_heap_t *heap)
2089 {
2090  GC_ASSERT(heap->free_pages == NULL);
2091 
2092  if (heap->total_slots < gc_params.heap_init_slots[heap - heaps] &&
2093  heap->sweeping_page == NULL) {
2094  heap_page_allocate_and_initialize_force(objspace, heap);
2095  GC_ASSERT(heap->free_pages != NULL);
2096  return;
2097  }
2098 
2099  /* Continue incremental marking or lazy sweeping, if in any of those steps. */
2100  gc_continue(objspace, heap);
2101 
2102  if (heap->free_pages == NULL) {
2103  heap_page_allocate_and_initialize(objspace, heap);
2104  }
2105 
2106  /* If we still don't have a free page and are not allowed to create a new page,
2107  * we should start a new GC cycle. */
2108  if (heap->free_pages == NULL) {
2109  if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2110  rb_memerror();
2111  }
2112  else {
2113  if (objspace->heap_pages.allocatable_slots == 0 && !gc_config_full_mark_val) {
2114  heap_allocatable_slots_expand(objspace, heap,
2115  heap->freed_slots + heap->empty_slots,
2116  heap->total_slots);
2117  GC_ASSERT(objspace->heap_pages.allocatable_slots > 0);
2118  }
2119  /* Do steps of incremental marking or lazy sweeping if the GC run permits. */
2120  gc_continue(objspace, heap);
2121 
2122  /* If we're not incremental marking (e.g. a minor GC) or finished
2123  * sweeping and still don't have a free page, then
2124  * gc_sweep_finish_heap should allow us to create a new page. */
2125  if (heap->free_pages == NULL && !heap_page_allocate_and_initialize(objspace, heap)) {
2126  if (gc_needs_major_flags == GPR_FLAG_NONE) {
2127  rb_bug("cannot create a new page after GC");
2128  }
2129  else { // Major GC is required, which will allow us to create a new page
2130  if (gc_start(objspace, GPR_FLAG_NEWOBJ) == FALSE) {
2131  rb_memerror();
2132  }
2133  else {
2134  /* Do steps of incremental marking or lazy sweeping. */
2135  gc_continue(objspace, heap);
2136 
2137  if (heap->free_pages == NULL &&
2138  !heap_page_allocate_and_initialize(objspace, heap)) {
2139  rb_bug("cannot create a new page after major GC");
2140  }
2141  }
2142  }
2143  }
2144  }
2145  }
2146 
2147  GC_ASSERT(heap->free_pages != NULL);
2148 }
2149 
2150 static inline VALUE
2151 newobj_fill(VALUE obj, VALUE v1, VALUE v2, VALUE v3)
2152 {
2153  VALUE *p = (VALUE *)obj;
2154  p[2] = v1;
2155  p[3] = v2;
2156  p[4] = v3;
2157  return obj;
2158 }
2159 
2160 #if GC_DEBUG
2161 static inline const char*
2162 rb_gc_impl_source_location_cstr(int *ptr)
2163 {
2164  /* We used to be able to call `rb_source_location_cstr()` directly, but we no
2165  * longer can. We have to do the heavy lifting through our debugging API. */
2166  if (! ptr) {
2167  return NULL;
2168  }
2169  else if (! (*ptr = rb_sourceline())) {
2170  return NULL;
2171  }
2172  else {
2173  return rb_sourcefile();
2174  }
2175 }
2176 #endif
2177 
2178 static inline VALUE
2179 newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace, VALUE obj)
2180 {
2181 #if !__has_feature(memory_sanitizer)
2182  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE);
2183  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
2184 #endif
2185  RBASIC(obj)->flags = flags;
2186  *((VALUE *)&RBASIC(obj)->klass) = klass;
2187 
2188  int t = flags & RUBY_T_MASK;
2189  if (t == T_CLASS || t == T_MODULE || t == T_ICLASS) {
2190  RVALUE_AGE_SET_CANDIDATE(objspace, obj);
2191  }
2192 
2193 #if RACTOR_CHECK_MODE
2194  void rb_ractor_setup_belonging(VALUE obj);
2195  rb_ractor_setup_belonging(obj);
2196 #endif
2197 
2198 #if RGENGC_CHECK_MODE
2199  newobj_fill(obj, 0, 0, 0);
2200 
2201  int lev = rb_gc_vm_lock_no_barrier();
2202  {
2203  check_rvalue_consistency(objspace, obj);
2204 
2205  GC_ASSERT(RVALUE_MARKED(objspace, obj) == FALSE);
2206  GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
2207  GC_ASSERT(RVALUE_OLD_P(objspace, obj) == FALSE);
2208  GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, obj) == FALSE);
2209 
2210  if (RVALUE_REMEMBERED(objspace, obj)) rb_bug("newobj: %s is remembered.", rb_obj_info(obj));
2211  }
2212  rb_gc_vm_unlock_no_barrier(lev);
2213 #endif
2214 
2215  if (RB_UNLIKELY(wb_protected == FALSE)) {
2216  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
2217  }
2218 
2219 #if RGENGC_PROFILE
2220  if (wb_protected) {
2221  objspace->profile.total_generated_normal_object_count++;
2222 #if RGENGC_PROFILE >= 2
2223  objspace->profile.generated_normal_object_count_types[BUILTIN_TYPE(obj)]++;
2224 #endif
2225  }
2226  else {
2227  objspace->profile.total_generated_shady_object_count++;
2228 #if RGENGC_PROFILE >= 2
2229  objspace->profile.generated_shady_object_count_types[BUILTIN_TYPE(obj)]++;
2230 #endif
2231  }
2232 #endif
2233 
2234 #if GC_DEBUG
2235  GET_RVALUE_OVERHEAD(obj)->file = rb_gc_impl_source_location_cstr(&GET_RVALUE_OVERHEAD(obj)->line);
2236  GC_ASSERT(!SPECIAL_CONST_P(obj)); /* check alignment */
2237 #endif
2238 
2239  gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
2240 
2241  RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
2242  return obj;
2243 }
2244 
2245 size_t
2246 rb_gc_impl_obj_slot_size(VALUE obj)
2247 {
2248  return GET_HEAP_PAGE(obj)->slot_size - RVALUE_OVERHEAD;
2249 }
2250 
2251 static inline size_t
2252 heap_slot_size(unsigned char pool_id)
2253 {
2254  GC_ASSERT(pool_id < HEAP_COUNT);
2255 
2256  size_t slot_size = (1 << pool_id) * BASE_SLOT_SIZE;
2257 
2258 #if RGENGC_CHECK_MODE
2259  rb_objspace_t *objspace = rb_gc_get_objspace();
2260  GC_ASSERT(heaps[pool_id].slot_size == (short)slot_size);
2261 #endif
2262 
2263  slot_size -= RVALUE_OVERHEAD;
2264 
2265  return slot_size;
2266 }
2267 
2268 bool
2269 rb_gc_impl_size_allocatable_p(size_t size)
2270 {
2271  return size <= heap_slot_size(HEAP_COUNT - 1);
2272 }
2273 
2274 static inline VALUE
2275 ractor_cache_allocate_slot(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache,
2276  size_t heap_idx)
2277 {
2278  rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2279  struct free_slot *p = heap_cache->freelist;
2280 
2281  if (RB_UNLIKELY(is_incremental_marking(objspace))) {
2282  // Not allowed to allocate without running an incremental marking step
2283  if (cache->incremental_mark_step_allocated_slots >= INCREMENTAL_MARK_STEP_ALLOCATIONS) {
2284  return Qfalse;
2285  }
2286 
2287  if (p) {
2288  cache->incremental_mark_step_allocated_slots++;
2289  }
2290  }
2291 
2292  if (RB_LIKELY(p)) {
2293  VALUE obj = (VALUE)p;
2294  MAYBE_UNUSED(const size_t) stride = heap_slot_size(heap_idx);
2295  heap_cache->freelist = p->next;
2296  asan_unpoison_memory_region(p, stride, true);
2297 #if RGENGC_CHECK_MODE
2298  GC_ASSERT(rb_gc_impl_obj_slot_size(obj) == stride);
2299  // zero clear
2300  MEMZERO((char *)obj, char, stride);
2301 #endif
2302  return obj;
2303  }
2304  else {
2305  return Qfalse;
2306  }
2307 }
2308 
2309 static struct heap_page *
2310 heap_next_free_page(rb_objspace_t *objspace, rb_heap_t *heap)
2311 {
2312  struct heap_page *page;
2313 
2314  if (heap->free_pages == NULL) {
2315  heap_prepare(objspace, heap);
2316  }
2317 
2318  page = heap->free_pages;
2319  heap->free_pages = page->free_next;
2320 
2321  GC_ASSERT(page->free_slots != 0);
2322 
2323  asan_unlock_freelist(page);
2324 
2325  return page;
2326 }
2327 
2328 static inline void
2329 ractor_cache_set_page(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx,
2330  struct heap_page *page)
2331 {
2332  gc_report(3, objspace, "ractor_set_cache: Using page %p\n", (void *)page->body);
2333 
2334  rb_ractor_newobj_heap_cache_t *heap_cache = &cache->heap_caches[heap_idx];
2335 
2336  GC_ASSERT(heap_cache->freelist == NULL);
2337  GC_ASSERT(page->free_slots != 0);
2338  GC_ASSERT(page->freelist != NULL);
2339 
2340  heap_cache->using_page = page;
2341  heap_cache->freelist = page->freelist;
2342  page->free_slots = 0;
2343  page->freelist = NULL;
2344 
2345  asan_unpoison_object((VALUE)heap_cache->freelist, false);
2346  GC_ASSERT(RB_TYPE_P((VALUE)heap_cache->freelist, T_NONE));
2347  asan_poison_object((VALUE)heap_cache->freelist);
2348 }
2349 
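/* Map an allocation size (plus RVALUE_OVERHEAD) to a heap index. Slot sizes
 * are power-of-two multiples of BASE_SLOT_SIZE, so the index is
 * ceil(log2(slot_count)). For example, if BASE_SLOT_SIZE were 40, a 100-byte
 * request would need 3 base slots and map to heap index 2 (160-byte slots). */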
2350 static inline size_t
2351 heap_idx_for_size(size_t size)
2352 {
2353  size += RVALUE_OVERHEAD;
2354 
2355  size_t slot_count = CEILDIV(size, BASE_SLOT_SIZE);
2356 
2357  /* heap_idx is ceil(log2(slot_count)) */
2358  size_t heap_idx = 64 - nlz_int64(slot_count - 1);
2359 
2360  if (heap_idx >= HEAP_COUNT) {
2361  rb_bug("heap_idx_for_size: allocation size too large "
2362  "(size=%"PRIuSIZE", heap_idx=%"PRIuSIZE")", size, heap_idx);
2363  }
2364 
2365 #if RGENGC_CHECK_MODE
2366  rb_objspace_t *objspace = rb_gc_get_objspace();
2367  GC_ASSERT(size <= (size_t)heaps[heap_idx].slot_size);
2368  if (heap_idx > 0) GC_ASSERT(size > (size_t)heaps[heap_idx - 1].slot_size);
2369 #endif
2370 
2371  return heap_idx;
2372 }
2373 
2374 size_t
2375 rb_gc_impl_heap_id_for_size(void *objspace_ptr, size_t size)
2376 {
2377  return heap_idx_for_size(size);
2378 }
2379 
2380 
2381 static size_t heap_sizes[HEAP_COUNT + 1] = { 0 };
2382 
2383 size_t *
2384 rb_gc_impl_heap_sizes(void *objspace_ptr)
2385 {
2386  if (heap_sizes[0] == 0) {
2387  for (unsigned char i = 0; i < HEAP_COUNT; i++) {
2388  heap_sizes[i] = heap_slot_size(i);
2389  }
2390  }
2391 
2392  return heap_sizes;
2393 }
2394 
2395 NOINLINE(static VALUE newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked));
2396 
2397 static VALUE
2398 newobj_cache_miss(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)
2399 {
2400  rb_heap_t *heap = &heaps[heap_idx];
2401  VALUE obj = Qfalse;
2402 
2403  unsigned int lev = 0;
2404  bool unlock_vm = false;
2405 
2406  if (!vm_locked) {
2407  lev = rb_gc_cr_lock();
2408  vm_locked = true;
2409  unlock_vm = true;
2410  }
2411 
2412  {
2413  if (is_incremental_marking(objspace)) {
2414  gc_continue(objspace, heap);
2415  cache->incremental_mark_step_allocated_slots = 0;
2416 
2417  // Retry allocation after resetting incremental_mark_step_allocated_slots
2418  obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2419  }
2420 
2421  if (obj == Qfalse) {
2422  // Get next free page (possibly running GC)
2423  struct heap_page *page = heap_next_free_page(objspace, heap);
2424  ractor_cache_set_page(objspace, cache, heap_idx, page);
2425 
2426  // Retry allocation after moving to new page
2427  obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2428  }
2429  }
2430 
2431  if (unlock_vm) {
2432  rb_gc_cr_unlock(lev);
2433  }
2434 
2435  if (RB_UNLIKELY(obj == Qfalse)) {
2436  rb_memerror();
2437  }
2438  return obj;
2439 }
2440 
2441 static VALUE
2442 newobj_alloc(rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx, bool vm_locked)
2443 {
2444  VALUE obj = ractor_cache_allocate_slot(objspace, cache, heap_idx);
2445 
2446  if (RB_UNLIKELY(obj == Qfalse)) {
2447  obj = newobj_cache_miss(objspace, cache, heap_idx, vm_locked);
2448  }
2449 
2450  rb_heap_t *heap = &heaps[heap_idx];
2451  heap->total_allocated_objects++;
2452  GC_ASSERT(rb_gc_multi_ractor_p() ||
2453  heap->total_slots >=
2454  (heap->total_allocated_objects - heap->total_freed_objects - heap->final_slots_count));
2455 
2456  return obj;
2457 }
2458 
2459 ALWAYS_INLINE(static VALUE newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx));
2460 
2461 static inline VALUE
2462 newobj_slowpath(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, int wb_protected, size_t heap_idx)
2463 {
2464  VALUE obj;
2465  unsigned int lev;
2466 
2467  lev = rb_gc_cr_lock();
2468  {
2469  if (RB_UNLIKELY(during_gc || ruby_gc_stressful)) {
2470  if (during_gc) {
2471  dont_gc_on();
2472  during_gc = 0;
2473  rb_bug("object allocation during garbage collection phase");
2474  }
2475 
2476  if (ruby_gc_stressful) {
2477  if (!garbage_collect(objspace, GPR_FLAG_NEWOBJ)) {
2478  rb_memerror();
2479  }
2480  }
2481  }
2482 
2483  obj = newobj_alloc(objspace, cache, heap_idx, true);
2484  newobj_init(klass, flags, wb_protected, objspace, obj);
2485  }
2486  rb_gc_cr_unlock(lev);
2487 
2488  return obj;
2489 }
2490 
2491 NOINLINE(static VALUE newobj_slowpath_wb_protected(VALUE klass, VALUE flags,
2492  rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
2493 NOINLINE(static VALUE newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags,
2494  rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx));
2495 
2496 static VALUE
2497 newobj_slowpath_wb_protected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
2498 {
2499  return newobj_slowpath(klass, flags, objspace, cache, TRUE, heap_idx);
2500 }
2501 
2502 static VALUE
2503 newobj_slowpath_wb_unprotected(VALUE klass, VALUE flags, rb_objspace_t *objspace, rb_ractor_newobj_cache_t *cache, size_t heap_idx)
2504 {
2505  return newobj_slowpath(klass, flags, objspace, cache, FALSE, heap_idx);
2506 }
2507 
2508 VALUE
2509 rb_gc_impl_new_obj(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size)
2510 {
2511  VALUE obj;
2512  rb_objspace_t *objspace = objspace_ptr;
2513 
2514  RB_DEBUG_COUNTER_INC(obj_newobj);
2515  (void)RB_DEBUG_COUNTER_INC_IF(obj_newobj_wb_unprotected, !wb_protected);
2516 
2517  if (RB_UNLIKELY(stress_to_class)) {
2518  long cnt = RARRAY_LEN(stress_to_class);
2519  for (long i = 0; i < cnt; i++) {
2520  if (klass == RARRAY_AREF(stress_to_class, i)) rb_memerror();
2521  }
2522  }
2523 
2524  size_t heap_idx = heap_idx_for_size(alloc_size);
2525 
2526  rb_ractor_newobj_cache_t *cache = (rb_ractor_newobj_cache_t *)cache_ptr;
2527 
2528  if (!RB_UNLIKELY(during_gc || ruby_gc_stressful) &&
2529  wb_protected) {
2530  obj = newobj_alloc(objspace, cache, heap_idx, false);
2531  newobj_init(klass, flags, wb_protected, objspace, obj);
2532  }
2533  else {
2534  RB_DEBUG_COUNTER_INC(obj_newobj_slowpath);
2535 
2536  obj = wb_protected ?
2537  newobj_slowpath_wb_protected(klass, flags, objspace, cache, heap_idx) :
2538  newobj_slowpath_wb_unprotected(klass, flags, objspace, cache, heap_idx);
2539  }
2540 
2541  return newobj_fill(obj, v1, v2, v3);
2542 }
2543 
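/* bsearch comparator used by heap_page_for_ptr: returns 0 when `ptr` falls
 * inside the page body referenced by `memb`, -1 when it lies below it, and
 * 1 when it lies above it. */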
2544 static int
2545 ptr_in_page_body_p(const void *ptr, const void *memb)
2546 {
2547  struct heap_page *page = *(struct heap_page **)memb;
2548  uintptr_t p_body = (uintptr_t)page->body;
2549 
2550  if ((uintptr_t)ptr >= p_body) {
2551  return (uintptr_t)ptr < (p_body + HEAP_PAGE_SIZE) ? 0 : 1;
2552  }
2553  else {
2554  return -1;
2555  }
2556 }
2557 
2558 PUREFUNC(static inline struct heap_page *heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr);)
2559 static inline struct heap_page *
2560 heap_page_for_ptr(rb_objspace_t *objspace, uintptr_t ptr)
2561 {
2562  struct heap_page **res;
2563 
2564  if (ptr < (uintptr_t)heap_pages_lomem ||
2565  ptr > (uintptr_t)heap_pages_himem) {
2566  return NULL;
2567  }
2568 
2569  res = bsearch((void *)ptr, rb_darray_ref(objspace->heap_pages.sorted, 0),
2570  rb_darray_size(objspace->heap_pages.sorted), sizeof(struct heap_page *),
2571  ptr_in_page_body_p);
2572 
2573  if (res) {
2574  return *res;
2575  }
2576  else {
2577  return NULL;
2578  }
2579 }
2580 
2581 PUREFUNC(static inline bool is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr);)
2582 static inline bool
2583 is_pointer_to_heap(rb_objspace_t *objspace, const void *ptr)
2584 {
2585  register uintptr_t p = (uintptr_t)ptr;
2586  register struct heap_page *page;
2587 
2588  RB_DEBUG_COUNTER_INC(gc_isptr_trial);
2589 
2590  if (p < heap_pages_lomem || p > heap_pages_himem) return FALSE;
2591  RB_DEBUG_COUNTER_INC(gc_isptr_range);
2592 
2593  if (p % BASE_SLOT_SIZE != 0) return FALSE;
2594  RB_DEBUG_COUNTER_INC(gc_isptr_align);
2595 
2596  page = heap_page_for_ptr(objspace, (uintptr_t)ptr);
2597  if (page) {
2598  RB_DEBUG_COUNTER_INC(gc_isptr_maybe);
2599  if (heap_page_in_global_empty_pages_pool(objspace, page)) {
2600  return FALSE;
2601  }
2602  else {
2603  if (p < page->start) return FALSE;
2604  if (p >= page->start + (page->total_slots * page->slot_size)) return FALSE;
2605  if ((NUM_IN_PAGE(p) * BASE_SLOT_SIZE) % page->slot_size != 0) return FALSE;
2606 
2607  return TRUE;
2608  }
2609  }
2610  return FALSE;
2611 }
2612 
2613 bool
2614 rb_gc_impl_pointer_to_heap_p(void *objspace_ptr, const void *ptr)
2615 {
2616  return is_pointer_to_heap(objspace_ptr, ptr);
2617 }
2618 
2619 #define ZOMBIE_OBJ_KEPT_FLAGS (FL_SEEN_OBJ_ID | FL_FINALIZE)
2620 
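/* Convert `obj` into a T_ZOMBIE carrying its dfree callback and data, push
 * it onto the heap_pages_deferred_final list with an atomic CAS loop, and
 * bump the page's finalizable-slot counters. The zombie is consumed later by
 * finalize_list. */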
2621 void
2622 rb_gc_impl_make_zombie(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data)
2623 {
2624  rb_objspace_t *objspace = objspace_ptr;
2625 
2626  struct RZombie *zombie = RZOMBIE(obj);
2627  zombie->basic.flags = T_ZOMBIE | (zombie->basic.flags & ZOMBIE_OBJ_KEPT_FLAGS);
2628  zombie->dfree = dfree;
2629  zombie->data = data;
2630  VALUE prev, next = heap_pages_deferred_final;
2631  do {
2632  zombie->next = prev = next;
2633  next = RUBY_ATOMIC_VALUE_CAS(heap_pages_deferred_final, prev, obj);
2634  } while (next != prev);
2635 
2636  struct heap_page *page = GET_HEAP_PAGE(obj);
2637  page->final_slots++;
2638  page->heap->final_slots_count++;
2639 }
2640 
2641 static void
2642 obj_free_object_id(rb_objspace_t *objspace, VALUE obj)
2643 {
2644  st_data_t o = (st_data_t)obj, id;
2645 
2646  GC_ASSERT(BUILTIN_TYPE(obj) == T_NONE || FL_TEST(obj, FL_SEEN_OBJ_ID));
2647  FL_UNSET(obj, FL_SEEN_OBJ_ID);
2648 
2649  if (st_delete(objspace->obj_to_id_tbl, &o, &id)) {
2650  GC_ASSERT(id);
2651  st_delete(objspace->id_to_obj_tbl, &id, NULL);
2652  }
2653  else {
2654  rb_bug("Object ID seen, but not in mapping table: %s", rb_obj_info(obj));
2655  }
2656 }
2657 
2658 typedef int each_obj_callback(void *, void *, size_t, void *);
2659 typedef int each_page_callback(struct heap_page *, void *);
2660 
2661 struct each_obj_data {
2662  rb_objspace_t *objspace;
2663  bool reenable_incremental;
2664 
2665  each_obj_callback *each_obj_callback;
2666  each_page_callback *each_page_callback;
2667  void *data;
2668 
2669  struct heap_page **pages[HEAP_COUNT];
2670  size_t pages_counts[HEAP_COUNT];
2671 };
2672 
2673 static VALUE
2674 objspace_each_objects_ensure(VALUE arg)
2675 {
2676  struct each_obj_data *data = (struct each_obj_data *)arg;
2677  rb_objspace_t *objspace = data->objspace;
2678 
2679  /* Reenable incremental GC */
2680  if (data->reenable_incremental) {
2681  objspace->flags.dont_incremental = FALSE;
2682  }
2683 
2684  for (int i = 0; i < HEAP_COUNT; i++) {
2685  struct heap_page **pages = data->pages[i];
2686  free(pages);
2687  }
2688 
2689  return Qnil;
2690 }
2691 
2692 static VALUE
2693 objspace_each_objects_try(VALUE arg)
2694 {
2695  struct each_obj_data *data = (struct each_obj_data *)arg;
2696  rb_objspace_t *objspace = data->objspace;
2697 
2698  /* Copy pages from all heaps to their respective buffers. */
2699  for (int i = 0; i < HEAP_COUNT; i++) {
2700  rb_heap_t *heap = &heaps[i];
2701  size_t size = heap->total_pages * sizeof(struct heap_page *);
2702 
2703  struct heap_page **pages = malloc(size);
2704  if (!pages) rb_memerror();
2705 
2706  /* Set up pages buffer by iterating over all pages in the current eden
2707  * heap. This will be a snapshot of the state of the heap before we
2708  * call the callback over each page that exists in this buffer. Thus it
2709  * is safe for the callback to allocate objects without possibly entering
2710  * an infinite loop. */
2711  struct heap_page *page = 0;
2712  size_t pages_count = 0;
2713  ccan_list_for_each(&heap->pages, page, page_node) {
2714  pages[pages_count] = page;
2715  pages_count++;
2716  }
2717  data->pages[i] = pages;
2718  data->pages_counts[i] = pages_count;
2719  GC_ASSERT(pages_count == heap->total_pages);
2720  }
2721 
2722  for (int i = 0; i < HEAP_COUNT; i++) {
2723  rb_heap_t *heap = &heaps[i];
2724  size_t pages_count = data->pages_counts[i];
2725  struct heap_page **pages = data->pages[i];
2726 
2727  struct heap_page *page = ccan_list_top(&heap->pages, struct heap_page, page_node);
2728  for (size_t i = 0; i < pages_count; i++) {
2729  /* If we have reached the end of the linked list then there are no
2730  * more pages, so break. */
2731  if (page == NULL) break;
2732 
2733  /* If this page does not match the one in the buffer, then move to
2734  * the next page in the buffer. */
2735  if (pages[i] != page) continue;
2736 
2737  uintptr_t pstart = (uintptr_t)page->start;
2738  uintptr_t pend = pstart + (page->total_slots * heap->slot_size);
2739 
2740  if (data->each_obj_callback &&
2741  (*data->each_obj_callback)((void *)pstart, (void *)pend, heap->slot_size, data->data)) {
2742  break;
2743  }
2744  if (data->each_page_callback &&
2745  (*data->each_page_callback)(page, data->data)) {
2746  break;
2747  }
2748 
2749  page = ccan_list_next(&heap->pages, page, page_node);
2750  }
2751  }
2752 
2753  return Qnil;
2754 }
2755 
2756 static void
2757 objspace_each_exec(bool protected, struct each_obj_data *each_obj_data)
2758 {
2759  /* Disable incremental GC */
2760  rb_objspace_t *objspace = each_obj_data->objspace;
2761  bool reenable_incremental = FALSE;
2762  if (protected) {
2763  reenable_incremental = !objspace->flags.dont_incremental;
2764 
2765  gc_rest(objspace);
2766  objspace->flags.dont_incremental = TRUE;
2767  }
2768 
2769  each_obj_data->reenable_incremental = reenable_incremental;
2770  memset(&each_obj_data->pages, 0, sizeof(each_obj_data->pages));
2771  memset(&each_obj_data->pages_counts, 0, sizeof(each_obj_data->pages_counts));
2772  rb_ensure(objspace_each_objects_try, (VALUE)each_obj_data,
2773  objspace_each_objects_ensure, (VALUE)each_obj_data);
2774 }
2775 
2776 static void
2777 objspace_each_objects(rb_objspace_t *objspace, each_obj_callback *callback, void *data, bool protected)
2778 {
2779  struct each_obj_data each_obj_data = {
2780  .objspace = objspace,
2781  .each_obj_callback = callback,
2782  .each_page_callback = NULL,
2783  .data = data,
2784  };
2785  objspace_each_exec(protected, &each_obj_data);
2786 }
2787 
2788 void
2789 rb_gc_impl_each_objects(void *objspace_ptr, each_obj_callback *callback, void *data)
2790 {
2791  objspace_each_objects(objspace_ptr, callback, data, TRUE);
2792 }
2793 
2794 #if GC_CAN_COMPILE_COMPACTION
2795 static void
2796 objspace_each_pages(rb_objspace_t *objspace, each_page_callback *callback, void *data, bool protected)
2797 {
2798  struct each_obj_data each_obj_data = {
2799  .objspace = objspace,
2800  .each_obj_callback = NULL,
2801  .each_page_callback = callback,
2802  .data = data,
2803  };
2804  objspace_each_exec(protected, &each_obj_data);
2805 }
2806 #endif
2807 
2808 VALUE
2809 rb_gc_impl_define_finalizer(void *objspace_ptr, VALUE obj, VALUE block)
2810 {
2811  rb_objspace_t *objspace = objspace_ptr;
2812  VALUE table;
2813  st_data_t data;
2814 
2815  GC_ASSERT(!OBJ_FROZEN(obj));
2816 
2817  RBASIC(obj)->flags |= FL_FINALIZE;
2818 
2819  if (st_lookup(finalizer_table, obj, &data)) {
2820  table = (VALUE)data;
2821 
2822  /* avoid duplicate block, table is usually small */
2823  {
2824  long len = RARRAY_LEN(table);
2825  long i;
2826 
2827  for (i = 0; i < len; i++) {
2828  VALUE recv = RARRAY_AREF(table, i);
2829  if (rb_equal(recv, block)) {
2830  return recv;
2831  }
2832  }
2833  }
2834 
2835  rb_ary_push(table, block);
2836  }
2837  else {
2838  table = rb_ary_new3(1, block);
2839  rb_obj_hide(table);
2840  st_add_direct(finalizer_table, obj, table);
2841  }
2842 
2843  return block;
2844 }
2845 
2846 void
2847 rb_gc_impl_undefine_finalizer(void *objspace_ptr, VALUE obj)
2848 {
2849  rb_objspace_t *objspace = objspace_ptr;
2850 
2851  GC_ASSERT(!OBJ_FROZEN(obj));
2852 
2853  st_data_t data = obj;
2854  st_delete(finalizer_table, &data, 0);
2855  FL_UNSET(obj, FL_FINALIZE);
2856 }
2857 
2858 void
2859 rb_gc_impl_copy_finalizer(void *objspace_ptr, VALUE dest, VALUE obj)
2860 {
2861  rb_objspace_t *objspace = objspace_ptr;
2862  VALUE table;
2863  st_data_t data;
2864 
2865  if (!FL_TEST(obj, FL_FINALIZE)) return;
2866 
2867  if (RB_LIKELY(st_lookup(finalizer_table, obj, &data))) {
2868  table = (VALUE)data;
2869  st_insert(finalizer_table, dest, table);
2870  FL_SET(dest, FL_FINALIZE);
2871  }
2872  else {
2873  rb_bug("rb_gc_copy_finalizer: FL_FINALIZE set but not found in finalizer_table: %s", rb_obj_info(obj));
2874  }
2875 }
2876 
2877 static VALUE
2878 get_object_id_in_finalizer(rb_objspace_t *objspace, VALUE obj)
2879 {
2880  if (FL_TEST(obj, FL_SEEN_OBJ_ID)) {
2881  return rb_gc_impl_object_id(objspace, obj);
2882  }
2883  else {
2884  VALUE id = ULL2NUM(objspace->next_object_id);
2885  objspace->next_object_id += OBJ_ID_INCREMENT;
2886  return id;
2887  }
2888 }
2889 
2890 static VALUE
2891 get_final(long i, void *data)
2892 {
2893  VALUE table = (VALUE)data;
2894 
2895  return RARRAY_AREF(table, i);
2896 }
2897 
2898 static void
2899 run_final(rb_objspace_t *objspace, VALUE zombie)
2900 {
2901  if (RZOMBIE(zombie)->dfree) {
2902  RZOMBIE(zombie)->dfree(RZOMBIE(zombie)->data);
2903  }
2904 
2905  st_data_t key = (st_data_t)zombie;
2906  if (FL_TEST_RAW(zombie, FL_FINALIZE)) {
2907  FL_UNSET(zombie, FL_FINALIZE);
2908  st_data_t table;
2909  if (st_delete(finalizer_table, &key, &table)) {
2910  rb_gc_run_obj_finalizer(get_object_id_in_finalizer(objspace, zombie), RARRAY_LEN(table), get_final, (void *)table);
2911  }
2912  else {
2913  rb_bug("FL_FINALIZE flag is set, but finalizers are not found");
2914  }
2915  }
2916  else {
2917  GC_ASSERT(!st_lookup(finalizer_table, key, NULL));
2918  }
2919 }
2920 
2921 static void
2922 finalize_list(rb_objspace_t *objspace, VALUE zombie)
2923 {
2924  while (zombie) {
2925  VALUE next_zombie;
2926  struct heap_page *page;
2927  asan_unpoison_object(zombie, false);
2928  next_zombie = RZOMBIE(zombie)->next;
2929  page = GET_HEAP_PAGE(zombie);
2930 
2931  run_final(objspace, zombie);
2932 
2933  int lev = rb_gc_vm_lock();
2934  {
2935  GC_ASSERT(BUILTIN_TYPE(zombie) == T_ZOMBIE);
2936  if (FL_TEST(zombie, FL_SEEN_OBJ_ID)) {
2937  obj_free_object_id(objspace, zombie);
2938  }
2939 
2940  GC_ASSERT(page->heap->final_slots_count > 0);
2941  GC_ASSERT(page->final_slots > 0);
2942 
2943  page->heap->final_slots_count--;
2944  page->final_slots--;
2945  page->free_slots++;
2946  heap_page_add_freeobj(objspace, page, zombie);
2947  page->heap->total_freed_objects++;
2948  }
2949  rb_gc_vm_unlock(lev);
2950 
2951  zombie = next_zombie;
2952  }
2953 }
2954 
2955 static void
2956 finalize_deferred_heap_pages(rb_objspace_t *objspace)
2957 {
2958  VALUE zombie;
2959  while ((zombie = RUBY_ATOMIC_VALUE_EXCHANGE(heap_pages_deferred_final, 0)) != 0) {
2960  finalize_list(objspace, zombie);
2961  }
2962 }
2963 
2964 static void
2965 finalize_deferred(rb_objspace_t *objspace)
2966 {
2967  rb_gc_set_pending_interrupt();
2968  finalize_deferred_heap_pages(objspace);
2969  rb_gc_unset_pending_interrupt();
2970 }
2971 
2972 static void
2973 gc_finalize_deferred(void *dmy)
2974 {
2975  rb_objspace_t *objspace = dmy;
2976  if (RUBY_ATOMIC_EXCHANGE(finalizing, 1)) return;
2977 
2978  finalize_deferred(objspace);
2979  RUBY_ATOMIC_SET(finalizing, 0);
2980 }
2981 
2982 static void
2983 gc_finalize_deferred_register(rb_objspace_t *objspace)
2984 {
2985  /* will enqueue a call to gc_finalize_deferred */
2986  rb_postponed_job_trigger(objspace->finalize_deferred_pjob);
2987 }
2988 
2989 static int pop_mark_stack(mark_stack_t *stack, VALUE *data);
2990 
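/* Abandon any in-progress GC: drain the mark stack, stop incremental
 * marking, clear the lazy-sweep cursors and before_sweep flags, wipe the
 * mark and remember-set bitmaps, and return to gc_mode_none. */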
2991 static void
2992 gc_abort(void *objspace_ptr)
2993 {
2994  rb_objspace_t *objspace = objspace_ptr;
2995 
2996  if (is_incremental_marking(objspace)) {
2997  /* Remove all objects from the mark stack. */
2998  VALUE obj;
2999  while (pop_mark_stack(&objspace->mark_stack, &obj));
3000 
3001  objspace->flags.during_incremental_marking = FALSE;
3002  }
3003 
3004  if (is_lazy_sweeping(objspace)) {
3005  for (int i = 0; i < HEAP_COUNT; i++) {
3006  rb_heap_t *heap = &heaps[i];
3007 
3008  heap->sweeping_page = NULL;
3009  struct heap_page *page = NULL;
3010 
3011  ccan_list_for_each(&heap->pages, page, page_node) {
3012  page->flags.before_sweep = false;
3013  }
3014  }
3015  }
3016 
3017  for (int i = 0; i < HEAP_COUNT; i++) {
3018  rb_heap_t *heap = &heaps[i];
3019  rgengc_mark_and_rememberset_clear(objspace, heap);
3020  }
3021 
3022  gc_mode_set(objspace, gc_mode_none);
3023 }
3024 
3025 void
3026 rb_gc_impl_shutdown_free_objects(void *objspace_ptr)
3027 {
3028  rb_objspace_t *objspace = objspace_ptr;
3029 
3030  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3031  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3032  short stride = page->slot_size;
3033 
3034  uintptr_t p = (uintptr_t)page->start;
3035  uintptr_t pend = p + page->total_slots * stride;
3036  for (; p < pend; p += stride) {
3037  VALUE vp = (VALUE)p;
3038  asan_unpoisoning_object(vp) {
3039  if (RB_BUILTIN_TYPE(vp) != T_NONE) {
3040  rb_gc_obj_free_vm_weak_references(vp);
3041  if (rb_gc_obj_free(objspace, vp)) {
3042  RBASIC(vp)->flags = 0;
3043  }
3044  }
3045  }
3046  }
3047  }
3048 }
3049 
3050 static int
3051 rb_gc_impl_shutdown_call_finalizer_i(st_data_t key, st_data_t val, st_data_t data)
3052 {
3052 {
3053  rb_objspace_t *objspace = (rb_objspace_t *)data;
3054  VALUE obj = (VALUE)key;
3055  VALUE table = (VALUE)val;
3056 
3057  GC_ASSERT(RB_FL_TEST(obj, FL_FINALIZE));
3058  GC_ASSERT(RB_BUILTIN_TYPE(val) == T_ARRAY);
3059 
3060  rb_gc_run_obj_finalizer(rb_gc_impl_object_id(objspace, obj), RARRAY_LEN(table), get_final, (void *)table);
3061 
3062  FL_UNSET(obj, FL_FINALIZE);
3063 
3064  return ST_DELETE;
3065 }
3066 
3067 void
3068 rb_gc_impl_shutdown_call_finalizer(void *objspace_ptr)
3069 {
3070  rb_objspace_t *objspace = objspace_ptr;
3071 
3072 #if RGENGC_CHECK_MODE >= 2
3073  gc_verify_internal_consistency(objspace);
3074 #endif
3075 
3076  /* prohibit incremental GC */
3077  objspace->flags.dont_incremental = 1;
3078 
3079  if (RUBY_ATOMIC_EXCHANGE(finalizing, 1)) {
3080  /* Abort incremental marking and lazy sweeping to speed up shutdown. */
3081  gc_abort(objspace);
3082  dont_gc_on();
3083  return;
3084  }
3085 
3086  while (finalizer_table->num_entries) {
3087  st_foreach(finalizer_table, rb_gc_impl_shutdown_call_finalizer_i, (st_data_t)objspace);
3088  }
3089 
3090  /* run finalizers */
3091  finalize_deferred(objspace);
3092  GC_ASSERT(heap_pages_deferred_final == 0);
3093 
3094  /* Abort incremental marking and lazy sweeping to speed up shutdown. */
3095  gc_abort(objspace);
3096 
3097  /* prohibit GC because forced T_DATA finalizers can break object graph consistency */
3098  dont_gc_on();
3099 
3100  /* running data/file finalizers is part of garbage collection */
3101  unsigned int lock_lev;
3102  gc_enter(objspace, gc_enter_event_finalizer, &lock_lev);
3103 
3104  /* run data/file object's finalizers */
3105  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3106  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3107  short stride = page->slot_size;
3108 
3109  uintptr_t p = (uintptr_t)page->start;
3110  uintptr_t pend = p + page->total_slots * stride;
3111  for (; p < pend; p += stride) {
3112  VALUE vp = (VALUE)p;
3113  asan_unpoisoning_object(vp) {
3114  if (rb_gc_shutdown_call_finalizer_p(vp)) {
3115  rb_gc_obj_free_vm_weak_references(vp);
3116  if (rb_gc_obj_free(objspace, vp)) {
3117  RBASIC(vp)->flags = 0;
3118  }
3119  }
3120  }
3121  }
3122  }
3123 
3124  gc_exit(objspace, gc_enter_event_finalizer, &lock_lev);
3125 
3126  finalize_deferred_heap_pages(objspace);
3127 
3128  st_free_table(finalizer_table);
3129  finalizer_table = 0;
3130  RUBY_ATOMIC_SET(finalizing, 0);
3131 }
3132 
3133 void
3134 rb_gc_impl_each_object(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data)
3135 {
3136  rb_objspace_t *objspace = objspace_ptr;
3137 
3138  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
3139  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
3140  short stride = page->slot_size;
3141 
3142  uintptr_t p = (uintptr_t)page->start;
3143  uintptr_t pend = p + page->total_slots * stride;
3144  for (; p < pend; p += stride) {
3145  VALUE obj = (VALUE)p;
3146 
3147  asan_unpoisoning_object(obj) {
3148  func(obj, data);
3149  }
3150  }
3151  }
3152 }
3153 
3154 /*
3155  ------------------------ Garbage Collection ------------------------
3156 */
3157 
3158 /* Sweeping */
3159 
3160 static size_t
3161 objspace_available_slots(rb_objspace_t *objspace)
3162 {
3163  size_t total_slots = 0;
3164  for (int i = 0; i < HEAP_COUNT; i++) {
3165  rb_heap_t *heap = &heaps[i];
3166  total_slots += heap->total_slots;
3167  }
3168  return total_slots;
3169 }
3170 
3171 static size_t
3172 objspace_live_slots(rb_objspace_t *objspace)
3173 {
3174  return total_allocated_objects(objspace) - total_freed_objects(objspace) - total_final_slots_count(objspace);
3175 }
3176 
3177 static size_t
3178 objspace_free_slots(rb_objspace_t *objspace)
3179 {
3180  return objspace_available_slots(objspace) - objspace_live_slots(objspace) - total_final_slots_count(objspace);
3181 }
3182 
3183 static void
3184 gc_setup_mark_bits(struct heap_page *page)
3185 {
3186  /* copy oldgen bitmap to mark bitmap */
3187  memcpy(&page->mark_bits[0], &page->uncollectible_bits[0], HEAP_PAGE_BITMAP_SIZE);
3188 }
3189 
3190 static int gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj);
3191 static VALUE gc_move(rb_objspace_t *objspace, VALUE scan, VALUE free, size_t src_slot_size, size_t slot_size);
3192 
3193 #if defined(_WIN32)
3194 enum {HEAP_PAGE_LOCK = PAGE_NOACCESS, HEAP_PAGE_UNLOCK = PAGE_READWRITE};
3195 
3196 static BOOL
3197 protect_page_body(struct heap_page_body *body, DWORD protect)
3198 {
3199  DWORD old_protect;
3200  return VirtualProtect(body, HEAP_PAGE_SIZE, protect, &old_protect) != 0;
3201 }
3202 #else
3203 enum {HEAP_PAGE_LOCK = PROT_NONE, HEAP_PAGE_UNLOCK = PROT_READ | PROT_WRITE};
3204 #define protect_page_body(body, protect) !mprotect((body), HEAP_PAGE_SIZE, (protect))
3205 #endif
3206 
3207 static void
3208 lock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
3209 {
3210  if (!protect_page_body(body, HEAP_PAGE_LOCK)) {
3211  rb_bug("Couldn't protect page %p, errno: %s", (void *)body, strerror(errno));
3212  }
3213  else {
3214  gc_report(5, objspace, "Protecting page in move %p\n", (void *)body);
3215  }
3216 }
3217 
3218 static void
3219 unlock_page_body(rb_objspace_t *objspace, struct heap_page_body *body)
3220 {
3221  if (!protect_page_body(body, HEAP_PAGE_UNLOCK)) {
3222  rb_bug("Couldn't unprotect page %p, errno: %s", (void *)body, strerror(errno));
3223  }
3224  else {
3225  gc_report(5, objspace, "Unprotecting page in move %p\n", (void *)body);
3226  }
3227 }
3228 
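/* Try to move the marked object `src` into a slot taken from `free_page`'s
 * freelist. Returns false when there is no destination page or its freelist
 * is exhausted; on success the move is recorded in the rcompactor counters
 * and the destination page loses one free slot. */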
3229 static bool
3230 try_move(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *free_page, VALUE src)
3231 {
3232  GC_ASSERT(gc_is_moveable_obj(objspace, src));
3233 
3234  struct heap_page *src_page = GET_HEAP_PAGE(src);
3235  if (!free_page) {
3236  return false;
3237  }
3238 
3239  /* We should return true if either src is successfully moved, or src is
3240  * unmoveable. A false return will cause the sweeping cursor to be
3241  * incremented to the next page, and src will attempt to move again */
3242  GC_ASSERT(RVALUE_MARKED(objspace, src));
3243 
3244  asan_unlock_freelist(free_page);
3245  VALUE dest = (VALUE)free_page->freelist;
3246  asan_lock_freelist(free_page);
3247  asan_unpoison_object(dest, false);
3248  if (!dest) {
3249  /* if we can't get something from the freelist then the page must be
3250  * full */
3251  return false;
3252  }
3253  asan_unlock_freelist(free_page);
3254  free_page->freelist = ((struct free_slot *)dest)->next;
3255  asan_lock_freelist(free_page);
3256 
3257  GC_ASSERT(RB_BUILTIN_TYPE(dest) == T_NONE);
3258 
3259  if (src_page->slot_size > free_page->slot_size) {
3260  objspace->rcompactor.moved_down_count_table[BUILTIN_TYPE(src)]++;
3261  }
3262  else if (free_page->slot_size > src_page->slot_size) {
3263  objspace->rcompactor.moved_up_count_table[BUILTIN_TYPE(src)]++;
3264  }
3265  objspace->rcompactor.moved_count_table[BUILTIN_TYPE(src)]++;
3266  objspace->rcompactor.total_moved++;
3267 
3268  gc_move(objspace, src, dest, src_page->slot_size, free_page->slot_size);
3269  gc_pin(objspace, src);
3270  free_page->free_slots--;
3271 
3272  return true;
3273 }
3274 
3275 static void
3276 gc_unprotect_pages(rb_objspace_t *objspace, rb_heap_t *heap)
3277 {
3278  struct heap_page *cursor = heap->compact_cursor;
3279 
3280  while (cursor) {
3281  unlock_page_body(objspace, cursor->body);
3282  cursor = ccan_list_next(&heap->pages, cursor, page_node);
3283  }
3284 }
3285 
3286 static void gc_update_references(rb_objspace_t *objspace);
3287 #if GC_CAN_COMPILE_COMPACTION
3288 static void invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page);
3289 #endif
3290 
3291 #if defined(__MINGW32__) || defined(_WIN32)
3292 # define GC_COMPACTION_SUPPORTED 1
3293 #else
3294 /* If we are neither on MinGW nor Windows and do not have mmap, we cannot use
3295  * mprotect for the read barrier, so we must disable compaction. */
3296 # define GC_COMPACTION_SUPPORTED (GC_CAN_COMPILE_COMPACTION && HEAP_PAGE_ALLOC_USE_MMAP)
3297 #endif
3298 
3299 #if GC_CAN_COMPILE_COMPACTION
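/* Called from the platform fault handler when a read hits a page that was
 * mprotect-ed during compaction: unprotect the page, count the fault, and
 * invalidate the moved objects on that page so the reader sees valid data. */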
3300 static void
3301 read_barrier_handler(uintptr_t original_address)
3302 {
3303  VALUE obj;
3304  rb_objspace_t *objspace = (rb_objspace_t *)rb_gc_get_objspace();
3305 
3306  /* Calculate address aligned to slots. */
3307  uintptr_t address = original_address - (original_address % BASE_SLOT_SIZE);
3308 
3309  obj = (VALUE)address;
3310 
3311  struct heap_page_body *page_body = GET_PAGE_BODY(obj);
3312 
3313  /* If the page_body is NULL, then mprotect cannot handle it and will crash
3314  * with "Cannot allocate memory". */
3315  if (page_body == NULL) {
3316  rb_bug("read_barrier_handler: segmentation fault at %p", (void *)original_address);
3317  }
3318 
3319  int lev = rb_gc_vm_lock();
3320  {
3321  unlock_page_body(objspace, page_body);
3322 
3323  objspace->profile.read_barrier_faults++;
3324 
3325  invalidate_moved_page(objspace, GET_HEAP_PAGE(obj));
3326  }
3327  rb_gc_vm_unlock(lev);
3328 }
3329 #endif
3330 
3331 #if !GC_CAN_COMPILE_COMPACTION
3332 static void
3333 uninstall_handlers(void)
3334 {
3335  /* no-op */
3336 }
3337 
3338 static void
3339 install_handlers(void)
3340 {
3341  /* no-op */
3342 }
3343 #elif defined(_WIN32)
3344 static LPTOP_LEVEL_EXCEPTION_FILTER old_handler;
3345 typedef void (*signal_handler)(int);
3346 static signal_handler old_sigsegv_handler;
3347 
3348 static LONG WINAPI
3349 read_barrier_signal(EXCEPTION_POINTERS *info)
3350 {
3351  /* EXCEPTION_ACCESS_VIOLATION is what's raised by access to protected pages */
3352  if (info->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
3353  /* > The second array element specifies the virtual address of the inaccessible data.
3354  * https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-exception_record
3355  *
3356  * Use this address to invalidate the page */
3357  read_barrier_handler((uintptr_t)info->ExceptionRecord->ExceptionInformation[1]);
3358  return EXCEPTION_CONTINUE_EXECUTION;
3359  }
3360  else {
3361  return EXCEPTION_CONTINUE_SEARCH;
3362  }
3363 }
3364 
3365 static void
3366 uninstall_handlers(void)
3367 {
3368  signal(SIGSEGV, old_sigsegv_handler);
3369  SetUnhandledExceptionFilter(old_handler);
3370 }
3371 
3372 static void
3373 install_handlers(void)
3374 {
3375  /* Remove SEGV handler so that the Unhandled Exception Filter handles it */
3376  old_sigsegv_handler = signal(SIGSEGV, NULL);
3377  /* Unhandled Exception Filter has access to the violation address similar
3378  * to si_addr from sigaction */
3379  old_handler = SetUnhandledExceptionFilter(read_barrier_signal);
3380 }
3381 #else
3382 static struct sigaction old_sigbus_handler;
3383 static struct sigaction old_sigsegv_handler;
3384 
3385 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3386 static exception_mask_t old_exception_masks[32];
3387 static mach_port_t old_exception_ports[32];
3388 static exception_behavior_t old_exception_behaviors[32];
3389 static thread_state_flavor_t old_exception_flavors[32];
3390 static mach_msg_type_number_t old_exception_count;
3391 
3392 static void
3393 disable_mach_bad_access_exc(void)
3394 {
3395  old_exception_count = sizeof(old_exception_masks) / sizeof(old_exception_masks[0]);
3396  task_swap_exception_ports(
3397  mach_task_self(), EXC_MASK_BAD_ACCESS,
3398  MACH_PORT_NULL, EXCEPTION_DEFAULT, 0,
3399  old_exception_masks, &old_exception_count,
3400  old_exception_ports, old_exception_behaviors, old_exception_flavors
3401  );
3402 }
3403 
3404 static void
3405 restore_mach_bad_access_exc(void)
3406 {
3407  for (mach_msg_type_number_t i = 0; i < old_exception_count; i++) {
3408  task_set_exception_ports(
3409  mach_task_self(),
3410  old_exception_masks[i], old_exception_ports[i],
3411  old_exception_behaviors[i], old_exception_flavors[i]
3412  );
3413  }
3414 }
3415 #endif
3416 
3417 static void
3418 read_barrier_signal(int sig, siginfo_t *info, void *data)
3419 {
3420  // setup SEGV/BUS handlers for errors
3421  struct sigaction prev_sigbus, prev_sigsegv;
3422  sigaction(SIGBUS, &old_sigbus_handler, &prev_sigbus);
3423  sigaction(SIGSEGV, &old_sigsegv_handler, &prev_sigsegv);
3424 
3425  // enable SIGBUS/SEGV
3426  sigset_t set, prev_set;
3427  sigemptyset(&set);
3428  sigaddset(&set, SIGBUS);
3429  sigaddset(&set, SIGSEGV);
3430  sigprocmask(SIG_UNBLOCK, &set, &prev_set);
3431 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3432  disable_mach_bad_access_exc();
3433 #endif
3434  // run handler
3435  read_barrier_handler((uintptr_t)info->si_addr);
3436 
3437  // reset SEGV/BUS handlers
3438 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3439  restore_mach_bad_access_exc();
3440 #endif
3441  sigaction(SIGBUS, &prev_sigbus, NULL);
3442  sigaction(SIGSEGV, &prev_sigsegv, NULL);
3443  sigprocmask(SIG_SETMASK, &prev_set, NULL);
3444 }
3445 
3446 static void
3447 uninstall_handlers(void)
3448 {
3449 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3450  restore_mach_bad_access_exc();
3451 #endif
3452  sigaction(SIGBUS, &old_sigbus_handler, NULL);
3453  sigaction(SIGSEGV, &old_sigsegv_handler, NULL);
3454 }
3455 
3456 static void
3457 install_handlers(void)
3458 {
3459  struct sigaction action;
3460  memset(&action, 0, sizeof(struct sigaction));
3461  sigemptyset(&action.sa_mask);
3462  action.sa_sigaction = read_barrier_signal;
3463  action.sa_flags = SA_SIGINFO | SA_ONSTACK;
3464 
3465  sigaction(SIGBUS, &action, &old_sigbus_handler);
3466  sigaction(SIGSEGV, &action, &old_sigsegv_handler);
3467 #ifdef HAVE_MACH_TASK_EXCEPTION_PORTS
3468  disable_mach_bad_access_exc();
3469 #endif
3470 }
3471 #endif
3472 
3473 static void
3474 gc_compact_finish(rb_objspace_t *objspace)
3475 {
3476  for (int i = 0; i < HEAP_COUNT; i++) {
3477  rb_heap_t *heap = &heaps[i];
3478  gc_unprotect_pages(objspace, heap);
3479  }
3480 
3481  uninstall_handlers();
3482 
3483  gc_update_references(objspace);
3484  objspace->profile.compact_count++;
3485 
3486  for (int i = 0; i < HEAP_COUNT; i++) {
3487  rb_heap_t *heap = &heaps[i];
3488  heap->compact_cursor = NULL;
3489  heap->free_pages = NULL;
3490  heap->compact_cursor_index = 0;
3491  }
3492 
3493  if (gc_prof_enabled(objspace)) {
3494  gc_profile_record *record = gc_prof_record(objspace);
3495  record->moved_objects = objspace->rcompactor.total_moved - record->moved_objects;
3496  }
3497  objspace->flags.during_compacting = FALSE;
3498 }
3499 
3500 struct gc_sweep_context {
3501  struct heap_page *page;
3502  int final_slots;
3503  int freed_slots;
3504  int empty_slots;
3505 };
3506 
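/* Sweep the slots covered by one bitmap word. `bitset` is the inverted mark
 * word, so each set bit denotes an unmarked slot: unreachable objects are
 * freed (or turned into zombies awaiting finalization), and freed, empty,
 * and finalizable slots are tallied into `ctx`. */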
3507 static inline void
3508 gc_sweep_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct gc_sweep_context *ctx)
3509 {
3510  struct heap_page *sweep_page = ctx->page;
3511  short slot_size = sweep_page->slot_size;
3512  short slot_bits = slot_size / BASE_SLOT_SIZE;
3513  GC_ASSERT(slot_bits > 0);
3514 
3515  do {
3516  VALUE vp = (VALUE)p;
3517  GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
3518 
3519  asan_unpoison_object(vp, false);
3520  if (bitset & 1) {
3521  switch (BUILTIN_TYPE(vp)) {
3522  default: /* majority case */
3523  gc_report(2, objspace, "page_sweep: free %p\n", (void *)p);
3524 #if RGENGC_CHECK_MODE
3525  if (!is_full_marking(objspace)) {
3526  if (RVALUE_OLD_P(objspace, vp)) rb_bug("page_sweep: %p - old while minor GC.", (void *)p);
3527  if (RVALUE_REMEMBERED(objspace, vp)) rb_bug("page_sweep: %p - remembered.", (void *)p);
3528  }
3529 #endif
3530 
3531  if (RVALUE_WB_UNPROTECTED(objspace, vp)) CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(vp), vp);
3532 
3533 #if RGENGC_CHECK_MODE
3534 #define CHECK(x) if (x(objspace, vp) != FALSE) rb_bug("obj_free: " #x "(%s) != FALSE", rb_obj_info(vp))
3535  CHECK(RVALUE_WB_UNPROTECTED);
3536  CHECK(RVALUE_MARKED);
3537  CHECK(RVALUE_MARKING);
3538  CHECK(RVALUE_UNCOLLECTIBLE);
3539 #undef CHECK
3540 #endif
3541 
3542  rb_gc_event_hook(vp, RUBY_INTERNAL_EVENT_FREEOBJ);
3543 
3544  bool has_object_id = FL_TEST(vp, FL_SEEN_OBJ_ID);
3545  rb_gc_obj_free_vm_weak_references(vp);
3546  if (rb_gc_obj_free(objspace, vp)) {
3547  if (has_object_id) {
3548  obj_free_object_id(objspace, vp);
3549  }
3550  // always add free slots back to the swept pages freelist,
3551  // so that if we're compacting, we can re-use the slots
3552  (void)VALGRIND_MAKE_MEM_UNDEFINED((void*)p, BASE_SLOT_SIZE);
3553  heap_page_add_freeobj(objspace, sweep_page, vp);
3554  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3555  ctx->freed_slots++;
3556  }
3557  else {
3558  ctx->final_slots++;
3559  }
3560  break;
3561 
3562  case T_MOVED:
3563  if (objspace->flags.during_compacting) {
3564  /* The sweep cursor shouldn't have made it to any
3565  * T_MOVED slots while the compact flag is enabled.
3566  * The sweep cursor and compact cursor move in
3567  * opposite directions, and when they meet references will
3568  * get updated and "during_compacting" should get disabled */
3569  rb_bug("T_MOVED shouldn't be seen until compaction is finished");
3570  }
3571  gc_report(3, objspace, "page_sweep: %s is added to freelist\n", rb_obj_info(vp));
3572  ctx->empty_slots++;
3573  heap_page_add_freeobj(objspace, sweep_page, vp);
3574  break;
3575  case T_ZOMBIE:
3576  /* already counted */
3577  break;
3578  case T_NONE:
3579  ctx->empty_slots++; /* already freed */
3580  break;
3581  }
3582  }
3583  p += slot_size;
3584  bitset >>= slot_bits;
3585  } while (bitset);
3586 }
3587 
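/* Sweep one page: mark the out-of-range bits at the tail of the bitmap so
 * they are skipped, walk every bitmap plane through gc_sweep_plane, then
 * (unless compacting) reset the mark bits from the uncollectible bits and
 * schedule deferred finalization if zombies are pending. */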
3588 static inline void
3589 gc_sweep_page(rb_objspace_t *objspace, rb_heap_t *heap, struct gc_sweep_context *ctx)
3590 {
3591  struct heap_page *sweep_page = ctx->page;
3592  GC_ASSERT(sweep_page->heap == heap);
3593 
3594  uintptr_t p;
3595  bits_t *bits, bitset;
3596 
3597  gc_report(2, objspace, "page_sweep: start.\n");
3598 
3599 #if RGENGC_CHECK_MODE
3600  if (!objspace->flags.immediate_sweep) {
3601  GC_ASSERT(sweep_page->flags.before_sweep == TRUE);
3602  }
3603 #endif
3604  sweep_page->flags.before_sweep = FALSE;
3605  sweep_page->free_slots = 0;
3606 
3607  p = (uintptr_t)sweep_page->start;
3608  bits = sweep_page->mark_bits;
3609 
3610  int page_rvalue_count = sweep_page->total_slots * (sweep_page->slot_size / BASE_SLOT_SIZE);
3611  int out_of_range_bits = (NUM_IN_PAGE(p) + page_rvalue_count) % BITS_BITLENGTH;
3612  if (out_of_range_bits != 0) { // sizeof(RVALUE) == 64
3613  bits[BITMAP_INDEX(p) + page_rvalue_count / BITS_BITLENGTH] |= ~(((bits_t)1 << out_of_range_bits) - 1);
3614  }
3615 
3616  /* The last bitmap plane may not be used if the last plane does not
3617  * have enough space for the slot_size. In that case, the last plane must
3618  * be skipped since none of the bits will be set. */
3619  int bitmap_plane_count = CEILDIV(NUM_IN_PAGE(p) + page_rvalue_count, BITS_BITLENGTH);
3620  GC_ASSERT(bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT - 1 ||
3621  bitmap_plane_count == HEAP_PAGE_BITMAP_LIMIT);
3622 
3623  // Skip out of range slots at the head of the page
3624  bitset = ~bits[0];
3625  bitset >>= NUM_IN_PAGE(p);
3626  if (bitset) {
3627  gc_sweep_plane(objspace, heap, p, bitset, ctx);
3628  }
3629  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
3630 
3631  for (int i = 1; i < bitmap_plane_count; i++) {
3632  bitset = ~bits[i];
3633  if (bitset) {
3634  gc_sweep_plane(objspace, heap, p, bitset, ctx);
3635  }
3636  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
3637  }
3638 
3639  if (!heap->compact_cursor) {
3640  gc_setup_mark_bits(sweep_page);
3641  }
3642 
3643 #if GC_PROFILE_MORE_DETAIL
3644  if (gc_prof_enabled(objspace)) {
3645  gc_profile_record *record = gc_prof_record(objspace);
3646  record->removing_objects += ctx->final_slots + ctx->freed_slots;
3647  record->empty_objects += ctx->empty_slots;
3648  }
3649 #endif
3650  if (0) fprintf(stderr, "gc_sweep_page(%"PRIdSIZE"): total_slots: %d, freed_slots: %d, empty_slots: %d, final_slots: %d\n",
3651  rb_gc_count(),
3652  sweep_page->total_slots,
3653  ctx->freed_slots, ctx->empty_slots, ctx->final_slots);
3654 
3655  sweep_page->free_slots += ctx->freed_slots + ctx->empty_slots;
3656  sweep_page->heap->total_freed_objects += ctx->freed_slots;
3657 
3658  if (heap_pages_deferred_final && !finalizing) {
3659  gc_finalize_deferred_register(objspace);
3660  }
3661 
3662 #if RGENGC_CHECK_MODE
3663  short freelist_len = 0;
3664  asan_unlock_freelist(sweep_page);
3665  struct free_slot *ptr = sweep_page->freelist;
3666  while (ptr) {
3667  freelist_len++;
3668  ptr = ptr->next;
3669  }
3670  asan_lock_freelist(sweep_page);
3671  if (freelist_len != sweep_page->free_slots) {
3672  rb_bug("inconsistent freelist length: expected %d but was %d", sweep_page->free_slots, freelist_len);
3673  }
3674 #endif
3675 
3676  gc_report(2, objspace, "page_sweep: end.\n");
3677 }
3678 
3679 static const char *
3680 gc_mode_name(enum gc_mode mode)
3681 {
3682  switch (mode) {
3683  case gc_mode_none: return "none";
3684  case gc_mode_marking: return "marking";
3685  case gc_mode_sweeping: return "sweeping";
3686  case gc_mode_compacting: return "compacting";
3687  default: rb_bug("gc_mode_name: unknown mode: %d", (int)mode);
3688  }
3689 }
3690 
3691 static void
3692 gc_mode_transition(rb_objspace_t *objspace, enum gc_mode mode)
3693 {
3694 #if RGENGC_CHECK_MODE
3695  enum gc_mode prev_mode = gc_mode(objspace);
3696  switch (prev_mode) {
3697  case gc_mode_none: GC_ASSERT(mode == gc_mode_marking); break;
3698  case gc_mode_marking: GC_ASSERT(mode == gc_mode_sweeping); break;
3699  case gc_mode_sweeping: GC_ASSERT(mode == gc_mode_none || mode == gc_mode_compacting); break;
3700  case gc_mode_compacting: GC_ASSERT(mode == gc_mode_none); break;
3701  }
3702 #endif
3703  if (0) fprintf(stderr, "gc_mode_transition: %s->%s\n", gc_mode_name(gc_mode(objspace)), gc_mode_name(mode));
3704  gc_mode_set(objspace, mode);
3705 }
3706 
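/* Append a detached freelist (e.g. from a ractor's allocation cache) to the
 * tail of the page's own freelist. Each slot is unpoisoned only while its
 * next pointer is read and is re-poisoned immediately after, keeping ASan
 * coverage intact. */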
3707 static void
3708 heap_page_freelist_append(struct heap_page *page, struct free_slot *freelist)
3709 {
3710  if (freelist) {
3711  asan_unlock_freelist(page);
3712  if (page->freelist) {
3713  struct free_slot *p = page->freelist;
3714  asan_unpoison_object((VALUE)p, false);
3715  while (p->next) {
3716  struct free_slot *prev = p;
3717  p = p->next;
3718  asan_poison_object((VALUE)prev);
3719  asan_unpoison_object((VALUE)p, false);
3720  }
3721  p->next = freelist;
3722  asan_poison_object((VALUE)p);
3723  }
3724  else {
3725  page->freelist = freelist;
3726  }
3727  asan_lock_freelist(page);
3728  }
3729 }
3730 
3731 static void
3732 gc_sweep_start_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3733 {
3734  heap->sweeping_page = ccan_list_top(&heap->pages, struct heap_page, page_node);
3735  heap->free_pages = NULL;
3736  heap->pooled_pages = NULL;
3737  if (!objspace->flags.immediate_sweep) {
3738  struct heap_page *page = NULL;
3739 
3740  ccan_list_for_each(&heap->pages, page, page_node) {
3741  page->flags.before_sweep = TRUE;
3742  }
3743  }
3744 }
3745 
3746 #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 4
3747 __attribute__((noinline))
3748 #endif
3749 
3750 #if GC_CAN_COMPILE_COMPACTION
3751 static void gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func);
3752 static int compare_pinned_slots(const void *left, const void *right, void *d);
3753 #endif
3754 
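/* Flush a ractor's per-heap allocation caches: return each cached freelist to
 * its page and reset the incremental-marking allocation counter, so the sweep
 * phase sees a consistent view of free slots. */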
3755 static void
3756 gc_ractor_newobj_cache_clear(void *c, void *data)
3757 {
3758  rb_ractor_newobj_cache_t *newobj_cache = c;
3759 
3760  newobj_cache->incremental_mark_step_allocated_slots = 0;
3761 
3762  for (size_t heap_idx = 0; heap_idx < HEAP_COUNT; heap_idx++) {
3763  rb_ractor_newobj_heap_cache_t *cache = &newobj_cache->heap_caches[heap_idx];
3764 
3765  struct heap_page *page = cache->using_page;
3766  struct free_slot *freelist = cache->freelist;
3767  RUBY_DEBUG_LOG("ractor using_page:%p freelist:%p", (void *)page, (void *)freelist);
3768 
3769  heap_page_freelist_append(page, freelist);
3770 
3771  cache->using_page = NULL;
3772  cache->freelist = NULL;
3773  }
3774 }
3775 
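/* Enter the sweep phase: switch the GC mode, reset per-heap sweep state, sort
 * pages when compacting, and immediately finish heaps that have no pages to
 * sweep. Ractor allocation caches are flushed last. */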
3776 static void
3777 gc_sweep_start(rb_objspace_t *objspace)
3778 {
3779  gc_mode_transition(objspace, gc_mode_sweeping);
3780  objspace->rincgc.pooled_slots = 0;
3781  objspace->heap_pages.allocatable_slots = 0;
3782 
3783 #if GC_CAN_COMPILE_COMPACTION
3784  if (objspace->flags.during_compacting) {
3785  gc_sort_heap_by_compare_func(
3786  objspace,
3787  objspace->rcompactor.compare_func ? objspace->rcompactor.compare_func : compare_pinned_slots
3788  );
3789  }
3790 #endif
3791 
3792  for (int i = 0; i < HEAP_COUNT; i++) {
3793  rb_heap_t *heap = &heaps[i];
3794  gc_sweep_start_heap(objspace, heap);
3795 
3796  /* We should call gc_sweep_finish_heap for size pools with no pages. */
3797  if (heap->sweeping_page == NULL) {
3798  GC_ASSERT(heap->total_pages == 0);
3799  GC_ASSERT(heap->total_slots == 0);
3800  gc_sweep_finish_heap(objspace, heap);
3801  }
3802  }
3803 
3804  rb_gc_ractor_newobj_cache_foreach(gc_ractor_newobj_cache_clear, NULL);
3805 }
3806 
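/* Called once a heap has been completely swept. If the heap is short on free
 * slots, first try to resurrect already-allocated empty pages; if that is not
 * enough, either expand the heap's allocatable slots or request a major GC
 * via GPR_FLAG_MAJOR_BY_NOFREE. */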
3807 static void
3808 gc_sweep_finish_heap(rb_objspace_t *objspace, rb_heap_t *heap)
3809 {
3810  size_t total_slots = heap->total_slots;
3811  size_t swept_slots = heap->freed_slots + heap->empty_slots;
3812 
3813  size_t init_slots = gc_params.heap_init_slots[heap - heaps];
3814  size_t min_free_slots = (size_t)(MAX(total_slots, init_slots) * gc_params.heap_free_slots_min_ratio);
3815 
3816  if (swept_slots < min_free_slots &&
3817  /* The heap is a growth heap if it freed more slots than it had empty slots. */
3818  (heap->empty_slots == 0 || heap->freed_slots > heap->empty_slots)) {
3819  /* If we don't have enough slots and we have pages on the tomb heap, move
3820  * pages from the tomb heap to the eden heap. This may prevent page
3821  * creation thrashing (frequently allocating and deallocating pages) and
3822  * GC thrashing (running GC more frequently than required). */
3823  struct heap_page *resurrected_page;
3824  while (swept_slots < min_free_slots &&
3825  (resurrected_page = heap_page_resurrect(objspace))) {
3826  heap_add_page(objspace, heap, resurrected_page);
3827  heap_add_freepage(heap, resurrected_page);
3828 
3829  swept_slots += resurrected_page->free_slots;
3830  }
3831 
3832  if (swept_slots < min_free_slots) {
3833  /* Grow this heap if we are in a major GC or if we haven't run at least
3834  * RVALUE_OLD_AGE minor GC since the last major GC. */
3835  if (is_full_marking(objspace) ||
3836  objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
3837  heap_allocatable_slots_expand(objspace, heap, swept_slots, heap->total_slots);
3838  }
3839  else {
3840  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
3841  heap->force_major_gc_count++;
3842  }
3843  }
3844  }
3845 }
3846 
3847 static void
3848 gc_sweep_finish(rb_objspace_t *objspace)
3849 {
3850  gc_report(1, objspace, "gc_sweep_finish\n");
3851 
3852  gc_prof_set_heap_info(objspace);
3853  heap_pages_free_unused_pages(objspace);
3854 
3855  for (int i = 0; i < HEAP_COUNT; i++) {
3856  rb_heap_t *heap = &heaps[i];
3857 
3858  heap->freed_slots = 0;
3859  heap->empty_slots = 0;
3860 
3861  if (!will_be_incremental_marking(objspace)) {
3862  struct heap_page *end_page = heap->free_pages;
3863  if (end_page) {
3864  while (end_page->free_next) end_page = end_page->free_next;
3865  end_page->free_next = heap->pooled_pages;
3866  }
3867  else {
3868  heap->free_pages = heap->pooled_pages;
3869  }
3870  heap->pooled_pages = NULL;
3871  objspace->rincgc.pooled_slots = 0;
3872  }
3873  }
3874 
3875  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_END_SWEEP);
3876  gc_mode_transition(objspace, gc_mode_none);
3877 
3878 #if RGENGC_CHECK_MODE >= 2
3879  gc_verify_internal_consistency(objspace);
3880 #endif
3881 }
3882 
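/* Perform one (lazy) sweep step: sweep pages starting at heap->sweeping_page
 * until enough free slots have been produced. Pages that end up completely
 * empty are unlinked and parked on the global empty-pages list; pages with
 * some free slots are pooled for incremental marking or added to the heap's
 * free page list. Returns whether the heap now has free pages. */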
3883 static int
3884 gc_sweep_step(rb_objspace_t *objspace, rb_heap_t *heap)
3885 {
3886  struct heap_page *sweep_page = heap->sweeping_page;
3887  int unlink_limit = GC_SWEEP_PAGES_FREEABLE_PER_STEP;
3888  int swept_slots = 0;
3889  int pooled_slots = 0;
3890 
3891  if (sweep_page == NULL) return FALSE;
3892 
3893 #if GC_ENABLE_LAZY_SWEEP
3894  gc_prof_sweep_timer_start(objspace);
3895 #endif
3896 
3897  do {
3898  RUBY_DEBUG_LOG("sweep_page:%p", (void *)sweep_page);
3899 
3900  struct gc_sweep_context ctx = {
3901  .page = sweep_page,
3902  .final_slots = 0,
3903  .freed_slots = 0,
3904  .empty_slots = 0,
3905  };
3906  gc_sweep_page(objspace, heap, &ctx);
3907  int free_slots = ctx.freed_slots + ctx.empty_slots;
3908 
3909  heap->sweeping_page = ccan_list_next(&heap->pages, sweep_page, page_node);
3910 
3911  if (free_slots == sweep_page->total_slots &&
3912  heap_pages_freeable_pages > 0 &&
3913  unlink_limit > 0) {
3914  heap_pages_freeable_pages--;
3915  unlink_limit--;
3916  /* There are no living objects, so move this page to the global empty pages. */
3917  heap_unlink_page(objspace, heap, sweep_page);
3918 
3919  sweep_page->start = 0;
3920  sweep_page->total_slots = 0;
3921  sweep_page->slot_size = 0;
3922  sweep_page->heap = NULL;
3923  sweep_page->free_slots = 0;
3924 
3925  asan_unlock_freelist(sweep_page);
3926  sweep_page->freelist = NULL;
3927  asan_lock_freelist(sweep_page);
3928 
3929  asan_poison_memory_region(sweep_page->body, HEAP_PAGE_SIZE);
3930 
3931  objspace->empty_pages_count++;
3932  sweep_page->free_next = objspace->empty_pages;
3933  objspace->empty_pages = sweep_page;
3934  }
3935  else if (free_slots > 0) {
3936  heap->freed_slots += ctx.freed_slots;
3937  heap->empty_slots += ctx.empty_slots;
3938 
3939  if (pooled_slots < GC_INCREMENTAL_SWEEP_POOL_SLOT_COUNT) {
3940  heap_add_poolpage(objspace, heap, sweep_page);
3941  pooled_slots += free_slots;
3942  }
3943  else {
3944  heap_add_freepage(heap, sweep_page);
3945  swept_slots += free_slots;
3946  if (swept_slots > GC_INCREMENTAL_SWEEP_SLOT_COUNT) {
3947  break;
3948  }
3949  }
3950  }
3951  else {
3952  sweep_page->free_next = NULL;
3953  }
3954  } while ((sweep_page = heap->sweeping_page));
3955 
3956  if (!heap->sweeping_page) {
3957  gc_sweep_finish_heap(objspace, heap);
3958 
3959  if (!has_sweeping_pages(objspace)) {
3960  gc_sweep_finish(objspace);
3961  }
3962  }
3963 
3964 #if GC_ENABLE_LAZY_SWEEP
3965  gc_prof_sweep_timer_stop(objspace);
3966 #endif
3967 
3968  return heap->free_pages != NULL;
3969 }
3970 
3971 static void
3972 gc_sweep_rest(rb_objspace_t *objspace)
3973 {
3974  for (int i = 0; i < HEAP_COUNT; i++) {
3975  rb_heap_t *heap = &heaps[i];
3976 
3977  while (heap->sweeping_page) {
3978  gc_sweep_step(objspace, heap);
3979  }
3980  }
3981 }
3982 
3983 static void
3984 gc_sweep_continue(rb_objspace_t *objspace, rb_heap_t *sweep_heap)
3985 {
3986  GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
3987  if (!GC_ENABLE_LAZY_SWEEP) return;
3988 
3989  gc_sweeping_enter(objspace);
3990 
3991  for (int i = 0; i < HEAP_COUNT; i++) {
3992  rb_heap_t *heap = &heaps[i];
3993  if (!gc_sweep_step(objspace, heap)) {
3994  /* sweep_heap requires a free slot but sweeping did not yield any
3995  * and we cannot allocate a new page. */
3996  if (heap == sweep_heap && objspace->heap_pages.allocatable_slots == 0) {
3997  /* Not allowed to create a new page so finish sweeping. */
3998  gc_sweep_rest(objspace);
3999  break;
4000  }
4001  }
4002  }
4003 
4004  gc_sweeping_exit(objspace);
4005 }
4006 
4007 VALUE
4008 rb_gc_impl_location(void *objspace_ptr, VALUE value)
4009 {
4010  VALUE destination;
4011 
4012  if (!SPECIAL_CONST_P(value)) {
4013  asan_unpoisoning_object(value) {
4014  if (BUILTIN_TYPE(value) == T_MOVED) {
4015  destination = (VALUE)RMOVED(value)->destination;
4016  GC_ASSERT(BUILTIN_TYPE(destination) != T_NONE);
4017  }
4018  else {
4019  destination = value;
4020  }
4021  }
4022  }
4023  else {
4024  destination = value;
4025  }
4026 
4027  return destination;
4028 }
4029 
4030 #if GC_CAN_COMPILE_COMPACTION
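/* Invalidation undoes object movement: a T_MOVED forwarding slot is pinned
 * but never marked, so (pinned & ~marked) identifies it. The object is moved
 * back from its new location into the forwarding slot, and the new location
 * is returned to its page's freelist. */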
4031 static void
4032 invalidate_moved_plane(rb_objspace_t *objspace, struct heap_page *page, uintptr_t p, bits_t bitset)
4033 {
4034  if (bitset) {
4035  do {
4036  if (bitset & 1) {
4037  VALUE forwarding_object = (VALUE)p;
4038  VALUE object;
4039 
4040  if (BUILTIN_TYPE(forwarding_object) == T_MOVED) {
4041  GC_ASSERT(RVALUE_PINNED(objspace, forwarding_object));
4042  GC_ASSERT(!RVALUE_MARKED(objspace, forwarding_object));
4043 
4044  CLEAR_IN_BITMAP(GET_HEAP_PINNED_BITS(forwarding_object), forwarding_object);
4045 
4046  object = rb_gc_impl_location(objspace, forwarding_object);
4047 
4048  uint32_t original_shape_id = 0;
4049  if (RB_TYPE_P(object, T_OBJECT)) {
4050  original_shape_id = RMOVED(forwarding_object)->original_shape_id;
4051  }
4052 
4053  gc_move(objspace, object, forwarding_object, GET_HEAP_PAGE(object)->slot_size, page->slot_size);
4054  /* forwarding_object is now our actual object, and "object"
4055  * is the free slot for the original page */
4056 
4057  if (original_shape_id) {
4058  rb_gc_set_shape(forwarding_object, original_shape_id);
4059  }
4060 
4061  struct heap_page *orig_page = GET_HEAP_PAGE(object);
4062  orig_page->free_slots++;
4063  heap_page_add_freeobj(objspace, orig_page, object);
4064 
4065  GC_ASSERT(RVALUE_MARKED(objspace, forwarding_object));
4066  GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_MOVED);
4067  GC_ASSERT(BUILTIN_TYPE(forwarding_object) != T_NONE);
4068  }
4069  }
4070  p += BASE_SLOT_SIZE;
4071  bitset >>= 1;
4072  } while (bitset);
4073  }
4074 }
4075 
4076 static void
4077 invalidate_moved_page(rb_objspace_t *objspace, struct heap_page *page)
4078 {
4079  int i;
4080  bits_t *mark_bits, *pin_bits;
4081  bits_t bitset;
4082 
4083  mark_bits = page->mark_bits;
4084  pin_bits = page->pinned_bits;
4085 
4086  uintptr_t p = page->start;
4087 
4088  // Skip out of range slots at the head of the page
4089  bitset = pin_bits[0] & ~mark_bits[0];
4090  bitset >>= NUM_IN_PAGE(p);
4091  invalidate_moved_plane(objspace, page, p, bitset);
4092  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
4093 
4094  for (i=1; i < HEAP_PAGE_BITMAP_LIMIT; i++) {
4095  /* Moved objects are pinned but never marked. We reuse the pin bits
4096  * to indicate there is a moved object in this slot. */
4097  bitset = pin_bits[i] & ~mark_bits[i];
4098 
4099  invalidate_moved_plane(objspace, page, p, bitset);
4100  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
4101  }
4102 }
4103 #endif
4104 
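/* Begin compaction: flag every page as before_sweep, place each heap's
 * compact cursor on its last page (the compact cursor walks toward the sweep
 * cursor from the opposite end), clear the per-type move statistics, and
 * install the read barrier for pages that will hold T_MOVED slots. */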
4105 static void
4106 gc_compact_start(rb_objspace_t *objspace)
4107 {
4108  struct heap_page *page = NULL;
4109  gc_mode_transition(objspace, gc_mode_compacting);
4110 
4111  for (int i = 0; i < HEAP_COUNT; i++) {
4112  rb_heap_t *heap = &heaps[i];
4113  ccan_list_for_each(&heap->pages, page, page_node) {
4114  page->flags.before_sweep = TRUE;
4115  }
4116 
4117  heap->compact_cursor = ccan_list_tail(&heap->pages, struct heap_page, page_node);
4118  heap->compact_cursor_index = 0;
4119  }
4120 
4121  if (gc_prof_enabled(objspace)) {
4122  gc_profile_record *record = gc_prof_record(objspace);
4123  record->moved_objects = objspace->rcompactor.total_moved;
4124  }
4125 
4126  memset(objspace->rcompactor.considered_count_table, 0, T_MASK * sizeof(size_t));
4127  memset(objspace->rcompactor.moved_count_table, 0, T_MASK * sizeof(size_t));
4128  memset(objspace->rcompactor.moved_up_count_table, 0, T_MASK * sizeof(size_t));
4129  memset(objspace->rcompactor.moved_down_count_table, 0, T_MASK * sizeof(size_t));
4130 
4131  /* Set up read barrier for pages containing MOVED objects */
4132  install_handlers();
4133 }
4134 
4135 static void gc_sweep_compact(rb_objspace_t *objspace);
4136 
4137 static void
4138 gc_sweep(rb_objspace_t *objspace)
4139 {
4140  gc_sweeping_enter(objspace);
4141 
4142  const unsigned int immediate_sweep = objspace->flags.immediate_sweep;
4143 
4144  gc_report(1, objspace, "gc_sweep: immediate: %d\n", immediate_sweep);
4145 
4146  gc_sweep_start(objspace);
4147  if (objspace->flags.during_compacting) {
4148  gc_sweep_compact(objspace);
4149  }
4150 
4151  if (immediate_sweep) {
4152 #if !GC_ENABLE_LAZY_SWEEP
4153  gc_prof_sweep_timer_start(objspace);
4154 #endif
4155  gc_sweep_rest(objspace);
4156 #if !GC_ENABLE_LAZY_SWEEP
4157  gc_prof_sweep_timer_stop(objspace);
4158 #endif
4159  }
4160  else {
4161 
4162  /* Sweep every size pool. */
4163  for (int i = 0; i < HEAP_COUNT; i++) {
4164  rb_heap_t *heap = &heaps[i];
4165  gc_sweep_step(objspace, heap);
4166  }
4167  }
4168 
4169  gc_sweeping_exit(objspace);
4170 }
4171 
4172 /* Marking - Marking stack */
4173 
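/* The mark stack is a linked list of fixed-size chunks. Chunks popped off the
 * stack are kept in a small cache so allocation is amortised across marking;
 * shrink_stack_chunk_cache() releases cached chunks that stayed unused. */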
4174 static stack_chunk_t *
4175 stack_chunk_alloc(void)
4176 {
4177  stack_chunk_t *res;
4178 
4179  res = malloc(sizeof(stack_chunk_t));
4180  if (!res)
4181  rb_memerror();
4182 
4183  return res;
4184 }
4185 
4186 static inline int
4187 is_mark_stack_empty(mark_stack_t *stack)
4188 {
4189  return stack->chunk == NULL;
4190 }
4191 
4192 static size_t
4193 mark_stack_size(mark_stack_t *stack)
4194 {
4195  size_t size = stack->index;
4196  stack_chunk_t *chunk = stack->chunk ? stack->chunk->next : NULL;
4197 
4198  while (chunk) {
4199  size += stack->limit;
4200  chunk = chunk->next;
4201  }
4202  return size;
4203 }
4204 
4205 static void
4206 add_stack_chunk_cache(mark_stack_t *stack, stack_chunk_t *chunk)
4207 {
4208  chunk->next = stack->cache;
4209  stack->cache = chunk;
4210  stack->cache_size++;
4211 }
4212 
4213 static void
4214 shrink_stack_chunk_cache(mark_stack_t *stack)
4215 {
4216  stack_chunk_t *chunk;
4217 
4218  if (stack->unused_cache_size > (stack->cache_size/2)) {
4219  chunk = stack->cache;
4220  stack->cache = stack->cache->next;
4221  stack->cache_size--;
4222  free(chunk);
4223  }
4224  stack->unused_cache_size = stack->cache_size;
4225 }
4226 
4227 static void
4228 push_mark_stack_chunk(mark_stack_t *stack)
4229 {
4230  stack_chunk_t *next;
4231 
4232  GC_ASSERT(stack->index == stack->limit);
4233 
4234  if (stack->cache_size > 0) {
4235  next = stack->cache;
4236  stack->cache = stack->cache->next;
4237  stack->cache_size--;
4238  if (stack->unused_cache_size > stack->cache_size)
4239  stack->unused_cache_size = stack->cache_size;
4240  }
4241  else {
4242  next = stack_chunk_alloc();
4243  }
4244  next->next = stack->chunk;
4245  stack->chunk = next;
4246  stack->index = 0;
4247 }
4248 
4249 static void
4250 pop_mark_stack_chunk(mark_stack_t *stack)
4251 {
4252  stack_chunk_t *prev;
4253 
4254  prev = stack->chunk->next;
4255  GC_ASSERT(stack->index == 0);
4256  add_stack_chunk_cache(stack, stack->chunk);
4257  stack->chunk = prev;
4258  stack->index = stack->limit;
4259 }
4260 
4261 static void
4262 mark_stack_chunk_list_free(stack_chunk_t *chunk)
4263 {
4264  stack_chunk_t *next = NULL;
4265 
4266  while (chunk != NULL) {
4267  next = chunk->next;
4268  free(chunk);
4269  chunk = next;
4270  }
4271 }
4272 
4273 static void
4274 free_stack_chunks(mark_stack_t *stack)
4275 {
4276  mark_stack_chunk_list_free(stack->chunk);
4277 }
4278 
4279 static void
4280 mark_stack_free_cache(mark_stack_t *stack)
4281 {
4282  mark_stack_chunk_list_free(stack->cache);
4283  stack->cache_size = 0;
4284  stack->unused_cache_size = 0;
4285 }
4286 
4287 static void
4288 push_mark_stack(mark_stack_t *stack, VALUE obj)
4289 {
4290  switch (BUILTIN_TYPE(obj)) {
4291  case T_OBJECT:
4292  case T_CLASS:
4293  case T_MODULE:
4294  case T_FLOAT:
4295  case T_STRING:
4296  case T_REGEXP:
4297  case T_ARRAY:
4298  case T_HASH:
4299  case T_STRUCT:
4300  case T_BIGNUM:
4301  case T_FILE:
4302  case T_DATA:
4303  case T_MATCH:
4304  case T_COMPLEX:
4305  case T_RATIONAL:
4306  case T_TRUE:
4307  case T_FALSE:
4308  case T_SYMBOL:
4309  case T_IMEMO:
4310  case T_ICLASS:
4311  if (stack->index == stack->limit) {
4312  push_mark_stack_chunk(stack);
4313  }
4314  stack->chunk->data[stack->index++] = obj;
4315  return;
4316 
4317  case T_NONE:
4318  case T_NIL:
4319  case T_FIXNUM:
4320  case T_MOVED:
4321  case T_ZOMBIE:
4322  case T_UNDEF:
4323  case T_MASK:
4324  rb_bug("push_mark_stack() called for broken object");
4325  break;
4326 
4327  case T_NODE:
4328  rb_bug("push_mark_stack: unexpected T_NODE object");
4329  break;
4330  }
4331 
4332  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
4333  BUILTIN_TYPE(obj), (void *)obj,
4334  is_pointer_to_heap((rb_objspace_t *)rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
4335 }
4336 
4337 static int
4338 pop_mark_stack(mark_stack_t *stack, VALUE *data)
4339 {
4340  if (is_mark_stack_empty(stack)) {
4341  return FALSE;
4342  }
4343  if (stack->index == 1) {
4344  *data = stack->chunk->data[--stack->index];
4345  pop_mark_stack_chunk(stack);
4346  }
4347  else {
4348  *data = stack->chunk->data[--stack->index];
4349  }
4350  return TRUE;
4351 }
4352 
4353 static void
4354 init_mark_stack(mark_stack_t *stack)
4355 {
4356  int i;
4357 
4358  MEMZERO(stack, mark_stack_t, 1);
4359  stack->index = stack->limit = STACK_CHUNK_SIZE;
4360 
4361  for (i=0; i < 4; i++) {
4362  add_stack_chunk_cache(stack, stack_chunk_alloc());
4363  }
4364  stack->unused_cache_size = stack->cache_size;
4365 }
4366 
4367 /* Marking */
4368 
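/* Generational write-barrier check: when the current parent object is old and
 * the child is young or WB-unprotected, the parent is remembered so that it
 * is re-scanned during minor GCs. */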
4369 static void
4370 rgengc_check_relation(rb_objspace_t *objspace, VALUE obj)
4371 {
4372  const VALUE old_parent = objspace->rgengc.parent_object;
4373 
4374  if (old_parent) { /* parent object is old */
4375  if (RVALUE_WB_UNPROTECTED(objspace, obj) || !RVALUE_OLD_P(objspace, obj)) {
4376  rgengc_remember(objspace, old_parent);
4377  }
4378  }
4379 
4380  GC_ASSERT(old_parent == objspace->rgengc.parent_object);
4381 }
4382 
4383 static inline int
4384 gc_mark_set(rb_objspace_t *objspace, VALUE obj)
4385 {
4386  if (RVALUE_MARKED(objspace, obj)) return 0;
4387  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(obj), obj);
4388  return 1;
4389 }
4390 
4391 static void
4392 gc_aging(rb_objspace_t *objspace, VALUE obj)
4393 {
4394  /* Disable aging if major GCs are disabled. This will prevent longish-lived
4395  * objects from filling up the heap at the expense of marking many more objects.
4396  *
4397  * We should always pre-warm our process when disabling majors, by running
4398  * GC manually several times so that most objects likely to become oldgen
4399  * are already oldgen.
4400  */
4401  if (!gc_config_full_mark_val)
4402  return;
4403 
4404  struct heap_page *page = GET_HEAP_PAGE(obj);
4405 
4406  GC_ASSERT(RVALUE_MARKING(objspace, obj) == FALSE);
4407  check_rvalue_consistency(objspace, obj);
4408 
4409  if (!RVALUE_PAGE_WB_UNPROTECTED(page, obj)) {
4410  if (!RVALUE_OLD_P(objspace, obj)) {
4411  gc_report(3, objspace, "gc_aging: YOUNG: %s\n", rb_obj_info(obj));
4412  RVALUE_AGE_INC(objspace, obj);
4413  }
4414  else if (is_full_marking(objspace)) {
4415  GC_ASSERT(RVALUE_PAGE_UNCOLLECTIBLE(page, obj) == FALSE);
4416  RVALUE_PAGE_OLD_UNCOLLECTIBLE_SET(objspace, page, obj);
4417  }
4418  }
4419  check_rvalue_consistency(objspace, obj);
4420 
4421  objspace->marked_slots++;
4422 }
4423 
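/* Grey an already-marked object: push it on the mark stack so its children
 * are traversed later. During incremental marking the marking bit is also set
 * so the object is treated as grey by the write barrier. */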
4424 static void
4425 gc_grey(rb_objspace_t *objspace, VALUE obj)
4426 {
4427 #if RGENGC_CHECK_MODE
4428  if (RVALUE_MARKED(objspace, obj) == FALSE) rb_bug("gc_grey: %s is not marked.", rb_obj_info(obj));
4429  if (RVALUE_MARKING(objspace, obj) == TRUE) rb_bug("gc_grey: %s is marking/remembered.", rb_obj_info(obj));
4430 #endif
4431 
4432  if (is_incremental_marking(objspace)) {
4433  MARK_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4434  }
4435 
4436  push_mark_stack(&objspace->mark_stack, obj);
4437 }
4438 
4439 static void
4440 gc_mark(rb_objspace_t *objspace, VALUE obj)
4441 {
4442  GC_ASSERT(during_gc);
4443 
4444  rgengc_check_relation(objspace, obj);
4445  if (!gc_mark_set(objspace, obj)) return; /* already marked */
4446 
4447  if (0) { // for debug GC marking miss
4448  if (objspace->rgengc.parent_object) {
4449  RUBY_DEBUG_LOG("%p (%s) parent:%p (%s)",
4450  (void *)obj, obj_type_name(obj),
4451  (void *)objspace->rgengc.parent_object, obj_type_name(objspace->rgengc.parent_object));
4452  }
4453  else {
4454  RUBY_DEBUG_LOG("%p (%s)", (void *)obj, obj_type_name(obj));
4455  }
4456  }
4457 
4458  if (RB_UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
4459  rb_obj_info_dump(obj);
4460  rb_bug("try to mark T_NONE object"); /* check here will help debugging */
4461  }
4462 
4463  gc_aging(objspace, obj);
4464  gc_grey(objspace, obj);
4465 }
4466 
4467 static inline void
4468 gc_pin(rb_objspace_t *objspace, VALUE obj)
4469 {
4470  GC_ASSERT(!SPECIAL_CONST_P(obj));
4471  if (RB_UNLIKELY(objspace->flags.during_compacting)) {
4472  if (RB_LIKELY(during_gc)) {
4473  if (!RVALUE_PINNED(objspace, obj)) {
4474  GC_ASSERT(GET_HEAP_PAGE(obj)->pinned_slots <= GET_HEAP_PAGE(obj)->total_slots);
4475  GET_HEAP_PAGE(obj)->pinned_slots++;
4476  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(obj), obj);
4477  }
4478  }
4479  }
4480 }
4481 
4482 static inline void
4483 gc_mark_and_pin(rb_objspace_t *objspace, VALUE obj)
4484 {
4485  gc_pin(objspace, obj);
4486  gc_mark(objspace, obj);
4487 }
4488 
4489 void
4490 rb_gc_impl_mark_and_move(void *objspace_ptr, VALUE *ptr)
4491 {
4492  rb_objspace_t *objspace = objspace_ptr;
4493 
4494  if (RB_UNLIKELY(objspace->flags.during_reference_updating)) {
4495  GC_ASSERT(objspace->flags.during_compacting);
4496  GC_ASSERT(during_gc);
4497 
4498  *ptr = rb_gc_impl_location(objspace, *ptr);
4499  }
4500  else {
4501  gc_mark(objspace, *ptr);
4502  }
4503 }
4504 
4505 void
4506 rb_gc_impl_mark(void *objspace_ptr, VALUE obj)
4507 {
4508  rb_objspace_t *objspace = objspace_ptr;
4509 
4510  gc_mark(objspace, obj);
4511 }
4512 
4513 void
4514 rb_gc_impl_mark_and_pin(void *objspace_ptr, VALUE obj)
4515 {
4516  rb_objspace_t *objspace = objspace_ptr;
4517 
4518  gc_mark_and_pin(objspace, obj);
4519 }
4520 
4521 void
4522 rb_gc_impl_mark_maybe(void *objspace_ptr, VALUE obj)
4523 {
4524  rb_objspace_t *objspace = objspace_ptr;
4525 
4526  (void)VALGRIND_MAKE_MEM_DEFINED(&obj, sizeof(obj));
4527 
4528  if (is_pointer_to_heap(objspace, (void *)obj)) {
4529  asan_unpoisoning_object(obj) {
4530  /* Garbage can live on the stack, so do not mark or pin */
4531  switch (BUILTIN_TYPE(obj)) {
4532  case T_ZOMBIE:
4533  case T_NONE:
4534  break;
4535  default:
4536  gc_mark_and_pin(objspace, obj);
4537  break;
4538  }
4539  }
4540  }
4541 }
4542 
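/* Register a weak reference: the slot is recorded so that, after marking,
 * gc_update_weak_references() can clear it if the referent did not survive.
 * During a minor GC an old referent is guaranteed to survive, so it is not
 * recorded. */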
4543 void
4544 rb_gc_impl_mark_weak(void *objspace_ptr, VALUE *ptr)
4545 {
4546  rb_objspace_t *objspace = objspace_ptr;
4547 
4548  GC_ASSERT(objspace->rgengc.parent_object == 0 || FL_TEST(objspace->rgengc.parent_object, FL_WB_PROTECTED));
4549 
4550  VALUE obj = *ptr;
4551 
4552  if (RB_UNLIKELY(RB_TYPE_P(obj, T_NONE))) {
4553  rb_obj_info_dump(obj);
4554  rb_bug("try to mark T_NONE object");
4555  }
4556 
4557  /* If we are in a minor GC and the other object is old, then obj should
4558  * already be marked and cannot be reclaimed in this GC cycle so we don't
4559  * need to add it to the weak references list. */
4560  if (!is_full_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
4561  GC_ASSERT(RVALUE_MARKED(objspace, obj));
4562  GC_ASSERT(!objspace->flags.during_compacting);
4563 
4564  return;
4565  }
4566 
4567  rgengc_check_relation(objspace, obj);
4568 
4569  DURING_GC_COULD_MALLOC_REGION_START();
4570  {
4571  rb_darray_append(&objspace->weak_references, ptr);
4572  }
4573  DURING_GC_COULD_MALLOC_REGION_END();
4574 
4575  objspace->profile.weak_references_count++;
4576 }
4577 
4578 void
4579 rb_gc_impl_remove_weak(void *objspace_ptr, VALUE parent_obj, VALUE *ptr)
4580 {
4581  rb_objspace_t *objspace = objspace_ptr;
4582 
4583  /* If we're not incremental marking, then the state of the objects can't
4584  * change so we don't need to do anything. */
4585  if (!is_incremental_marking(objspace)) return;
4586  /* If parent_obj has not been marked, then ptr has not yet been marked
4587  * weak, so we don't need to do anything. */
4588  if (!RVALUE_MARKED(objspace, parent_obj)) return;
4589 
4590  VALUE **ptr_ptr;
4591  rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
4592  if (*ptr_ptr == ptr) {
4593  *ptr_ptr = NULL;
4594  break;
4595  }
4596  }
4597 }
4598 
4599 static int
4600 pin_value(st_data_t key, st_data_t value, st_data_t data)
4601 {
4602  rb_gc_impl_mark_and_pin((void *)data, (VALUE)value);
4603 
4604  return ST_CONTINUE;
4605 }
4606 
4607 static void
4608 mark_roots(rb_objspace_t *objspace, const char **categoryp)
4609 {
4610 #define MARK_CHECKPOINT(category) do { \
4611  if (categoryp) *categoryp = category; \
4612 } while (0)
4613 
4614  MARK_CHECKPOINT("objspace");
4615  objspace->rgengc.parent_object = Qfalse;
4616 
4617  if (finalizer_table != NULL) {
4618  st_foreach(finalizer_table, pin_value, (st_data_t)objspace);
4619  }
4620 
4621  st_foreach(objspace->obj_to_id_tbl, gc_mark_tbl_no_pin_i, (st_data_t)objspace);
4622 
4623  if (stress_to_class) rb_gc_mark(stress_to_class);
4624 
4625  rb_gc_save_machine_context();
4626  rb_gc_mark_roots(objspace, categoryp);
4627 }
4628 
4629 static inline void
4630 gc_mark_set_parent(rb_objspace_t *objspace, VALUE obj)
4631 {
4632  if (RVALUE_OLD_P(objspace, obj)) {
4633  objspace->rgengc.parent_object = obj;
4634  }
4635  else {
4636  objspace->rgengc.parent_object = Qfalse;
4637  }
4638 }
4639 
4640 static void
4641 gc_mark_children(rb_objspace_t *objspace, VALUE obj)
4642 {
4643  gc_mark_set_parent(objspace, obj);
4644  rb_gc_mark_children(objspace, obj);
4645 }
4646 
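/* Drain the mark stack, marking the children of each popped object. When
 * "incremental" is set, at most "count" objects are processed and their
 * marking (grey) bits are cleared as they are blackened; otherwise the stack
 * is drained completely. Returns TRUE when the stack becomes empty. */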
4651 static inline int
4652 gc_mark_stacked_objects(rb_objspace_t *objspace, int incremental, size_t count)
4653 {
4654  mark_stack_t *mstack = &objspace->mark_stack;
4655  VALUE obj;
4656  size_t marked_slots_at_the_beginning = objspace->marked_slots;
4657  size_t popped_count = 0;
4658 
4659  while (pop_mark_stack(mstack, &obj)) {
4660  if (obj == Qundef) continue; /* skip */
4661 
4662  if (RGENGC_CHECK_MODE && !RVALUE_MARKED(objspace, obj)) {
4663  rb_bug("gc_mark_stacked_objects: %s is not marked.", rb_obj_info(obj));
4664  }
4665  gc_mark_children(objspace, obj);
4666 
4667  if (incremental) {
4668  if (RGENGC_CHECK_MODE && !RVALUE_MARKING(objspace, obj)) {
4669  rb_bug("gc_mark_stacked_objects: incremental, but marking bit is 0");
4670  }
4671  CLEAR_IN_BITMAP(GET_HEAP_MARKING_BITS(obj), obj);
4672  popped_count++;
4673 
4674  if (popped_count + (objspace->marked_slots - marked_slots_at_the_beginning) > count) {
4675  break;
4676  }
4677  }
4678  else {
4679  /* just ignore marking bits */
4680  }
4681  }
4682 
4683  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
4684 
4685  if (is_mark_stack_empty(mstack)) {
4686  shrink_stack_chunk_cache(mstack);
4687  return TRUE;
4688  }
4689  else {
4690  return FALSE;
4691  }
4692 }
4693 
4694 static int
4695 gc_mark_stacked_objects_incremental(rb_objspace_t *objspace, size_t count)
4696 {
4697  return gc_mark_stacked_objects(objspace, TRUE, count);
4698 }
4699 
4700 static int
4701 gc_mark_stacked_objects_all(rb_objspace_t *objspace)
4702 {
4703  return gc_mark_stacked_objects(objspace, FALSE, 0);
4704 }
4705 
4706 #if RGENGC_CHECK_MODE >= 4
4707 
4708 #define MAKE_ROOTSIG(obj) (((VALUE)(obj) << 1) | 0x01)
4709 #define IS_ROOTSIG(obj) ((VALUE)(obj) & 0x01)
4710 #define GET_ROOTSIG(obj) ((const char *)((VALUE)(obj) >> 1))
4711 
4712 struct reflist {
4713  VALUE *list;
4714  int pos;
4715  int size;
4716 };
4717 
4718 static struct reflist *
4719 reflist_create(VALUE obj)
4720 {
4721  struct reflist *refs = xmalloc(sizeof(struct reflist));
4722  refs->size = 1;
4723  refs->list = ALLOC_N(VALUE, refs->size);
4724  refs->list[0] = obj;
4725  refs->pos = 1;
4726  return refs;
4727 }
4728 
4729 static void
4730 reflist_destruct(struct reflist *refs)
4731 {
4732  xfree(refs->list);
4733  xfree(refs);
4734 }
4735 
4736 static void
4737 reflist_add(struct reflist *refs, VALUE obj)
4738 {
4739  if (refs->pos == refs->size) {
4740  refs->size *= 2;
4741  SIZED_REALLOC_N(refs->list, VALUE, refs->size, refs->size/2);
4742  }
4743 
4744  refs->list[refs->pos++] = obj;
4745 }
4746 
4747 static void
4748 reflist_dump(struct reflist *refs)
4749 {
4750  int i;
4751  for (i=0; i<refs->pos; i++) {
4752  VALUE obj = refs->list[i];
4753  if (IS_ROOTSIG(obj)) { /* root */
4754  fprintf(stderr, "<root@%s>", GET_ROOTSIG(obj));
4755  }
4756  else {
4757  fprintf(stderr, "<%s>", rb_obj_info(obj));
4758  }
4759  if (i+1 < refs->pos) fprintf(stderr, ", ");
4760  }
4761 }
4762 
4763 static int
4764 reflist_referred_from_machine_context(struct reflist *refs)
4765 {
4766  int i;
4767  for (i=0; i<refs->pos; i++) {
4768  VALUE obj = refs->list[i];
4769  if (IS_ROOTSIG(obj) && strcmp(GET_ROOTSIG(obj), "machine_context") == 0) return 1;
4770  }
4771  return 0;
4772 }
4773 
4774 struct allrefs {
4775  rb_objspace_t *objspace;
4776  /* a -> obj1
4777  * b -> obj1
4778  * c -> obj1
4779  * c -> obj2
4780  * d -> obj3
4781  * #=> {obj1 => [a, b, c], obj2 => [c, d]}
4782  */
4783  struct st_table *references;
4784  const char *category;
4785  VALUE root_obj;
4786  mark_stack_t mark_stack;
4787 };
4788 
4789 static int
4790 allrefs_add(struct allrefs *data, VALUE obj)
4791 {
4792  struct reflist *refs;
4793  st_data_t r;
4794 
4795  if (st_lookup(data->references, obj, &r)) {
4796  refs = (struct reflist *)r;
4797  reflist_add(refs, data->root_obj);
4798  return 0;
4799  }
4800  else {
4801  refs = reflist_create(data->root_obj);
4802  st_insert(data->references, obj, (st_data_t)refs);
4803  return 1;
4804  }
4805 }
4806 
4807 static void
4808 allrefs_i(VALUE obj, void *ptr)
4809 {
4810  struct allrefs *data = (struct allrefs *)ptr;
4811 
4812  if (allrefs_add(data, obj)) {
4813  push_mark_stack(&data->mark_stack, obj);
4814  }
4815 }
4816 
4817 static void
4818 allrefs_roots_i(VALUE obj, void *ptr)
4819 {
4820  struct allrefs *data = (struct allrefs *)ptr;
4821  if (strlen(data->category) == 0) rb_bug("!!!");
4822  data->root_obj = MAKE_ROOTSIG(data->category);
4823 
4824  if (allrefs_add(data, obj)) {
4825  push_mark_stack(&data->mark_stack, obj);
4826  }
4827 }
4828 #define PUSH_MARK_FUNC_DATA(v) do { \
4829  struct gc_mark_func_data_struct *prev_mark_func_data = GET_VM()->gc.mark_func_data; \
4830  GET_VM()->gc.mark_func_data = (v);
4831 
4832 #define POP_MARK_FUNC_DATA() GET_VM()->gc.mark_func_data = prev_mark_func_data;} while (0)
4833 
4834 static st_table *
4835 objspace_allrefs(rb_objspace_t *objspace)
4836 {
4837  struct allrefs data;
4838  struct gc_mark_func_data_struct mfd;
4839  VALUE obj;
4840  int prev_dont_gc = dont_gc_val();
4841  dont_gc_on();
4842 
4843  data.objspace = objspace;
4844  data.references = st_init_numtable();
4845  init_mark_stack(&data.mark_stack);
4846 
4847  mfd.mark_func = allrefs_roots_i;
4848  mfd.data = &data;
4849 
4850  /* traverse root objects */
4851  PUSH_MARK_FUNC_DATA(&mfd);
4852  GET_VM()->gc.mark_func_data = &mfd;
4853  mark_roots(objspace, &data.category);
4854  POP_MARK_FUNC_DATA();
4855 
4856  /* traverse rest objects reachable from root objects */
4857  while (pop_mark_stack(&data.mark_stack, &obj)) {
4858  rb_objspace_reachable_objects_from(data.root_obj = obj, allrefs_i, &data);
4859  }
4860  free_stack_chunks(&data.mark_stack);
4861 
4862  dont_gc_set(prev_dont_gc);
4863  return data.references;
4864 }
4865 
4866 static int
4867 objspace_allrefs_destruct_i(st_data_t key, st_data_t value, st_data_t ptr)
4868 {
4869  struct reflist *refs = (struct reflist *)value;
4870  reflist_destruct(refs);
4871  return ST_CONTINUE;
4872 }
4873 
4874 static void
4875 objspace_allrefs_destruct(struct st_table *refs)
4876 {
4877  st_foreach(refs, objspace_allrefs_destruct_i, 0);
4878  st_free_table(refs);
4879 }
4880 
4881 #if RGENGC_CHECK_MODE >= 5
4882 static int
4883 allrefs_dump_i(st_data_t k, st_data_t v, st_data_t ptr)
4884 {
4885  VALUE obj = (VALUE)k;
4886  struct reflist *refs = (struct reflist *)v;
4887  fprintf(stderr, "[allrefs_dump_i] %s <- ", rb_obj_info(obj));
4888  reflist_dump(refs);
4889  fprintf(stderr, "\n");
4890  return ST_CONTINUE;
4891 }
4892 
4893 static void
4894 allrefs_dump(rb_objspace_t *objspace)
4895 {
4896  VALUE size = objspace->rgengc.allrefs_table->num_entries;
4897  fprintf(stderr, "[all refs] (size: %"PRIuVALUE")\n", size);
4898  st_foreach(objspace->rgengc.allrefs_table, allrefs_dump_i, 0);
4899 }
4900 #endif
4901 
4902 static int
4903 gc_check_after_marks_i(st_data_t k, st_data_t v, st_data_t ptr)
4904 {
4905  VALUE obj = k;
4906  struct reflist *refs = (struct reflist *)v;
4907  rb_objspace_t *objspace = rb_gc_get_objspace();
4908 
4909  /* object should be marked or oldgen */
4910  if (!RVALUE_MARKED(objspace, obj)) {
4911  fprintf(stderr, "gc_check_after_marks_i: %s is not marked and not oldgen.\n", rb_obj_info(obj));
4912  fprintf(stderr, "gc_check_after_marks_i: %p is referred from ", (void *)obj);
4913  reflist_dump(refs);
4914 
4915  if (reflist_referred_from_machine_context(refs)) {
4916  fprintf(stderr, " (marked from machine stack).\n");
4917  /* marked from machine context can be a false positive */
4918  }
4919  else {
4920  objspace->rgengc.error_count++;
4921  fprintf(stderr, "\n");
4922  }
4923  }
4924  return ST_CONTINUE;
4925 }
4926 
4927 static void
4928 gc_marks_check(rb_objspace_t *objspace, st_foreach_callback_func *checker_func, const char *checker_name)
4929 {
4930  size_t saved_malloc_increase = objspace->malloc_params.increase;
4931 #if RGENGC_ESTIMATE_OLDMALLOC
4932  size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
4933 #endif
4934  VALUE already_disabled = rb_objspace_gc_disable(objspace);
4935 
4936  objspace->rgengc.allrefs_table = objspace_allrefs(objspace);
4937 
4938  if (checker_func) {
4939  st_foreach(objspace->rgengc.allrefs_table, checker_func, (st_data_t)objspace);
4940  }
4941 
4942  if (objspace->rgengc.error_count > 0) {
4943 #if RGENGC_CHECK_MODE >= 5
4944  allrefs_dump(objspace);
4945 #endif
4946  if (checker_name) rb_bug("%s: GC has problem.", checker_name);
4947  }
4948 
4949  objspace_allrefs_destruct(objspace->rgengc.allrefs_table);
4950  objspace->rgengc.allrefs_table = 0;
4951 
4952  if (already_disabled == Qfalse) rb_objspace_gc_enable(objspace);
4953  objspace->malloc_params.increase = saved_malloc_increase;
4954 #if RGENGC_ESTIMATE_OLDMALLOC
4955  objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
4956 #endif
4957 }
4958 #endif /* RGENGC_CHECK_MODE >= 4 */
4959 
4960 struct verify_internal_consistency_struct {
4961  rb_objspace_t *objspace;
4962  int err_count;
4963  size_t live_object_count;
4964  size_t zombie_object_count;
4965 
4966  VALUE parent;
4967  size_t old_object_count;
4968  size_t remembered_shady_count;
4969 };
4970 
4971 static void
4972 check_generation_i(const VALUE child, void *ptr)
4973 {
4974  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
4975  const VALUE parent = data->parent;
4976 
4977  if (RGENGC_CHECK_MODE) GC_ASSERT(RVALUE_OLD_P(data->objspace, parent));
4978 
4979  if (!RVALUE_OLD_P(data->objspace, child)) {
4980  if (!RVALUE_REMEMBERED(data->objspace, parent) &&
4981  !RVALUE_REMEMBERED(data->objspace, child) &&
4982  !RVALUE_UNCOLLECTIBLE(data->objspace, child)) {
4983  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (O->Y) %s -> %s\n", rb_obj_info(parent), rb_obj_info(child));
4984  data->err_count++;
4985  }
4986  }
4987 }
4988 
4989 static void
4990 check_color_i(const VALUE child, void *ptr)
4991 {
4992  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
4993  const VALUE parent = data->parent;
4994 
4995  if (!RVALUE_WB_UNPROTECTED(data->objspace, parent) && RVALUE_WHITE_P(data->objspace, child)) {
4996  fprintf(stderr, "verify_internal_consistency_reachable_i: WB miss (B->W) - %s -> %s\n",
4997  rb_obj_info(parent), rb_obj_info(child));
4998  data->err_count++;
4999  }
5000 }
5001 
5002 static void
5003 check_children_i(const VALUE child, void *ptr)
5004 {
5005  struct verify_internal_consistency_struct *data = (struct verify_internal_consistency_struct *)ptr;
5006  if (check_rvalue_consistency_force(data->objspace, child, FALSE) != 0) {
5007  fprintf(stderr, "check_children_i: %s has error (referenced from %s)",
5008  rb_obj_info(child), rb_obj_info(data->parent));
5009 
5010  data->err_count++;
5011  }
5012 }
5013 
5014 static int
5015 verify_internal_consistency_i(void *page_start, void *page_end, size_t stride,
5016  struct verify_internal_consistency_struct *data)
5017 {
5018  VALUE obj;
5019  rb_objspace_t *objspace = data->objspace;
5020 
5021  for (obj = (VALUE)page_start; obj != (VALUE)page_end; obj += stride) {
5022  asan_unpoisoning_object(obj) {
5023  if (!rb_gc_impl_garbage_object_p(objspace, obj)) {
5024  /* count objects */
5025  data->live_object_count++;
5026  data->parent = obj;
5027 
5028  /* Normally, we don't expect T_MOVED objects to be in the heap.
5029  * But they can stay alive on the stack. */
5030  if (!gc_object_moved_p(objspace, obj)) {
5031  /* moved slots don't have children */
5032  rb_objspace_reachable_objects_from(obj, check_children_i, (void *)data);
5033  }
5034 
5035  /* check health of children */
5036  if (RVALUE_OLD_P(objspace, obj)) data->old_object_count++;
5037  if (RVALUE_WB_UNPROTECTED(objspace, obj) && RVALUE_UNCOLLECTIBLE(objspace, obj)) data->remembered_shady_count++;
5038 
5039  if (!is_marking(objspace) && RVALUE_OLD_P(objspace, obj)) {
5040  /* reachable objects from an oldgen object should be old or (young with remember) */
5041  data->parent = obj;
5042  rb_objspace_reachable_objects_from(obj, check_generation_i, (void *)data);
5043  }
5044 
5045  if (is_incremental_marking(objspace)) {
5046  if (RVALUE_BLACK_P(objspace, obj)) {
5047  /* reachable objects from black objects should be black or grey objects */
5048  data->parent = obj;
5049  rb_objspace_reachable_objects_from(obj, check_color_i, (void *)data);
5050  }
5051  }
5052  }
5053  else {
5054  if (BUILTIN_TYPE(obj) == T_ZOMBIE) {
5055  data->zombie_object_count++;
5056 
5057  if ((RBASIC(obj)->flags & ~ZOMBIE_OBJ_KEPT_FLAGS) != T_ZOMBIE) {
5058  fprintf(stderr, "verify_internal_consistency_i: T_ZOMBIE has extra flags set: %s\n",
5059  rb_obj_info(obj));
5060  data->err_count++;
5061  }
5062 
5063  if (!!FL_TEST(obj, FL_FINALIZE) != !!st_is_member(finalizer_table, obj)) {
5064  fprintf(stderr, "verify_internal_consistency_i: FL_FINALIZE %s but %s finalizer_table: %s\n",
5065  FL_TEST(obj, FL_FINALIZE) ? "set" : "not set", st_is_member(finalizer_table, obj) ? "in" : "not in",
5066  rb_obj_info(obj));
5067  data->err_count++;
5068  }
5069  }
5070  }
5071  }
5072  }
5073 
5074  return 0;
5075 }
5076 
5077 static int
5078 gc_verify_heap_page(rb_objspace_t *objspace, struct heap_page *page, VALUE obj)
5079 {
5080  unsigned int has_remembered_shady = FALSE;
5081  unsigned int has_remembered_old = FALSE;
5082  int remembered_old_objects = 0;
5083  int free_objects = 0;
5084  int zombie_objects = 0;
5085 
5086  short slot_size = page->slot_size;
5087  uintptr_t start = (uintptr_t)page->start;
5088  uintptr_t end = start + page->total_slots * slot_size;
5089 
5090  for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5091  VALUE val = (VALUE)ptr;
5092  asan_unpoisoning_object(val) {
5093  enum ruby_value_type type = BUILTIN_TYPE(val);
5094 
5095  if (type == T_NONE) free_objects++;
5096  if (type == T_ZOMBIE) zombie_objects++;
5097  if (RVALUE_PAGE_UNCOLLECTIBLE(page, val) && RVALUE_PAGE_WB_UNPROTECTED(page, val)) {
5098  has_remembered_shady = TRUE;
5099  }
5100  if (RVALUE_PAGE_MARKING(page, val)) {
5101  has_remembered_old = TRUE;
5102  remembered_old_objects++;
5103  }
5104  }
5105  }
5106 
5107  if (!is_incremental_marking(objspace) &&
5108  page->flags.has_remembered_objects == FALSE && has_remembered_old == TRUE) {
5109 
5110  for (uintptr_t ptr = start; ptr < end; ptr += slot_size) {
5111  VALUE val = (VALUE)ptr;
5112  if (RVALUE_PAGE_MARKING(page, val)) {
5113  fprintf(stderr, "marking -> %s\n", rb_obj_info(val));
5114  }
5115  }
5116  rb_bug("page %p's has_remembered_objects should be false, but there are remembered old objects (%d). %s",
5117  (void *)page, remembered_old_objects, obj ? rb_obj_info(obj) : "");
5118  }
5119 
5120  if (page->flags.has_uncollectible_wb_unprotected_objects == FALSE && has_remembered_shady == TRUE) {
5121  rb_bug("page %p's has_remembered_shady should be false, but there are remembered shady objects. %s",
5122  (void *)page, obj ? rb_obj_info(obj) : "");
5123  }
5124 
5125  if (0) {
5126  /* free_slots may not be equal to free_objects */
5127  if (page->free_slots != free_objects) {
5128  rb_bug("page %p's free_slots should be %d, but %d", (void *)page, page->free_slots, free_objects);
5129  }
5130  }
5131  if (page->final_slots != zombie_objects) {
5132  rb_bug("page %p's final_slots should be %d, but %d", (void *)page, page->final_slots, zombie_objects);
5133  }
5134 
5135  return remembered_old_objects;
5136 }
5137 
5138 static int
5139 gc_verify_heap_pages_(rb_objspace_t *objspace, struct ccan_list_head *head)
5140 {
5141  int remembered_old_objects = 0;
5142  struct heap_page *page = 0;
5143 
5144  ccan_list_for_each(head, page, page_node) {
5145  asan_unlock_freelist(page);
5146  struct free_slot *p = page->freelist;
5147  while (p) {
5148  VALUE vp = (VALUE)p;
5149  VALUE prev = vp;
5150  asan_unpoison_object(vp, false);
5151  if (BUILTIN_TYPE(vp) != T_NONE) {
5152  fprintf(stderr, "freelist slot expected to be T_NONE but was: %s\n", rb_obj_info(vp));
5153  }
5154  p = p->next;
5155  asan_poison_object(prev);
5156  }
5157  asan_lock_freelist(page);
5158 
5159  if (page->flags.has_remembered_objects == FALSE) {
5160  remembered_old_objects += gc_verify_heap_page(objspace, page, Qfalse);
5161  }
5162  }
5163 
5164  return remembered_old_objects;
5165 }
5166 
5167 static int
5168 gc_verify_heap_pages(rb_objspace_t *objspace)
5169 {
5170  int remembered_old_objects = 0;
5171  for (int i = 0; i < HEAP_COUNT; i++) {
5172  remembered_old_objects += gc_verify_heap_pages_(objspace, &((&heaps[i])->pages));
5173  }
5174  return remembered_old_objects;
5175 }
5176 
5177 static void
5178 gc_verify_internal_consistency_(rb_objspace_t *objspace)
5179 {
5180  struct verify_internal_consistency_struct data = {0};
5181 
5182  data.objspace = objspace;
5183  gc_report(5, objspace, "gc_verify_internal_consistency: start\n");
5184 
5185  /* check relations */
5186  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
5187  struct heap_page *page = rb_darray_get(objspace->heap_pages.sorted, i);
5188  short slot_size = page->slot_size;
5189 
5190  uintptr_t start = (uintptr_t)page->start;
5191  uintptr_t end = start + page->total_slots * slot_size;
5192 
5193  verify_internal_consistency_i((void *)start, (void *)end, slot_size, &data);
5194  }
5195 
5196  if (data.err_count != 0) {
5197 #if RGENGC_CHECK_MODE >= 5
5198  objspace->rgengc.error_count = data.err_count;
5199  gc_marks_check(objspace, NULL, NULL);
5200  allrefs_dump(objspace);
5201 #endif
5202  rb_bug("gc_verify_internal_consistency: found internal inconsistency.");
5203  }
5204 
5205  /* check heap_page status */
5206  gc_verify_heap_pages(objspace);
5207 
5208  /* check counters */
5209 
5210  if (!is_lazy_sweeping(objspace) &&
5211  !finalizing &&
5212  !rb_gc_multi_ractor_p()) {
5213  if (objspace_live_slots(objspace) != data.live_object_count) {
5214  fprintf(stderr, "heap_pages_final_slots: %"PRIdSIZE", total_freed_objects: %"PRIdSIZE"\n",
5215  total_final_slots_count(objspace), total_freed_objects(objspace));
5216  rb_bug("inconsistent live slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5217  objspace_live_slots(objspace), data.live_object_count);
5218  }
5219  }
5220 
5221  if (!is_marking(objspace)) {
5222  if (objspace->rgengc.old_objects != data.old_object_count) {
5223  rb_bug("inconsistent old slot number: expect %"PRIuSIZE", but %"PRIuSIZE".",
5224  objspace->rgengc.old_objects, data.old_object_count);
5225  }
5226  if (objspace->rgengc.uncollectible_wb_unprotected_objects != data.remembered_shady_count) {
5227  rb_bug("inconsistent number of wb unprotected objects: expect %"PRIuSIZE", but %"PRIuSIZE".",
5228  objspace->rgengc.uncollectible_wb_unprotected_objects, data.remembered_shady_count);
5229  }
5230  }
5231 
5232  if (!finalizing) {
5233  size_t list_count = 0;
5234 
5235  {
5236  VALUE z = heap_pages_deferred_final;
5237  while (z) {
5238  list_count++;
5239  z = RZOMBIE(z)->next;
5240  }
5241  }
5242 
5243  if (total_final_slots_count(objspace) != data.zombie_object_count ||
5244  total_final_slots_count(objspace) != list_count) {
5245 
5246  rb_bug("inconsistent finalizing object count:\n"
5247  " expect %"PRIuSIZE"\n"
5248  " but %"PRIuSIZE" zombies\n"
5249  " heap_pages_deferred_final list has %"PRIuSIZE" items.",
5250  total_final_slots_count(objspace),
5251  data.zombie_object_count,
5252  list_count);
5253  }
5254  }
5255 
5256  gc_report(5, objspace, "gc_verify_internal_consistency: OK\n");
5257 }
5258 
5259 static void
5260 gc_verify_internal_consistency(void *objspace_ptr)
5261 {
5262  rb_objspace_t *objspace = objspace_ptr;
5263 
5264  unsigned int lev = rb_gc_vm_lock();
5265  {
5266  rb_gc_vm_barrier(); // stop other ractors
5267 
5268  unsigned int prev_during_gc = during_gc;
5269  during_gc = FALSE; // stop gc here
5270  {
5271  gc_verify_internal_consistency_(objspace);
5272  }
5273  during_gc = prev_during_gc;
5274  }
5275  rb_gc_vm_unlock(lev);
5276 }
5277 
5278 static void
5279 heap_move_pooled_pages_to_free_pages(rb_heap_t *heap)
5280 {
5281  if (heap->pooled_pages) {
5282  if (heap->free_pages) {
5283  struct heap_page *free_pages_tail = heap->free_pages;
5284  while (free_pages_tail->free_next) {
5285  free_pages_tail = free_pages_tail->free_next;
5286  }
5287  free_pages_tail->free_next = heap->pooled_pages;
5288  }
5289  else {
5290  heap->free_pages = heap->pooled_pages;
5291  }
5292 
5293  heap->pooled_pages = NULL;
5294  }
5295 }
5296 
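/* Register a WB-unprotected object as uncollectible: set its bit in the
 * page's uncollectible bitmap and bump the global counter. Returns TRUE only
 * the first time the object is registered. */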
5297 static int
5298 gc_remember_unprotected(rb_objspace_t *objspace, VALUE obj)
5299 {
5300  struct heap_page *page = GET_HEAP_PAGE(obj);
5301  bits_t *uncollectible_bits = &page->uncollectible_bits[0];
5302 
5303  if (!MARKED_IN_BITMAP(uncollectible_bits, obj)) {
5304  page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
5305  MARK_IN_BITMAP(uncollectible_bits, obj);
5306  objspace->rgengc.uncollectible_wb_unprotected_objects++;
5307 
5308 #if RGENGC_PROFILE > 0
5309  objspace->profile.total_remembered_shady_object_count++;
5310 #if RGENGC_PROFILE >= 2
5311  objspace->profile.remembered_shady_object_count_types[BUILTIN_TYPE(obj)]++;
5312 #endif
5313 #endif
5314  return TRUE;
5315  }
5316  else {
5317  return FALSE;
5318  }
5319 }
5320 
5321 static inline void
5322 gc_marks_wb_unprotected_objects_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bits)
5323 {
5324  if (bits) {
5325  do {
5326  if (bits & 1) {
5327  gc_report(2, objspace, "gc_marks_wb_unprotected_objects: marked shady: %s\n", rb_obj_info((VALUE)p));
5328  GC_ASSERT(RVALUE_WB_UNPROTECTED(objspace, (VALUE)p));
5329  GC_ASSERT(RVALUE_MARKED(objspace, (VALUE)p));
5330  gc_mark_children(objspace, (VALUE)p);
5331  }
5332  p += BASE_SLOT_SIZE;
5333  bits >>= 1;
5334  } while (bits);
5335  }
5336 }
5337 
5338 static void
5339 gc_marks_wb_unprotected_objects(rb_objspace_t *objspace, rb_heap_t *heap)
5340 {
5341  struct heap_page *page = 0;
5342 
5343  ccan_list_for_each(&heap->pages, page, page_node) {
5344  bits_t *mark_bits = page->mark_bits;
5345  bits_t *wbun_bits = page->wb_unprotected_bits;
5346  uintptr_t p = page->start;
5347  size_t j;
5348 
5349  bits_t bits = mark_bits[0] & wbun_bits[0];
5350  bits >>= NUM_IN_PAGE(p);
5351  gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
5352  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5353 
5354  for (j=1; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5355  bits_t bits = mark_bits[j] & wbun_bits[j];
5356 
5357  gc_marks_wb_unprotected_objects_plane(objspace, p, bits);
5358  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5359  }
5360  }
5361 
5362  gc_mark_stacked_objects_all(objspace);
5363 }
5364 
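/* Resolve the weak references gathered during marking: referents that were
 * not marked are overwritten with Qundef, survivors are counted, and the
 * weak_references array is cleared and resized for the next cycle. */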
5365 static void
5366 gc_update_weak_references(rb_objspace_t *objspace)
5367 {
5368  size_t retained_weak_references_count = 0;
5369  VALUE **ptr_ptr;
5370  rb_darray_foreach(objspace->weak_references, i, ptr_ptr) {
5371  if (!*ptr_ptr) continue;
5372 
5373  VALUE obj = **ptr_ptr;
5374 
5375  if (RB_SPECIAL_CONST_P(obj)) continue;
5376 
5377  if (!RVALUE_MARKED(objspace, obj)) {
5378  **ptr_ptr = Qundef;
5379  }
5380  else {
5381  retained_weak_references_count++;
5382  }
5383  }
5384 
5385  objspace->profile.retained_weak_references_count = retained_weak_references_count;
5386 
5387  rb_darray_clear(objspace->weak_references);
5388  DURING_GC_COULD_MALLOC_REGION_START();
5389  {
5390  rb_darray_resize_capa(&objspace->weak_references, retained_weak_references_count);
5391  }
5392  DURING_GC_COULD_MALLOC_REGION_END();
5393 }
5394 
5395 static void
5396 gc_marks_finish(rb_objspace_t *objspace)
5397 {
5398  /* finish incremental GC */
5399  if (is_incremental_marking(objspace)) {
5400  if (RGENGC_CHECK_MODE && is_mark_stack_empty(&objspace->mark_stack) == 0) {
5401  rb_bug("gc_marks_finish: mark stack is not empty (%"PRIdSIZE").",
5402  mark_stack_size(&objspace->mark_stack));
5403  }
5404 
5405  mark_roots(objspace, NULL);
5406  while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == false);
5407 
5408 #if RGENGC_CHECK_MODE >= 2
5409  if (gc_verify_heap_pages(objspace) != 0) {
5410  rb_bug("gc_marks_finish (incremental): there are remembered old objects.");
5411  }
5412 #endif
5413 
5414  objspace->flags.during_incremental_marking = FALSE;
5415  /* check children of all marked wb-unprotected objects */
5416  for (int i = 0; i < HEAP_COUNT; i++) {
5417  gc_marks_wb_unprotected_objects(objspace, &heaps[i]);
5418  }
5419  }
5420 
5421  gc_update_weak_references(objspace);
5422 
5423 #if RGENGC_CHECK_MODE >= 2
5424  gc_verify_internal_consistency(objspace);
5425 #endif
5426 
5427 #if RGENGC_CHECK_MODE >= 4
5428  during_gc = FALSE;
5429  gc_marks_check(objspace, gc_check_after_marks_i, "after_marks");
5430  during_gc = TRUE;
5431 #endif
5432 
5433  {
5434  const unsigned long r_mul = objspace->live_ractor_cache_count > 8 ? 8 : objspace->live_ractor_cache_count; // upto 8
5435 
5436  size_t total_slots = objspace_available_slots(objspace);
5437  size_t sweep_slots = total_slots - objspace->marked_slots; /* will be swept slots */
5438  size_t max_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_max_ratio);
5439  size_t min_free_slots = (size_t)(total_slots * gc_params.heap_free_slots_min_ratio);
5440  if (min_free_slots < gc_params.heap_free_slots * r_mul) {
5441  min_free_slots = gc_params.heap_free_slots * r_mul;
5442  }
5443 
5444  int full_marking = is_full_marking(objspace);
5445 
5446  GC_ASSERT(objspace_available_slots(objspace) >= objspace->marked_slots);
5447 
5448  /* Setup freeable slots. */
5449  size_t total_init_slots = 0;
5450  for (int i = 0; i < HEAP_COUNT; i++) {
5451  total_init_slots += gc_params.heap_init_slots[i] * r_mul;
5452  }
5453 
5454  if (max_free_slots < total_init_slots) {
5455  max_free_slots = total_init_slots;
5456  }
5457 
5458  if (sweep_slots > max_free_slots) {
5459  heap_pages_freeable_pages = (sweep_slots - max_free_slots) / HEAP_PAGE_OBJ_LIMIT;
5460  }
5461  else {
5462  heap_pages_freeable_pages = 0;
5463  }
5464 
5465  if (objspace->heap_pages.allocatable_slots == 0 && sweep_slots < min_free_slots) {
5466  if (!full_marking) {
5467  if (objspace->profile.count - objspace->rgengc.last_major_gc < RVALUE_OLD_AGE) {
5468  full_marking = TRUE;
5469  }
5470  else {
5471  gc_report(1, objspace, "gc_marks_finish: next is full GC!!)\n");
5472  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_NOFREE;
5473  }
5474  }
5475  }
5476 
5477  if (full_marking) {
5478  /* See the comment about RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR */
5479  const double r = gc_params.oldobject_limit_factor;
5480  objspace->rgengc.uncollectible_wb_unprotected_objects_limit = MAX(
5481  (size_t)(objspace->rgengc.uncollectible_wb_unprotected_objects * r),
5482  (size_t)(objspace->rgengc.old_objects * gc_params.uncollectible_wb_unprotected_objects_limit_ratio)
5483  );
5484  objspace->rgengc.old_objects_limit = (size_t)(objspace->rgengc.old_objects * r);
5485  }
5486 
5487  if (objspace->rgengc.uncollectible_wb_unprotected_objects > objspace->rgengc.uncollectible_wb_unprotected_objects_limit) {
5488  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_SHADY;
5489  }
5490  if (objspace->rgengc.old_objects > objspace->rgengc.old_objects_limit) {
5491  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDGEN;
5492  }
5493 
5494  gc_report(1, objspace, "gc_marks_finish (marks %"PRIdSIZE" objects, "
5495  "old %"PRIdSIZE" objects, total %"PRIdSIZE" slots, "
5496  "sweep %"PRIdSIZE" slots, allocatable %"PRIdSIZE" slots, next GC: %s)\n",
5497  objspace->marked_slots, objspace->rgengc.old_objects, objspace_available_slots(objspace), sweep_slots, objspace->heap_pages.allocatable_slots,
5498  gc_needs_major_flags ? "major" : "minor");
5499  }
5500 
5501  // TODO: refactor so we don't need to call this
5502  rb_ractor_finish_marking();
5503 
5504  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_END_MARK);
5505 }
5506 
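/* Compaction cursors: each heap's sweeping_page walks forward through the page
 * list while compact_cursor walks backward (gc_sweep_compact moves it with
 * ccan_list_prev). Once the two meet, every page of that heap has been either
 * swept or compacted, so nothing more can be moved into it. */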
5507 static bool
5508 gc_compact_heap_cursors_met_p(rb_heap_t *heap)
5509 {
5510  return heap->sweeping_page == heap->compact_cursor;
5511 }
5512 
5513 
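/* Pick the heap (size pool) an object should be moved into during compaction,
 * based on rb_gc_obj_optimal_size(). When the optimal size is unknown (0) the
 * object stays in its source pool; otherwise the heap returned by
 * heap_idx_for_size() is used, with non-allocatable sizes falling back to
 * heap index 0. */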
5514 static rb_heap_t *
5515 gc_compact_destination_pool(rb_objspace_t *objspace, rb_heap_t *src_pool, VALUE obj)
5516 {
5517  size_t obj_size = rb_gc_obj_optimal_size(obj);
5518  if (obj_size == 0) {
5519  return src_pool;
5520  }
5521 
5522  size_t idx = 0;
5523  if (rb_gc_impl_size_allocatable_p(obj_size)) {
5524  idx = heap_idx_for_size(obj_size);
5525  }
5526 
5527  return &heaps[idx];
5528 }
5529 
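/* Try to move src into the pool chosen by gc_compact_destination_pool(). If
 * the destination has no free slot, more of its pages are swept on demand.
 * Returns false only when the cursors met on the source heap itself, which
 * tells the caller to stop compacting the current page. For T_OBJECT a shape
 * for the new size pool is computed up front, and the original shape id is
 * stashed on the resulting T_MOVED stub. */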
5530 static bool
5531 gc_compact_move(rb_objspace_t *objspace, rb_heap_t *heap, VALUE src)
5532 {
5533  GC_ASSERT(BUILTIN_TYPE(src) != T_MOVED);
5534  GC_ASSERT(gc_is_moveable_obj(objspace, src));
5535 
5536  rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, heap, src);
5537  uint32_t orig_shape = 0;
5538  uint32_t new_shape = 0;
5539 
5540  if (gc_compact_heap_cursors_met_p(dest_pool)) {
5541  return dest_pool != heap;
5542  }
5543 
5544  if (RB_TYPE_P(src, T_OBJECT)) {
5545  orig_shape = rb_gc_get_shape(src);
5546 
5547  if (dest_pool != heap) {
5548  new_shape = rb_gc_rebuild_shape(src, dest_pool - heaps);
5549 
5550  if (new_shape == 0) {
5551  dest_pool = heap;
5552  }
5553  }
5554  }
5555 
5556  while (!try_move(objspace, dest_pool, dest_pool->free_pages, src)) {
5557  struct gc_sweep_context ctx = {
5558  .page = dest_pool->sweeping_page,
5559  .final_slots = 0,
5560  .freed_slots = 0,
5561  .empty_slots = 0,
5562  };
5563 
5564  /* The page of src could be partially compacted, so it may contain
5565  * T_MOVED. Sweeping a page may read objects on this page, so we
5566  * need to lock the page. */
5567  lock_page_body(objspace, GET_PAGE_BODY(src));
5568  gc_sweep_page(objspace, dest_pool, &ctx);
5569  unlock_page_body(objspace, GET_PAGE_BODY(src));
5570 
5571  if (dest_pool->sweeping_page->free_slots > 0) {
5572  heap_add_freepage(dest_pool, dest_pool->sweeping_page);
5573  }
5574 
5575  dest_pool->sweeping_page = ccan_list_next(&dest_pool->pages, dest_pool->sweeping_page, page_node);
5576  if (gc_compact_heap_cursors_met_p(dest_pool)) {
5577  return dest_pool != heap;
5578  }
5579  }
5580 
5581  if (orig_shape != 0) {
5582  if (new_shape != 0) {
5583  VALUE dest = rb_gc_impl_location(objspace, src);
5584  rb_gc_set_shape(dest, new_shape);
5585  }
5586  RMOVED(src)->original_shape_id = orig_shape;
5587  }
5588 
5589  return true;
5590 }
5591 
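/* Compact one bitmap plane (one bits_t worth of slots): for every bit set in
 * bitset, consider the object at that slot and move it if it is moveable.
 * Returns false as soon as gc_compact_move() reports that the cursors met. */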
5592 static bool
5593 gc_compact_plane(rb_objspace_t *objspace, rb_heap_t *heap, uintptr_t p, bits_t bitset, struct heap_page *page)
5594 {
5595  short slot_size = page->slot_size;
5596  short slot_bits = slot_size / BASE_SLOT_SIZE;
5597  GC_ASSERT(slot_bits > 0);
5598 
5599  do {
5600  VALUE vp = (VALUE)p;
5601  GC_ASSERT(vp % BASE_SLOT_SIZE == 0);
5602 
5603  if (bitset & 1) {
5604  objspace->rcompactor.considered_count_table[BUILTIN_TYPE(vp)]++;
5605 
5606  if (gc_is_moveable_obj(objspace, vp)) {
5607  if (!gc_compact_move(objspace, heap, vp)) {
5608  // the cursors met, so bubble the failure up to the caller
5609  return false;
5610  }
5611  }
5612  }
5613  p += slot_size;
5614  bitset >>= slot_bits;
5615  } while (bitset);
5616 
5617  return true;
5618 }
5619 
5620 // Iterate over all the objects on the page, moving them to where they want to go
5621 static bool
5622 gc_compact_page(rb_objspace_t *objspace, rb_heap_t *heap, struct heap_page *page)
5623 {
5624  GC_ASSERT(page == heap->compact_cursor);
5625 
5626  bits_t *mark_bits, *pin_bits;
5627  bits_t bitset;
5628  uintptr_t p = page->start;
5629 
5630  mark_bits = page->mark_bits;
5631  pin_bits = page->pinned_bits;
5632 
5633  // objects that can be moved are marked and not pinned
5634  bitset = (mark_bits[0] & ~pin_bits[0]);
5635  bitset >>= NUM_IN_PAGE(p);
5636  if (bitset) {
5637  if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5638  return false;
5639  }
5640  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5641 
5642  for (int j = 1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
5643  bitset = (mark_bits[j] & ~pin_bits[j]);
5644  if (bitset) {
5645  if (!gc_compact_plane(objspace, heap, (uintptr_t)p, bitset, page))
5646  return false;
5647  }
5648  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5649  }
5650 
5651  return true;
5652 }
5653 
5654 static bool
5655 gc_compact_all_compacted_p(rb_objspace_t *objspace)
5656 {
5657  for (int i = 0; i < HEAP_COUNT; i++) {
5658  rb_heap_t *heap = &heaps[i];
5659 
5660  if (heap->total_pages > 0 &&
5661  !gc_compact_heap_cursors_met_p(heap)) {
5662  return false;
5663  }
5664  }
5665 
5666  return true;
5667 }
5668 
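/* Drive compaction for all heaps: repeatedly compact the page under each
 * heap's compact_cursor, locking the page body afterwards, until every heap's
 * sweep and compact cursors have met. */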
5669 static void
5670 gc_sweep_compact(rb_objspace_t *objspace)
5671 {
5672  gc_compact_start(objspace);
5673 #if RGENGC_CHECK_MODE >= 2
5674  gc_verify_internal_consistency(objspace);
5675 #endif
5676 
5677  while (!gc_compact_all_compacted_p(objspace)) {
5678  for (int i = 0; i < HEAP_COUNT; i++) {
5679  rb_heap_t *heap = &heaps[i];
5680 
5681  if (gc_compact_heap_cursors_met_p(heap)) {
5682  continue;
5683  }
5684 
5685  struct heap_page *start_page = heap->compact_cursor;
5686 
5687  if (!gc_compact_page(objspace, heap, start_page)) {
5688  lock_page_body(objspace, start_page->body);
5689 
5690  continue;
5691  }
5692 
5693  // If we get here, we've finished moving all objects on the compact_cursor page
5694  // So we can lock it and move the cursor on to the next one.
5695  lock_page_body(objspace, start_page->body);
5696  heap->compact_cursor = ccan_list_prev(&heap->pages, heap->compact_cursor, page_node);
5697  }
5698  }
5699 
5700  gc_compact_finish(objspace);
5701 
5702 #if RGENGC_CHECK_MODE >= 2
5703  gc_verify_internal_consistency(objspace);
5704 #endif
5705 }
5706 
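/* Finish marking non-incrementally: drop the pooled page lists, drain the
 * whole mark stack, and run gc_marks_finish(). */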
5707 static void
5708 gc_marks_rest(rb_objspace_t *objspace)
5709 {
5710  gc_report(1, objspace, "gc_marks_rest\n");
5711 
5712  for (int i = 0; i < HEAP_COUNT; i++) {
5713  (&heaps[i])->pooled_pages = NULL;
5714  }
5715 
5716  if (is_incremental_marking(objspace)) {
5717  while (gc_mark_stacked_objects_incremental(objspace, INT_MAX) == FALSE);
5718  }
5719  else {
5720  gc_mark_stacked_objects_all(objspace);
5721  }
5722 
5723  gc_marks_finish(objspace);
5724 }
5725 
5726 static bool
5727 gc_marks_step(rb_objspace_t *objspace, size_t slots)
5728 {
5729  bool marking_finished = false;
5730 
5731  GC_ASSERT(is_marking(objspace));
5732  if (gc_mark_stacked_objects_incremental(objspace, slots)) {
5733  gc_marks_finish(objspace);
5734 
5735  marking_finished = true;
5736  }
5737 
5738  return marking_finished;
5739 }
5740 
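/* Continue incremental marking when a heap runs out of free pages: do one
 * marking step if the heap still has free pages, otherwise finish marking
 * entirely (counted in force_incremental_marking_finish_count). */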
5741 static bool
5742 gc_marks_continue(rb_objspace_t *objspace, rb_heap_t *heap)
5743 {
5744  GC_ASSERT(dont_gc_val() == FALSE || objspace->profile.latest_gc_info & GPR_FLAG_METHOD);
5745  bool marking_finished = true;
5746 
5747  gc_marking_enter(objspace);
5748 
5749  if (heap->free_pages) {
5750  gc_report(2, objspace, "gc_marks_continue: has pooled pages");
5751 
5752  marking_finished = gc_marks_step(objspace, objspace->rincgc.step_slots);
5753  }
5754  else {
5755  gc_report(2, objspace, "gc_marks_continue: no more pooled pages (stack depth: %"PRIdSIZE").\n",
5756  mark_stack_size(&objspace->mark_stack));
5757  heap->force_incremental_marking_finish_count++;
5758  gc_marks_rest(objspace);
5759  }
5760 
5761  gc_marking_exit(objspace);
5762 
5763  return marking_finished;
5764 }
5765 
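/* Prepare a marking phase. A full mark clears every heap's mark bits and
 * remember sets and resets the old/uncollectible counters; a minor mark keeps
 * them and instead re-marks the remembered set. Both finish by marking the
 * roots. */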
5766 static void
5767 gc_marks_start(rb_objspace_t *objspace, int full_mark)
5768 {
5769  /* start marking */
5770  gc_report(1, objspace, "gc_marks_start: (%s)\n", full_mark ? "full" : "minor");
5771  gc_mode_transition(objspace, gc_mode_marking);
5772 
5773  if (full_mark) {
5774  size_t incremental_marking_steps = (objspace->rincgc.pooled_slots / INCREMENTAL_MARK_STEP_ALLOCATIONS) + 1;
5775  objspace->rincgc.step_slots = (objspace->marked_slots * 2) / incremental_marking_steps;
5776 
5777  if (0) fprintf(stderr, "objspace->marked_slots: %"PRIdSIZE", "
5778  "objspace->rincgc.pooled_page_num: %"PRIdSIZE", "
5779  "objspace->rincgc.step_slots: %"PRIdSIZE", \n",
5780  objspace->marked_slots, objspace->rincgc.pooled_slots, objspace->rincgc.step_slots);
5781  objspace->flags.during_minor_gc = FALSE;
5782  if (ruby_enable_autocompact) {
5783  objspace->flags.during_compacting |= TRUE;
5784  }
5785  objspace->profile.major_gc_count++;
5786  objspace->rgengc.uncollectible_wb_unprotected_objects = 0;
5787  objspace->rgengc.old_objects = 0;
5788  objspace->rgengc.last_major_gc = objspace->profile.count;
5789  objspace->marked_slots = 0;
5790 
5791  for (int i = 0; i < HEAP_COUNT; i++) {
5792  rb_heap_t *heap = &heaps[i];
5793  rgengc_mark_and_rememberset_clear(objspace, heap);
5794  heap_move_pooled_pages_to_free_pages(heap);
5795 
5796  if (objspace->flags.during_compacting) {
5797  struct heap_page *page = NULL;
5798 
5799  ccan_list_for_each(&heap->pages, page, page_node) {
5800  page->pinned_slots = 0;
5801  }
5802  }
5803  }
5804  }
5805  else {
5806  objspace->flags.during_minor_gc = TRUE;
5807  objspace->marked_slots =
5808  objspace->rgengc.old_objects + objspace->rgengc.uncollectible_wb_unprotected_objects; /* uncollectible objects are marked already */
5809  objspace->profile.minor_gc_count++;
5810 
5811  for (int i = 0; i < HEAP_COUNT; i++) {
5812  rgengc_rememberset_mark(objspace, &heaps[i]);
5813  }
5814  }
5815 
5816  mark_roots(objspace, NULL);
5817 
5818  gc_report(1, objspace, "gc_marks_start: (%s) end, stack in %"PRIdSIZE"\n",
5819  full_mark ? "full" : "minor", mark_stack_size(&objspace->mark_stack));
5820 }
5821 
5822 static bool
5823 gc_marks(rb_objspace_t *objspace, int full_mark)
5824 {
5825  gc_prof_mark_timer_start(objspace);
5826  gc_marking_enter(objspace);
5827 
5828  bool marking_finished = false;
5829 
5830  /* setup marking */
5831 
5832  gc_marks_start(objspace, full_mark);
5833  if (!is_incremental_marking(objspace)) {
5834  gc_marks_rest(objspace);
5835  marking_finished = true;
5836  }
5837 
5838 #if RGENGC_PROFILE > 0
5839  if (gc_prof_record(objspace)) {
5840  gc_profile_record *record = gc_prof_record(objspace);
5841  record->old_objects = objspace->rgengc.old_objects;
5842  }
5843 #endif
5844 
5845  gc_marking_exit(objspace);
5846  gc_prof_mark_timer_stop(objspace);
5847 
5848  return marking_finished;
5849 }
5850 
5851 /* RGENGC */
5852 
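/* Debug reporting, enabled when RGENGC_DEBUG >= level. The one-character
 * prefix encodes the GC state: '+' while a full-mark GC is running, '-' while
 * a minor GC is running, 'S' when lazy sweeping is pending, 'M' when
 * incremental marking is pending, ' ' otherwise. */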
5853 static void
5854 gc_report_body(int level, rb_objspace_t *objspace, const char *fmt, ...)
5855 {
5856  if (level <= RGENGC_DEBUG) {
5857  char buf[1024];
5858  FILE *out = stderr;
5859  va_list args;
5860  const char *status = " ";
5861 
5862  if (during_gc) {
5863  status = is_full_marking(objspace) ? "+" : "-";
5864  }
5865  else {
5866  if (is_lazy_sweeping(objspace)) {
5867  status = "S";
5868  }
5869  if (is_incremental_marking(objspace)) {
5870  status = "M";
5871  }
5872  }
5873 
5874  va_start(args, fmt);
5875  vsnprintf(buf, 1024, fmt, args);
5876  va_end(args);
5877 
5878  fprintf(out, "%s|", status);
5879  fputs(buf, out);
5880  }
5881 }
5882 
5883 /* bit operations */
5884 
5885 static int
5886 rgengc_remembersetbits_set(rb_objspace_t *objspace, VALUE obj)
5887 {
5888  struct heap_page *page = GET_HEAP_PAGE(obj);
5889  bits_t *bits = &page->remembered_bits[0];
5890 
5891  if (MARKED_IN_BITMAP(bits, obj)) {
5892  return FALSE;
5893  }
5894  else {
5895  page->flags.has_remembered_objects = TRUE;
5896  MARK_IN_BITMAP(bits, obj);
5897  return TRUE;
5898  }
5899 }
5900 
5901 /* wb, etc */
5902 
5903 /* return FALSE if already remembered */
5904 static int
5905 rgengc_remember(rb_objspace_t *objspace, VALUE obj)
5906 {
5907  gc_report(6, objspace, "rgengc_remember: %s %s\n", rb_obj_info(obj),
5908  RVALUE_REMEMBERED(objspace, obj) ? "was already remembered" : "is remembered now");
5909 
5910  check_rvalue_consistency(objspace, obj);
5911 
5912  if (RGENGC_CHECK_MODE) {
5913  if (RVALUE_WB_UNPROTECTED(objspace, obj)) rb_bug("rgengc_remember: %s is not wb protected.", rb_obj_info(obj));
5914  }
5915 
5916 #if RGENGC_PROFILE > 0
5917  if (!RVALUE_REMEMBERED(objspace, obj)) {
5918  if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0) {
5919  objspace->profile.total_remembered_normal_object_count++;
5920 #if RGENGC_PROFILE >= 2
5921  objspace->profile.remembered_normal_object_count_types[BUILTIN_TYPE(obj)]++;
5922 #endif
5923  }
5924  }
5925 #endif /* RGENGC_PROFILE > 0 */
5926 
5927  return rgengc_remembersetbits_set(objspace, obj);
5928 }
5929 
5930 #ifndef PROFILE_REMEMBERSET_MARK
5931 #define PROFILE_REMEMBERSET_MARK 0
5932 #endif
5933 
5934 static inline void
5935 rgengc_rememberset_mark_plane(rb_objspace_t *objspace, uintptr_t p, bits_t bitset)
5936 {
5937  if (bitset) {
5938  do {
5939  if (bitset & 1) {
5940  VALUE obj = (VALUE)p;
5941  gc_report(2, objspace, "rgengc_rememberset_mark: mark %s\n", rb_obj_info(obj));
5942  GC_ASSERT(RVALUE_UNCOLLECTIBLE(objspace, obj));
5943  GC_ASSERT(RVALUE_OLD_P(objspace, obj) || RVALUE_WB_UNPROTECTED(objspace, obj));
5944 
5945  gc_mark_children(objspace, obj);
5946  }
5947  p += BASE_SLOT_SIZE;
5948  bitset >>= 1;
5949  } while (bitset);
5950  }
5951 }
5952 
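/* Minor-GC marking of the remembered set: for every page that has remembered
 * or uncollectible WB-unprotected objects, mark the children of those objects
 * and clear the remembered bits afterwards. */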
5953 static void
5954 rgengc_rememberset_mark(rb_objspace_t *objspace, rb_heap_t *heap)
5955 {
5956  size_t j;
5957  struct heap_page *page = 0;
5958 #if PROFILE_REMEMBERSET_MARK
5959  int has_old = 0, has_shady = 0, has_both = 0, skip = 0;
5960 #endif
5961  gc_report(1, objspace, "rgengc_rememberset_mark: start\n");
5962 
5963  ccan_list_for_each(&heap->pages, page, page_node) {
5964  if (page->flags.has_remembered_objects | page->flags.has_uncollectible_wb_unprotected_objects) {
5965  uintptr_t p = page->start;
5966  bits_t bitset, bits[HEAP_PAGE_BITMAP_LIMIT];
5967  bits_t *remembered_bits = page->remembered_bits;
5968  bits_t *uncollectible_bits = page->uncollectible_bits;
5969  bits_t *wb_unprotected_bits = page->wb_unprotected_bits;
5970 #if PROFILE_REMEMBERSET_MARK
5971  if (page->flags.has_remembered_objects && page->flags.has_uncollectible_wb_unprotected_objects) has_both++;
5972  else if (page->flags.has_remembered_objects) has_old++;
5973  else if (page->flags.has_uncollectible_wb_unprotected_objects) has_shady++;
5974 #endif
5975  for (j=0; j<HEAP_PAGE_BITMAP_LIMIT; j++) {
5976  bits[j] = remembered_bits[j] | (uncollectible_bits[j] & wb_unprotected_bits[j]);
5977  remembered_bits[j] = 0;
5978  }
5979  page->flags.has_remembered_objects = FALSE;
5980 
5981  bitset = bits[0];
5982  bitset >>= NUM_IN_PAGE(p);
5983  rgengc_rememberset_mark_plane(objspace, p, bitset);
5984  p += (BITS_BITLENGTH - NUM_IN_PAGE(p)) * BASE_SLOT_SIZE;
5985 
5986  for (j=1; j < HEAP_PAGE_BITMAP_LIMIT; j++) {
5987  bitset = bits[j];
5988  rgengc_rememberset_mark_plane(objspace, p, bitset);
5989  p += BITS_BITLENGTH * BASE_SLOT_SIZE;
5990  }
5991  }
5992 #if PROFILE_REMEMBERSET_MARK
5993  else {
5994  skip++;
5995  }
5996 #endif
5997  }
5998 
5999 #if PROFILE_REMEMBERSET_MARK
6000  fprintf(stderr, "%d\t%d\t%d\t%d\n", has_both, has_old, has_shady, skip);
6001 #endif
6002  gc_report(1, objspace, "rgengc_rememberset_mark: finished\n");
6003 }
6004 
6005 static void
6006 rgengc_mark_and_rememberset_clear(rb_objspace_t *objspace, rb_heap_t *heap)
6007 {
6008  struct heap_page *page = 0;
6009 
6010  ccan_list_for_each(&heap->pages, page, page_node) {
6011  memset(&page->mark_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6012  memset(&page->uncollectible_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6013  memset(&page->marking_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6014  memset(&page->remembered_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6015  memset(&page->pinned_bits[0], 0, HEAP_PAGE_BITMAP_SIZE);
6016  page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
6017  page->flags.has_remembered_objects = FALSE;
6018  }
6019 }
6020 
6021 /* RGENGC: APIs */
6022 
6023 NOINLINE(static void gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace));
6024 
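/* Generational write barrier, taken when an old object `a' gains a reference
 * to a young object `b' outside of incremental marking: remember `a' so the
 * next minor GC re-scans its children. */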
6025 static void
6026 gc_writebarrier_generational(VALUE a, VALUE b, rb_objspace_t *objspace)
6027 {
6028  if (RGENGC_CHECK_MODE) {
6029  if (!RVALUE_OLD_P(objspace, a)) rb_bug("gc_writebarrier_generational: %s is not an old object.", rb_obj_info(a));
6030  if ( RVALUE_OLD_P(objspace, b)) rb_bug("gc_writebarrier_generational: %s is an old object.", rb_obj_info(b));
6031  if (is_incremental_marking(objspace)) rb_bug("gc_writebarrier_generational: called while incremental marking: %s -> %s", rb_obj_info(a), rb_obj_info(b));
6032  }
6033 
6034  /* mark `a' and remember (default behavior) */
6035  if (!RVALUE_REMEMBERED(objspace, a)) {
6036  int lev = rb_gc_vm_lock_no_barrier();
6037  {
6038  rgengc_remember(objspace, a);
6039  }
6040  rb_gc_vm_unlock_no_barrier(lev);
6041 
6042  gc_report(1, objspace, "gc_writebarrier_generational: %s (remembered) -> %s\n", rb_obj_info(a), rb_obj_info(b));
6043  }
6044 
6045  check_rvalue_consistency(objspace, a);
6046  check_rvalue_consistency(objspace, b);
6047 }
6048 
6049 static void
6050 gc_mark_from(rb_objspace_t *objspace, VALUE obj, VALUE parent)
6051 {
6052  gc_mark_set_parent(objspace, parent);
6053  rgengc_check_relation(objspace, obj);
6054  if (gc_mark_set(objspace, obj) == FALSE) return;
6055  gc_aging(objspace, obj);
6056  gc_grey(objspace, obj);
6057 }
6058 
6059 NOINLINE(static void gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace));
6060 
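/* Write barrier used while incremental marking is running: if a black
 * (already marked), write-barrier-protected object gains a reference to a
 * white object, the white object is greyed immediately; an old-to-young store
 * is remembered instead. During compaction the target is additionally pinned. */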
6061 static void
6062 gc_writebarrier_incremental(VALUE a, VALUE b, rb_objspace_t *objspace)
6063 {
6064  gc_report(2, objspace, "gc_writebarrier_incremental: [LG] %p -> %s\n", (void *)a, rb_obj_info(b));
6065 
6066  if (RVALUE_BLACK_P(objspace, a)) {
6067  if (RVALUE_WHITE_P(objspace, b)) {
6068  if (!RVALUE_WB_UNPROTECTED(objspace, a)) {
6069  gc_report(2, objspace, "gc_writebarrier_incremental: [IN] %p -> %s\n", (void *)a, rb_obj_info(b));
6070  gc_mark_from(objspace, b, a);
6071  }
6072  }
6073  else if (RVALUE_OLD_P(objspace, a) && !RVALUE_OLD_P(objspace, b)) {
6074  rgengc_remember(objspace, a);
6075  }
6076 
6077  if (RB_UNLIKELY(objspace->flags.during_compacting)) {
6078  MARK_IN_BITMAP(GET_HEAP_PINNED_BITS(b), b);
6079  }
6080  }
6081 }
6082 
6083 void
6084 rb_gc_impl_writebarrier(void *objspace_ptr, VALUE a, VALUE b)
6085 {
6086  rb_objspace_t *objspace = objspace_ptr;
6087 
6088  if (RGENGC_CHECK_MODE) {
6089  if (SPECIAL_CONST_P(a)) rb_bug("rb_gc_writebarrier: a is special const: %"PRIxVALUE, a);
6090  if (SPECIAL_CONST_P(b)) rb_bug("rb_gc_writebarrier: b is special const: %"PRIxVALUE, b);
6091  }
6092 
6093  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_NONE);
6094  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_MOVED);
6095  GC_ASSERT(RB_BUILTIN_TYPE(a) != T_ZOMBIE);
6096  GC_ASSERT(RB_BUILTIN_TYPE(b) != T_NONE);
6097  GC_ASSERT(RB_BUILTIN_TYPE(b) != T_MOVED);
6098  GC_ASSERT(RB_BUILTIN_TYPE(b) != T_ZOMBIE);
6099 
6100  retry:
6101  if (!is_incremental_marking(objspace)) {
6102  if (!RVALUE_OLD_P(objspace, a) || RVALUE_OLD_P(objspace, b)) {
6103  // do nothing
6104  }
6105  else {
6106  gc_writebarrier_generational(a, b, objspace);
6107  }
6108  }
6109  else {
6110  bool retry = false;
6111  /* slow path */
6112  int lev = rb_gc_vm_lock_no_barrier();
6113  {
6114  if (is_incremental_marking(objspace)) {
6115  gc_writebarrier_incremental(a, b, objspace);
6116  }
6117  else {
6118  retry = true;
6119  }
6120  }
6121  rb_gc_vm_unlock_no_barrier(lev);
6122 
6123  if (retry) goto retry;
6124  }
6125  return;
6126 }
6127 
6128 void
6129 rb_gc_impl_writebarrier_unprotect(void *objspace_ptr, VALUE obj)
6130 {
6131  rb_objspace_t *objspace = objspace_ptr;
6132 
6133  if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
6134  return;
6135  }
6136  else {
6137  gc_report(2, objspace, "rb_gc_writebarrier_unprotect: %s %s\n", rb_obj_info(obj),
6138  RVALUE_REMEMBERED(objspace, obj) ? " (already remembered)" : "");
6139 
6140  unsigned int lev = rb_gc_vm_lock_no_barrier();
6141  {
6142  if (RVALUE_OLD_P(objspace, obj)) {
6143  gc_report(1, objspace, "rb_gc_writebarrier_unprotect: %s\n", rb_obj_info(obj));
6144  RVALUE_DEMOTE(objspace, obj);
6145  gc_mark_set(objspace, obj);
6146  gc_remember_unprotected(objspace, obj);
6147 
6148 #if RGENGC_PROFILE
6149  objspace->profile.total_shade_operation_count++;
6150 #if RGENGC_PROFILE >= 2
6151  objspace->profile.shade_operation_count_types[BUILTIN_TYPE(obj)]++;
6152 #endif /* RGENGC_PROFILE >= 2 */
6153 #endif /* RGENGC_PROFILE */
6154  }
6155  else {
6156  RVALUE_AGE_RESET(obj);
6157  }
6158 
6159  RB_DEBUG_COUNTER_INC(obj_wb_unprotect);
6160  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(obj), obj);
6161  }
6162  rb_gc_vm_unlock_no_barrier(lev);
6163  }
6164 }
6165 
6166 void
6167 rb_gc_impl_copy_attributes(void *objspace_ptr, VALUE dest, VALUE obj)
6168 {
6169  rb_objspace_t *objspace = objspace_ptr;
6170 
6171  if (RVALUE_WB_UNPROTECTED(objspace, obj)) {
6172  rb_gc_impl_writebarrier_unprotect(objspace, dest);
6173  }
6174  rb_gc_impl_copy_finalizer(objspace, dest, obj);
6175 }
6176 
6177 const char *
6178 rb_gc_impl_active_gc_name(void)
6179 {
6180  return "default";
6181 }
6182 
6183 void
6184 rb_gc_impl_writebarrier_remember(void *objspace_ptr, VALUE obj)
6185 {
6186  rb_objspace_t *objspace = objspace_ptr;
6187 
6188  gc_report(1, objspace, "rb_gc_writebarrier_remember: %s\n", rb_obj_info(obj));
6189 
6190  if (is_incremental_marking(objspace)) {
6191  if (RVALUE_BLACK_P(objspace, obj)) {
6192  gc_grey(objspace, obj);
6193  }
6194  }
6195  else {
6196  if (RVALUE_OLD_P(objspace, obj)) {
6197  rgengc_remember(objspace, obj);
6198  }
6199  }
6200 }
6201 
6202 // TODO: rearchitect this function to work for a generic GC
6203 size_t
6204 rb_gc_impl_obj_flags(void *objspace_ptr, VALUE obj, ID* flags, size_t max)
6205 {
6206  rb_objspace_t *objspace = objspace_ptr;
6207  size_t n = 0;
6208  static ID ID_marked;
6209  static ID ID_wb_protected, ID_old, ID_marking, ID_uncollectible, ID_pinned;
6210 
6211  if (!ID_marked) {
6212 #define I(s) ID_##s = rb_intern(#s);
6213  I(marked);
6214  I(wb_protected);
6215  I(old);
6216  I(marking);
6217  I(uncollectible);
6218  I(pinned);
6219 #undef I
6220  }
6221 
6222  if (RVALUE_WB_UNPROTECTED(objspace, obj) == 0 && n < max) flags[n++] = ID_wb_protected;
6223  if (RVALUE_OLD_P(objspace, obj) && n < max) flags[n++] = ID_old;
6224  if (RVALUE_UNCOLLECTIBLE(objspace, obj) && n < max) flags[n++] = ID_uncollectible;
6225  if (RVALUE_MARKING(objspace, obj) && n < max) flags[n++] = ID_marking;
6226  if (RVALUE_MARKED(objspace, obj) && n < max) flags[n++] = ID_marked;
6227  if (RVALUE_PINNED(objspace, obj) && n < max) flags[n++] = ID_pinned;
6228  return n;
6229 }
6230 
6231 void *
6232 rb_gc_impl_ractor_cache_alloc(void *objspace_ptr, void *ractor)
6233 {
6234  rb_objspace_t *objspace = objspace_ptr;
6235 
6236  objspace->live_ractor_cache_count++;
6237 
6238  return calloc1(sizeof(rb_ractor_newobj_cache_t));
6239 }
6240 
6241 void
6242 rb_gc_impl_ractor_cache_free(void *objspace_ptr, void *cache)
6243 {
6244  rb_objspace_t *objspace = objspace_ptr;
6245 
6246  objspace->live_ractor_cache_count--;
6247 
6248  gc_ractor_newobj_cache_clear(cache, NULL);
6249  free(cache);
6250 }
6251 
6252 static void
6253 heap_ready_to_gc(rb_objspace_t *objspace, rb_heap_t *heap)
6254 {
6255  if (!heap->free_pages) {
6256  if (!heap_page_allocate_and_initialize(objspace, heap)) {
6257  objspace->heap_pages.allocatable_slots = 1;
6258  heap_page_allocate_and_initialize(objspace, heap);
6259  }
6260  }
6261 }
6262 
6263 static int
6264 ready_to_gc(rb_objspace_t *objspace)
6265 {
6266  if (dont_gc_val() || during_gc || ruby_disable_gc) {
6267  for (int i = 0; i < HEAP_COUNT; i++) {
6268  rb_heap_t *heap = &heaps[i];
6269  heap_ready_to_gc(objspace, heap);
6270  }
6271  return FALSE;
6272  }
6273  else {
6274  return TRUE;
6275  }
6276 }
6277 
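/* Recompute malloc_limit after a GC: grow it (up to malloc_limit_max) when the
 * bytes malloc'd since the last GC exceeded the old limit, otherwise let it
 * decay by 2% down to malloc_limit_min. With RGENGC_ESTIMATE_OLDMALLOC the
 * oldmalloc counters are adjusted similarly and can flag the next GC as major. */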
6278 static void
6279 gc_reset_malloc_info(rb_objspace_t *objspace, bool full_mark)
6280 {
6281  gc_prof_set_malloc_info(objspace);
6282  {
6283  size_t inc = RUBY_ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
6284  size_t old_limit = malloc_limit;
6285 
6286  if (inc > malloc_limit) {
6287  malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
6288  if (malloc_limit > gc_params.malloc_limit_max) {
6289  malloc_limit = gc_params.malloc_limit_max;
6290  }
6291  }
6292  else {
6293  malloc_limit = (size_t)(malloc_limit * 0.98); /* magic number */
6294  if (malloc_limit < gc_params.malloc_limit_min) {
6295  malloc_limit = gc_params.malloc_limit_min;
6296  }
6297  }
6298 
6299  if (0) {
6300  if (old_limit != malloc_limit) {
6301  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: %"PRIuSIZE" -> %"PRIuSIZE"\n",
6302  rb_gc_count(), old_limit, malloc_limit);
6303  }
6304  else {
6305  fprintf(stderr, "[%"PRIuSIZE"] malloc_limit: not changed (%"PRIuSIZE")\n",
6306  rb_gc_count(), malloc_limit);
6307  }
6308  }
6309  }
6310 
6311  /* reset oldmalloc info */
6312 #if RGENGC_ESTIMATE_OLDMALLOC
6313  if (!full_mark) {
6314  if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
6315  gc_needs_major_flags |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
6316  objspace->rgengc.oldmalloc_increase_limit =
6317  (size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
6318 
6319  if (objspace->rgengc.oldmalloc_increase_limit > gc_params.oldmalloc_limit_max) {
6320  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_max;
6321  }
6322  }
6323 
6324  if (0) fprintf(stderr, "%"PRIdSIZE"\t%d\t%"PRIuSIZE"\t%"PRIuSIZE"\t%"PRIdSIZE"\n",
6325  rb_gc_count(),
6326  gc_needs_major_flags,
6327  objspace->rgengc.oldmalloc_increase,
6328  objspace->rgengc.oldmalloc_increase_limit,
6329  gc_params.oldmalloc_limit_max);
6330  }
6331  else {
6332  /* major GC */
6333  objspace->rgengc.oldmalloc_increase = 0;
6334 
6335  if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
6336  objspace->rgengc.oldmalloc_increase_limit =
6337  (size_t)(objspace->rgengc.oldmalloc_increase_limit / ((gc_params.oldmalloc_limit_growth_factor - 1)/10 + 1));
6338  if (objspace->rgengc.oldmalloc_increase_limit < gc_params.oldmalloc_limit_min) {
6339  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
6340  }
6341  }
6342  }
6343 #endif
6344 }
6345 
6346 static int
6347 garbage_collect(rb_objspace_t *objspace, unsigned int reason)
6348 {
6349  int ret;
6350 
6351  int lev = rb_gc_vm_lock();
6352  {
6353 #if GC_PROFILE_MORE_DETAIL
6354  objspace->profile.prepare_time = getrusage_time();
6355 #endif
6356 
6357  gc_rest(objspace);
6358 
6359 #if GC_PROFILE_MORE_DETAIL
6360  objspace->profile.prepare_time = getrusage_time() - objspace->profile.prepare_time;
6361 #endif
6362 
6363  ret = gc_start(objspace, reason);
6364  }
6365  rb_gc_vm_unlock(lev);
6366 
6367  return ret;
6368 }
6369 
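/* Entry point for one GC cycle: decides whether this will be a full or minor
 * mark, whether marking is incremental, and whether compaction and immediate
 * sweep are enabled, then runs the marking phase and, if marking finished,
 * the sweep. */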
6370 static int
6371 gc_start(rb_objspace_t *objspace, unsigned int reason)
6372 {
6373  unsigned int do_full_mark = !!(reason & GPR_FLAG_FULL_MARK);
6374 
6375  /* reason may be clobbered later, so record immediate_sweep here */
6376  objspace->flags.immediate_sweep = !!(reason & GPR_FLAG_IMMEDIATE_SWEEP);
6377 
6378  if (!rb_darray_size(objspace->heap_pages.sorted)) return TRUE; /* heap is not ready */
6379  if (!(reason & GPR_FLAG_METHOD) && !ready_to_gc(objspace)) return TRUE; /* GC is not allowed */
6380 
6381  GC_ASSERT(gc_mode(objspace) == gc_mode_none);
6382  GC_ASSERT(!is_lazy_sweeping(objspace));
6383  GC_ASSERT(!is_incremental_marking(objspace));
6384 
6385  unsigned int lock_lev;
6386  gc_enter(objspace, gc_enter_event_start, &lock_lev);
6387 
6388 #if RGENGC_CHECK_MODE >= 2
6389  gc_verify_internal_consistency(objspace);
6390 #endif
6391 
6392  if (ruby_gc_stressful) {
6393  int flag = FIXNUM_P(ruby_gc_stress_mode) ? FIX2INT(ruby_gc_stress_mode) : 0;
6394 
6395  if ((flag & (1 << gc_stress_no_major)) == 0) {
6396  do_full_mark = TRUE;
6397  }
6398 
6399  objspace->flags.immediate_sweep = !(flag & (1<<gc_stress_no_immediate_sweep));
6400  }
6401 
6402  if (gc_needs_major_flags) {
6403  reason |= gc_needs_major_flags;
6404  do_full_mark = TRUE;
6405  }
6406 
6407  /* if major gc has been disabled, never do a full mark */
6408  if (!gc_config_full_mark_val) {
6409  do_full_mark = FALSE;
6410  }
6411  gc_needs_major_flags = GPR_FLAG_NONE;
6412 
6413  if (do_full_mark && (reason & GPR_FLAG_MAJOR_MASK) == 0) {
6414  reason |= GPR_FLAG_MAJOR_BY_FORCE; /* GC by CAPI, METHOD, and so on. */
6415  }
6416 
6417  if (objspace->flags.dont_incremental ||
6418  reason & GPR_FLAG_IMMEDIATE_MARK ||
6419  ruby_gc_stressful) {
6420  objspace->flags.during_incremental_marking = FALSE;
6421  }
6422  else {
6423  objspace->flags.during_incremental_marking = do_full_mark;
6424  }
6425 
6426  /* Enable compaction: autocompact on a full mark, or an explicit request (GC.compact) via GPR_FLAG_COMPACT */
6427  if (do_full_mark && ruby_enable_autocompact) {
6428  objspace->flags.during_compacting = TRUE;
6429 #if RGENGC_CHECK_MODE
6430  objspace->rcompactor.compare_func = ruby_autocompact_compare_func;
6431 #endif
6432  }
6433  else {
6434  objspace->flags.during_compacting = !!(reason & GPR_FLAG_COMPACT);
6435  }
6436 
6437  if (!GC_ENABLE_LAZY_SWEEP || objspace->flags.dont_incremental) {
6438  objspace->flags.immediate_sweep = TRUE;
6439  }
6440 
6441  if (objspace->flags.immediate_sweep) reason |= GPR_FLAG_IMMEDIATE_SWEEP;
6442 
6443  gc_report(1, objspace, "gc_start(reason: %x) => %u, %d, %d\n",
6444  reason,
6445  do_full_mark, !is_incremental_marking(objspace), objspace->flags.immediate_sweep);
6446 
6447 #if USE_DEBUG_COUNTER
6448  RB_DEBUG_COUNTER_INC(gc_count);
6449 
6450  if (reason & GPR_FLAG_MAJOR_MASK) {
6451  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_nofree, reason & GPR_FLAG_MAJOR_BY_NOFREE);
6452  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldgen, reason & GPR_FLAG_MAJOR_BY_OLDGEN);
6453  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_shady, reason & GPR_FLAG_MAJOR_BY_SHADY);
6454  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_force, reason & GPR_FLAG_MAJOR_BY_FORCE);
6455 #if RGENGC_ESTIMATE_OLDMALLOC
6456  (void)RB_DEBUG_COUNTER_INC_IF(gc_major_oldmalloc, reason & GPR_FLAG_MAJOR_BY_OLDMALLOC);
6457 #endif
6458  }
6459  else {
6460  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_newobj, reason & GPR_FLAG_NEWOBJ);
6461  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_malloc, reason & GPR_FLAG_MALLOC);
6462  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_method, reason & GPR_FLAG_METHOD);
6463  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_capi, reason & GPR_FLAG_CAPI);
6464  (void)RB_DEBUG_COUNTER_INC_IF(gc_minor_stress, reason & GPR_FLAG_STRESS);
6465  }
6466 #endif
6467 
6468  objspace->profile.count++;
6469  objspace->profile.latest_gc_info = reason;
6470  objspace->profile.total_allocated_objects_at_gc_start = total_allocated_objects(objspace);
6471  objspace->profile.heap_used_at_gc_start = rb_darray_size(objspace->heap_pages.sorted);
6472  objspace->profile.weak_references_count = 0;
6473  objspace->profile.retained_weak_references_count = 0;
6474  gc_prof_setup_new_record(objspace, reason);
6475  gc_reset_malloc_info(objspace, do_full_mark);
6476 
6477  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_START);
6478 
6479  GC_ASSERT(during_gc);
6480 
6481  gc_prof_timer_start(objspace);
6482  {
6483  if (gc_marks(objspace, do_full_mark)) {
6484  gc_sweep(objspace);
6485  }
6486  }
6487  gc_prof_timer_stop(objspace);
6488 
6489  gc_exit(objspace, gc_enter_event_start, &lock_lev);
6490  return TRUE;
6491 }
6492 
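/* Bring the GC to a quiescent state: finish any in-progress incremental
 * marking (followed by the sweep) and any remaining lazy sweeping. */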
6493 static void
6494 gc_rest(rb_objspace_t *objspace)
6495 {
6496  if (is_incremental_marking(objspace) || is_lazy_sweeping(objspace)) {
6497  unsigned int lock_lev;
6498  gc_enter(objspace, gc_enter_event_rest, &lock_lev);
6499 
6500  if (RGENGC_CHECK_MODE >= 2) gc_verify_internal_consistency(objspace);
6501 
6502  if (is_incremental_marking(objspace)) {
6503  gc_marking_enter(objspace);
6504  gc_marks_rest(objspace);
6505  gc_marking_exit(objspace);
6506 
6507  gc_sweep(objspace);
6508  }
6509 
6510  if (is_lazy_sweeping(objspace)) {
6511  gc_sweeping_enter(objspace);
6512  gc_sweep_rest(objspace);
6513  gc_sweeping_exit(objspace);
6514  }
6515 
6516  gc_exit(objspace, gc_enter_event_rest, &lock_lev);
6517  }
6518 }
6519 
6520 struct objspace_and_reason {
6521  rb_objspace_t *objspace;
6522  unsigned int reason;
6523 };
6524 
6525 static void
6526 gc_current_status_fill(rb_objspace_t *objspace, char *buff)
6527 {
6528  int i = 0;
6529  if (is_marking(objspace)) {
6530  buff[i++] = 'M';
6531  if (is_full_marking(objspace)) buff[i++] = 'F';
6532  if (is_incremental_marking(objspace)) buff[i++] = 'I';
6533  }
6534  else if (is_sweeping(objspace)) {
6535  buff[i++] = 'S';
6536  if (is_lazy_sweeping(objspace)) buff[i++] = 'L';
6537  }
6538  else {
6539  buff[i++] = 'N';
6540  }
6541  buff[i] = '\0';
6542 }
6543 
6544 static const char *
6545 gc_current_status(rb_objspace_t *objspace)
6546 {
6547  static char buff[0x10];
6548  gc_current_status_fill(objspace, buff);
6549  return buff;
6550 }
6551 
6552 #if PRINT_ENTER_EXIT_TICK
6553 
6554 static tick_t last_exit_tick;
6555 static tick_t enter_tick;
6556 static int enter_count = 0;
6557 static char last_gc_status[0x10];
6558 
6559 static inline void
6560 gc_record(rb_objspace_t *objspace, int direction, const char *event)
6561 {
6562  if (direction == 0) { /* enter */
6563  enter_count++;
6564  enter_tick = tick();
6565  gc_current_status_fill(objspace, last_gc_status);
6566  }
6567  else { /* exit */
6568  tick_t exit_tick = tick();
6569  char current_gc_status[0x10];
6570  gc_current_status_fill(objspace, current_gc_status);
6571 #if 1
6572  /* [last mutator time] [gc time] [event] */
6573  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6574  enter_tick - last_exit_tick,
6575  exit_tick - enter_tick,
6576  event,
6577  last_gc_status, current_gc_status,
6578  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6579  last_exit_tick = exit_tick;
6580 #else
6581  /* [enter_tick] [gc time] [event] */
6582  fprintf(stderr, "%"PRItick"\t%"PRItick"\t%s\t[%s->%s|%c]\n",
6583  enter_tick,
6584  exit_tick - enter_tick,
6585  event,
6586  last_gc_status, current_gc_status,
6587  (objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_MASK) ? '+' : '-');
6588 #endif
6589  }
6590 }
6591 #else /* PRINT_ENTER_EXIT_TICK */
6592 static inline void
6593 gc_record(rb_objspace_t *objspace, int direction, const char *event)
6594 {
6595  /* null */
6596 }
6597 #endif /* PRINT_ENTER_EXIT_TICK */
6598 
6599 static const char *
6600 gc_enter_event_cstr(enum gc_enter_event event)
6601 {
6602  switch (event) {
6603  case gc_enter_event_start: return "start";
6604  case gc_enter_event_continue: return "continue";
6605  case gc_enter_event_rest: return "rest";
6606  case gc_enter_event_finalizer: return "finalizer";
6607  }
6608  return NULL;
6609 }
6610 
6611 static void
6612 gc_enter_count(enum gc_enter_event event)
6613 {
6614  switch (event) {
6615  case gc_enter_event_start: RB_DEBUG_COUNTER_INC(gc_enter_start); break;
6616  case gc_enter_event_continue: RB_DEBUG_COUNTER_INC(gc_enter_continue); break;
6617  case gc_enter_event_rest: RB_DEBUG_COUNTER_INC(gc_enter_rest); break;
6618  case gc_enter_event_finalizer: RB_DEBUG_COUNTER_INC(gc_enter_finalizer); break;
6619  }
6620 }
6621 
6622 static bool current_process_time(struct timespec *ts);
6623 
6624 static void
6625 gc_clock_start(struct timespec *ts)
6626 {
6627  if (!current_process_time(ts)) {
6628  ts->tv_sec = 0;
6629  ts->tv_nsec = 0;
6630  }
6631 }
6632 
6633 static unsigned long long
6634 gc_clock_end(struct timespec *ts)
6635 {
6636  struct timespec end_time;
6637 
6638  if ((ts->tv_sec > 0 || ts->tv_nsec > 0) &&
6639  current_process_time(&end_time) &&
6640  end_time.tv_sec >= ts->tv_sec) {
6641  return (unsigned long long)(end_time.tv_sec - ts->tv_sec) * (1000 * 1000 * 1000) +
6642  (end_time.tv_nsec - ts->tv_nsec);
6643  }
6644 
6645  return 0;
6646 }
6647 
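/* gc_enter()/gc_exit() bracket every GC phase: they take the VM lock, issue a
 * VM barrier for events that must stop other ractors, toggle during_gc, and
 * fire the RUBY_INTERNAL_EVENT_GC_ENTER/GC_EXIT hooks. */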
6648 static inline void
6649 gc_enter(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
6650 {
6651  *lock_lev = rb_gc_vm_lock();
6652 
6653  switch (event) {
6654  case gc_enter_event_rest:
6655  if (!is_marking(objspace)) break;
6656  // fall through
6657  case gc_enter_event_start:
6658  case gc_enter_event_continue:
6659  // stop other ractors
6660  rb_gc_vm_barrier();
6661  break;
6662  default:
6663  break;
6664  }
6665 
6666  gc_enter_count(event);
6667  if (RB_UNLIKELY(during_gc != 0)) rb_bug("during_gc != 0");
6668  if (RGENGC_CHECK_MODE >= 3) gc_verify_internal_consistency(objspace);
6669 
6670  during_gc = TRUE;
6671  RUBY_DEBUG_LOG("%s (%s)",gc_enter_event_cstr(event), gc_current_status(objspace));
6672  gc_report(1, objspace, "gc_enter: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
6673  gc_record(objspace, 0, gc_enter_event_cstr(event));
6674 
6675  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_ENTER);
6676 }
6677 
6678 static inline void
6679 gc_exit(rb_objspace_t *objspace, enum gc_enter_event event, unsigned int *lock_lev)
6680 {
6681  GC_ASSERT(during_gc != 0);
6682 
6683  rb_gc_event_hook(0, RUBY_INTERNAL_EVENT_GC_EXIT);
6684 
6685  gc_record(objspace, 1, gc_enter_event_cstr(event));
6686  RUBY_DEBUG_LOG("%s (%s)", gc_enter_event_cstr(event), gc_current_status(objspace));
6687  gc_report(1, objspace, "gc_exit: %s [%s]\n", gc_enter_event_cstr(event), gc_current_status(objspace));
6688  during_gc = FALSE;
6689 
6690  rb_gc_vm_unlock(*lock_lev);
6691 }
6692 
6693 #ifndef MEASURE_GC
6694 #define MEASURE_GC (objspace->flags.measure_gc)
6695 #endif
6696 
6697 static void
6698 gc_marking_enter(rb_objspace_t *objspace)
6699 {
6700  GC_ASSERT(during_gc != 0);
6701 
6702  if (MEASURE_GC) {
6703  gc_clock_start(&objspace->profile.marking_start_time);
6704  }
6705 }
6706 
6707 static void
6708 gc_marking_exit(rb_objspace_t *objspace)
6709 {
6710  GC_ASSERT(during_gc != 0);
6711 
6712  if (MEASURE_GC) {
6713  objspace->profile.marking_time_ns += gc_clock_end(&objspace->profile.marking_start_time);
6714  }
6715 }
6716 
6717 static void
6718 gc_sweeping_enter(rb_objspace_t *objspace)
6719 {
6720  GC_ASSERT(during_gc != 0);
6721 
6722  if (MEASURE_GC) {
6723  gc_clock_start(&objspace->profile.sweeping_start_time);
6724  }
6725 }
6726 
6727 static void
6728 gc_sweeping_exit(rb_objspace_t *objspace)
6729 {
6730  GC_ASSERT(during_gc != 0);
6731 
6732  if (MEASURE_GC) {
6733  objspace->profile.sweeping_time_ns += gc_clock_end(&objspace->profile.sweeping_start_time);
6734  }
6735 }
6736 
6737 static void *
6738 gc_with_gvl(void *ptr)
6739 {
6740  struct objspace_and_reason *oar = (struct objspace_and_reason *)ptr;
6741  return (void *)(VALUE)garbage_collect(oar->objspace, oar->reason);
6742 }
6743 
6744 int ruby_thread_has_gvl_p(void);
6745 
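/* Run a GC from a context that may not hold the GVL: call garbage_collect()
 * directly when the GVL is held, otherwise re-acquire it via
 * rb_thread_call_with_gvl(). Threads not registered with Ruby cannot run GC
 * here, so the process reports a fatal allocation failure and exits. */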
6746 static int
6747 garbage_collect_with_gvl(rb_objspace_t *objspace, unsigned int reason)
6748 {
6749  if (dont_gc_val()) return TRUE;
6750  if (ruby_thread_has_gvl_p()) {
6751  return garbage_collect(objspace, reason);
6752  }
6753  else {
6754  if (ruby_native_thread_p()) {
6755  struct objspace_and_reason oar;
6756  oar.objspace = objspace;
6757  oar.reason = reason;
6758  return (int)(VALUE)rb_thread_call_with_gvl(gc_with_gvl, (void *)&oar);
6759  }
6760  else {
6761  /* no ruby thread */
6762  fprintf(stderr, "[FATAL] failed to allocate memory\n");
6763  exit(EXIT_FAILURE);
6764  }
6765  }
6766 }
6767 
6768 static int
6769 gc_set_candidate_object_i(void *vstart, void *vend, size_t stride, void *data)
6770 {
6771  rb_objspace_t *objspace = data;
6772 
6773  VALUE v = (VALUE)vstart;
6774  for (; v != (VALUE)vend; v += stride) {
6775  asan_unpoisoning_object(v) {
6776  switch (BUILTIN_TYPE(v)) {
6777  case T_NONE:
6778  case T_ZOMBIE:
6779  break;
6780  default:
6781  rb_gc_prepare_heap_process_object(v);
6782  if (!RVALUE_OLD_P(objspace, v) && !RVALUE_WB_UNPROTECTED(objspace, v)) {
6783  RVALUE_AGE_SET_CANDIDATE(objspace, v);
6784  }
6785  }
6786  }
6787  }
6788 
6789  return 0;
6790 }
6791 
6792 void
6793 rb_gc_impl_start(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact)
6794 {
6795  rb_objspace_t *objspace = objspace_ptr;
6796  unsigned int reason = (GPR_FLAG_FULL_MARK |
6797  GPR_FLAG_IMMEDIATE_MARK |
6798  GPR_FLAG_IMMEDIATE_SWEEP |
6799  GPR_FLAG_METHOD);
6800 
6801  int full_marking_p = gc_config_full_mark_val;
6802  gc_config_full_mark_set(TRUE);
6803 
6804  /* For now, compact implies full mark / sweep, so ignore other flags */
6805  if (compact) {
6806  GC_ASSERT(GC_COMPACTION_SUPPORTED);
6807 
6808  reason |= GPR_FLAG_COMPACT;
6809  }
6810  else {
6811  if (!full_mark) reason &= ~GPR_FLAG_FULL_MARK;
6812  if (!immediate_mark) reason &= ~GPR_FLAG_IMMEDIATE_MARK;
6813  if (!immediate_sweep) reason &= ~GPR_FLAG_IMMEDIATE_SWEEP;
6814  }
6815 
6816  garbage_collect(objspace, reason);
6817  gc_finalize_deferred(objspace);
6818 
6819  gc_config_full_mark_set(full_marking_p);
6820 }
6821 
6822 void
6823 rb_gc_impl_prepare_heap(void *objspace_ptr)
6824 {
6825  rb_objspace_t *objspace = objspace_ptr;
6826 
6827  size_t orig_total_slots = objspace_available_slots(objspace);
6828  size_t orig_allocatable_slots = objspace->heap_pages.allocatable_slots;
6829 
6830  rb_gc_impl_each_objects(objspace, gc_set_candidate_object_i, objspace_ptr);
6831 
6832  double orig_max_free_slots = gc_params.heap_free_slots_max_ratio;
6833  /* Ensure that all empty pages are moved onto empty_pages. */
6834  gc_params.heap_free_slots_max_ratio = 0.0;
6835  rb_gc_impl_start(objspace, true, true, true, true);
6836  gc_params.heap_free_slots_max_ratio = orig_max_free_slots;
6837 
6838  objspace->heap_pages.allocatable_slots = 0;
6839  heap_pages_free_unused_pages(objspace_ptr);
6840  GC_ASSERT(objspace->empty_pages_count == 0);
6841  objspace->heap_pages.allocatable_slots = orig_allocatable_slots;
6842 
6843  size_t total_slots = objspace_available_slots(objspace);
6844  if (orig_total_slots > total_slots) {
6845  objspace->heap_pages.allocatable_slots += orig_total_slots - total_slots;
6846  }
6847 
6848 #if defined(HAVE_MALLOC_TRIM) && !defined(RUBY_ALTERNATIVE_MALLOC_HEADER)
6849  malloc_trim(0);
6850 #endif
6851 }
6852 
6853 static int
6854 gc_is_moveable_obj(rb_objspace_t *objspace, VALUE obj)
6855 {
6856  GC_ASSERT(!SPECIAL_CONST_P(obj));
6857 
6858  switch (BUILTIN_TYPE(obj)) {
6859  case T_NONE:
6860  case T_MOVED:
6861  case T_ZOMBIE:
6862  return FALSE;
6863  case T_SYMBOL:
6864  // TODO: restore original behavior
6865  // if (RSYMBOL(obj)->id & ~ID_SCOPE_MASK) {
6866  // return FALSE;
6867  // }
6868  return false;
6869  /* fall through */
6870  case T_STRING:
6871  case T_OBJECT:
6872  case T_FLOAT:
6873  case T_IMEMO:
6874  case T_ARRAY:
6875  case T_BIGNUM:
6876  case T_ICLASS:
6877  case T_MODULE:
6878  case T_REGEXP:
6879  case T_DATA:
6880  case T_MATCH:
6881  case T_STRUCT:
6882  case T_HASH:
6883  case T_FILE:
6884  case T_COMPLEX:
6885  case T_RATIONAL:
6886  case T_NODE:
6887  case T_CLASS:
6888  if (FL_TEST(obj, FL_FINALIZE)) {
6889  /* The finalizer table is a numtable. It looks up objects by address.
6890  * We can't mark the keys in the finalizer table because that would
6891  * prevent the objects from being collected. This check prevents
6892  * objects that are keys in the finalizer table from being moved
6893  * without directly pinning them. */
6894  GC_ASSERT(st_is_member(finalizer_table, obj));
6895 
6896  return FALSE;
6897  }
6898  GC_ASSERT(RVALUE_MARKED(objspace, obj));
6899  GC_ASSERT(!RVALUE_PINNED(objspace, obj));
6900 
6901  return TRUE;
6902 
6903  default:
6904  rb_bug("gc_is_moveable_obj: unreachable (%d)", (int)BUILTIN_TYPE(obj));
6905  break;
6906  }
6907 
6908  return FALSE;
6909 }
6910 
6911 void rb_mv_generic_ivar(VALUE src, VALUE dst);
6912 
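/* Move one object during compaction: copy the payload (and RVALUE_OVERHEAD)
 * from src to dest, carry over the mark, wb-unprotected, uncollectible,
 * remembered and age bits, keep the object_id and generic-ivar tables in sync,
 * and turn the src slot into a T_MOVED forwarding stub pointing at dest. */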
6913 static VALUE
6914 gc_move(rb_objspace_t *objspace, VALUE src, VALUE dest, size_t src_slot_size, size_t slot_size)
6915 {
6916  int marked;
6917  int wb_unprotected;
6918  int uncollectible;
6919  int age;
6920 
6921  gc_report(4, objspace, "Moving object: %p -> %p\n", (void *)src, (void *)dest);
6922 
6923  GC_ASSERT(BUILTIN_TYPE(src) != T_NONE);
6924  GC_ASSERT(!MARKED_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest));
6925 
6926  GC_ASSERT(!RVALUE_MARKING(objspace, src));
6927 
6928  /* Save off bits for current object. */
6929  marked = RVALUE_MARKED(objspace, src);
6930  wb_unprotected = RVALUE_WB_UNPROTECTED(objspace, src);
6931  uncollectible = RVALUE_UNCOLLECTIBLE(objspace, src);
6932  bool remembered = RVALUE_REMEMBERED(objspace, src);
6933  age = RVALUE_AGE_GET(src);
6934 
6935  /* Clear bits for eventual T_MOVED */
6936  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(src), src);
6937  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(src), src);
6938  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(src), src);
6939  CLEAR_IN_BITMAP(GET_HEAP_PAGE(src)->remembered_bits, src);
6940 
6941  if (FL_TEST(src, FL_EXIVAR)) {
6942  /* Resizing the st table could cause a malloc */
6943  DURING_GC_COULD_MALLOC_REGION_START();
6944  {
6945  rb_mv_generic_ivar(src, dest);
6946  }
6947  DURING_GC_COULD_MALLOC_REGION_END();
6948  }
6949 
6950  if (FL_TEST(src, FL_SEEN_OBJ_ID)) {
6951  /* If the source object's object_id has been seen, we need to update
6952  * the object to object id mapping. */
6953  st_data_t srcid = (st_data_t)src, id;
6954 
6955  gc_report(4, objspace, "Moving object with seen id: %p -> %p\n", (void *)src, (void *)dest);
6956  /* Resizing the st table could cause a malloc */
6957  DURING_GC_COULD_MALLOC_REGION_START();
6958  {
6959  if (!st_delete(objspace->obj_to_id_tbl, &srcid, &id)) {
6960  rb_bug("gc_move: object ID seen, but not in mapping table: %s", rb_obj_info((VALUE)src));
6961  }
6962 
6963  st_insert(objspace->obj_to_id_tbl, (st_data_t)dest, id);
6964  }
6965  DURING_GC_COULD_MALLOC_REGION_END();
6966  }
6967  else {
6968  GC_ASSERT(!st_lookup(objspace->obj_to_id_tbl, (st_data_t)src, NULL));
6969  }
6970 
6971  /* Move the object */
6972  memcpy((void *)dest, (void *)src, MIN(src_slot_size, slot_size));
6973 
6974  if (RVALUE_OVERHEAD > 0) {
6975  void *dest_overhead = (void *)(((uintptr_t)dest) + slot_size - RVALUE_OVERHEAD);
6976  void *src_overhead = (void *)(((uintptr_t)src) + src_slot_size - RVALUE_OVERHEAD);
6977 
6978  memcpy(dest_overhead, src_overhead, RVALUE_OVERHEAD);
6979  }
6980 
6981  memset((void *)src, 0, src_slot_size);
6982  RVALUE_AGE_RESET(src);
6983 
6984  /* Set bits for object in new location */
6985  if (remembered) {
6986  MARK_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);
6987  }
6988  else {
6989  CLEAR_IN_BITMAP(GET_HEAP_PAGE(dest)->remembered_bits, dest);
6990  }
6991 
6992  if (marked) {
6993  MARK_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);
6994  }
6995  else {
6996  CLEAR_IN_BITMAP(GET_HEAP_MARK_BITS(dest), dest);
6997  }
6998 
6999  if (wb_unprotected) {
7000  MARK_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
7001  }
7002  else {
7003  CLEAR_IN_BITMAP(GET_HEAP_WB_UNPROTECTED_BITS(dest), dest);
7004  }
7005 
7006  if (uncollectible) {
7007  MARK_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
7008  }
7009  else {
7010  CLEAR_IN_BITMAP(GET_HEAP_UNCOLLECTIBLE_BITS(dest), dest);
7011  }
7012 
7013  RVALUE_AGE_SET(dest, age);
7014  /* Assign forwarding address */
7015  RMOVED(src)->flags = T_MOVED;
7016  RMOVED(src)->dummy = Qundef;
7017  RMOVED(src)->destination = dest;
7018  GC_ASSERT(BUILTIN_TYPE(dest) != T_NONE);
7019 
7020  GET_HEAP_PAGE(src)->heap->total_freed_objects++;
7021  GET_HEAP_PAGE(dest)->heap->total_allocated_objects++;
7022 
7023  return src;
7024 }
7025 
7026 #if GC_CAN_COMPILE_COMPACTION
7027 static int
7028 compare_pinned_slots(const void *left, const void *right, void *dummy)
7029 {
7030  struct heap_page *left_page;
7031  struct heap_page *right_page;
7032 
7033  left_page = *(struct heap_page * const *)left;
7034  right_page = *(struct heap_page * const *)right;
7035 
7036  return left_page->pinned_slots - right_page->pinned_slots;
7037 }
7038 
7039 static int
7040 compare_free_slots(const void *left, const void *right, void *dummy)
7041 {
7042  struct heap_page *left_page;
7043  struct heap_page *right_page;
7044 
7045  left_page = *(struct heap_page * const *)left;
7046  right_page = *(struct heap_page * const *)right;
7047 
7048  return left_page->free_slots - right_page->free_slots;
7049 }
7050 
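/* Reorder a heap's page list according to compare_func (compare_pinned_slots
 * or compare_free_slots above) and rebuild its free_pages list. */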
7051 static void
7052 gc_sort_heap_by_compare_func(rb_objspace_t *objspace, gc_compact_compare_func compare_func)
7053 {
7054  for (int j = 0; j < HEAP_COUNT; j++) {
7055  rb_heap_t *heap = &heaps[j];
7056 
7057  size_t total_pages = heap->total_pages;
7058  size_t size = rb_size_mul_or_raise(total_pages, sizeof(struct heap_page *), rb_eRuntimeError);
7059  struct heap_page *page = 0, **page_list = malloc(size);
7060  size_t i = 0;
7061 
7062  heap->free_pages = NULL;
7063  ccan_list_for_each(&heap->pages, page, page_node) {
7064  page_list[i++] = page;
7065  GC_ASSERT(page);
7066  }
7067 
7068  GC_ASSERT((size_t)i == total_pages);
7069 
7070  /* Sort the heap so "filled pages" are first. `heap_add_page` adds to the
7071  * head of the list, so empty pages will end up at the start of the heap */
7072  ruby_qsort(page_list, total_pages, sizeof(struct heap_page *), compare_func, NULL);
7073 
7074  /* Reset the eden heap */
7075  ccan_list_head_init(&heap->pages);
7076 
7077  for (i = 0; i < total_pages; i++) {
7078  ccan_list_add(&heap->pages, &page_list[i]->page_node);
7079  if (page_list[i]->free_slots != 0) {
7080  heap_add_freepage(heap, page_list[i]);
7081  }
7082  }
7083 
7084  free(page_list);
7085  }
7086 }
7087 #endif
7088 
7089 bool
7090 rb_gc_impl_object_moved_p(void *objspace_ptr, VALUE obj)
7091 {
7092  return gc_object_moved_p(objspace_ptr, obj);
7093 }
7094 
7095 static int
7096 gc_ref_update(void *vstart, void *vend, size_t stride, rb_objspace_t *objspace, struct heap_page *page)
7097 {
7098  VALUE v = (VALUE)vstart;
7099 
7100  page->flags.has_uncollectible_wb_unprotected_objects = FALSE;
7101  page->flags.has_remembered_objects = FALSE;
7102 
7103  /* For each object on the page */
7104  for (; v != (VALUE)vend; v += stride) {
7105  asan_unpoisoning_object(v) {
7106  switch (BUILTIN_TYPE(v)) {
7107  case T_NONE:
7108  case T_MOVED:
7109  case T_ZOMBIE:
7110  break;
7111  default:
7112  if (RVALUE_WB_UNPROTECTED(objspace, v)) {
7113  page->flags.has_uncollectible_wb_unprotected_objects = TRUE;
7114  }
7115  if (RVALUE_REMEMBERED(objspace, v)) {
7116  page->flags.has_remembered_objects = TRUE;
7117  }
7118  if (page->flags.before_sweep) {
7119  if (RVALUE_MARKED(objspace, v)) {
7120  rb_gc_update_object_references(objspace, v);
7121  }
7122  }
7123  else {
7124  rb_gc_update_object_references(objspace, v);
7125  }
7126  }
7127  }
7128  }
7129 
7130  return 0;
7131 }
7132 
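/* Reference-updating pass of compaction: walk every page and rewrite pointers
 * to T_MOVED objects (skipping unmarked objects on pages not yet swept), then
 * update the obj_to_id/id_to_obj/finalizer tables and the VM-level references. */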
7133 static void
7134 gc_update_references(rb_objspace_t *objspace)
7135 {
7136  objspace->flags.during_reference_updating = true;
7137 
7138  struct heap_page *page = NULL;
7139 
7140  for (int i = 0; i < HEAP_COUNT; i++) {
7141  bool should_set_mark_bits = TRUE;
7142  rb_heap_t *heap = &heaps[i];
7143 
7144  ccan_list_for_each(&heap->pages, page, page_node) {
7145  uintptr_t start = (uintptr_t)page->start;
7146  uintptr_t end = start + (page->total_slots * heap->slot_size);
7147 
7148  gc_ref_update((void *)start, (void *)end, heap->slot_size, objspace, page);
7149  if (page == heap->sweeping_page) {
7150  should_set_mark_bits = FALSE;
7151  }
7152  if (should_set_mark_bits) {
7153  gc_setup_mark_bits(page);
7154  }
7155  }
7156  }
7157  gc_ref_update_table_values_only(objspace->obj_to_id_tbl);
7158  gc_update_table_refs(objspace->id_to_obj_tbl);
7159  gc_update_table_refs(finalizer_table);
7160 
7161  rb_gc_update_vm_references((void *)objspace);
7162 
7163  objspace->flags.during_reference_updating = false;
7164 }
7165 
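/* Post-compaction debug checks: rb_bug() if any root or any reachable object
 * still points at a T_MOVED forwarding stub, i.e. a reference that was not
 * updated. */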
7166 #if GC_CAN_COMPILE_COMPACTION
7167 static void
7168 root_obj_check_moved_i(const char *category, VALUE obj, void *data)
7169 {
7170  rb_objspace_t *objspace = data;
7171 
7172  if (gc_object_moved_p(objspace, obj)) {
7173  rb_bug("ROOT %s points to MOVED: %p -> %s", category, (void *)obj, rb_obj_info(rb_gc_impl_location(objspace, obj)));
7174  }
7175 }
7176 
7177 static void
7178 reachable_object_check_moved_i(VALUE ref, void *data)
7179 {
7180  VALUE parent = (VALUE)data;
7181  if (gc_object_moved_p(rb_gc_get_objspace(), ref)) {
7182  rb_bug("Object %s points to MOVED: %p -> %s", rb_obj_info(parent), (void *)ref, rb_obj_info(rb_gc_impl_location(rb_gc_get_objspace(), ref)));
7183  }
7184 }
7185 
7186 static int
7187 heap_check_moved_i(void *vstart, void *vend, size_t stride, void *data)
7188 {
7189  rb_objspace_t *objspace = data;
7190 
7191  VALUE v = (VALUE)vstart;
7192  for (; v != (VALUE)vend; v += stride) {
7193  if (gc_object_moved_p(objspace, v)) {
7194  /* Moved object still on the heap, something may have a reference. */
7195  }
7196  else {
7197  asan_unpoisoning_object(v) {
7198  switch (BUILTIN_TYPE(v)) {
7199  case T_NONE:
7200  case T_ZOMBIE:
7201  break;
7202  default:
7203  if (!rb_gc_impl_garbage_object_p(objspace, v)) {
7204  rb_objspace_reachable_objects_from(v, reachable_object_check_moved_i, (void *)v);
7205  }
7206  }
7207  }
7208  }
7209  }
7210 
7211  return 0;
7212 }
7213 #endif
7214 
7215 bool
7216 rb_gc_impl_during_gc_p(void *objspace_ptr)
7217 {
7218  rb_objspace_t *objspace = objspace_ptr;
7219 
7220  return during_gc;
7221 }
7222 
7223 #if RGENGC_PROFILE >= 2
7224 
7225 static const char*
7226 type_name(int type, VALUE obj)
7227 {
7228  switch ((enum ruby_value_type)type) {
7229  case RUBY_T_NONE: return "T_NONE";
7230  case RUBY_T_OBJECT: return "T_OBJECT";
7231  case RUBY_T_CLASS: return "T_CLASS";
7232  case RUBY_T_MODULE: return "T_MODULE";
7233  case RUBY_T_FLOAT: return "T_FLOAT";
7234  case RUBY_T_STRING: return "T_STRING";
7235  case RUBY_T_REGEXP: return "T_REGEXP";
7236  case RUBY_T_ARRAY: return "T_ARRAY";
7237  case RUBY_T_HASH: return "T_HASH";
7238  case RUBY_T_STRUCT: return "T_STRUCT";
7239  case RUBY_T_BIGNUM: return "T_BIGNUM";
7240  case RUBY_T_FILE: return "T_FILE";
7241  case RUBY_T_DATA: return "T_DATA";
7242  case RUBY_T_MATCH: return "T_MATCH";
7243  case RUBY_T_COMPLEX: return "T_COMPLEX";
7244  case RUBY_T_RATIONAL: return "T_RATIONAL";
7245  case RUBY_T_NIL: return "T_NIL";
7246  case RUBY_T_TRUE: return "T_TRUE";
7247  case RUBY_T_FALSE: return "T_FALSE";
7248  case RUBY_T_SYMBOL: return "T_SYMBOL";
7249  case RUBY_T_FIXNUM: return "T_FIXNUM";
7250  case RUBY_T_UNDEF: return "T_UNDEF";
7251  case RUBY_T_IMEMO: return "T_IMEMO";
7252  case RUBY_T_NODE: return "T_NODE";
7253  case RUBY_T_ICLASS: return "T_ICLASS";
7254  case RUBY_T_ZOMBIE: return "T_ZOMBIE";
7255  case RUBY_T_MOVED: return "T_MOVED";
7256  default: return "unknown";
7257  }
7258 }
7259 
7260 static void
7261 gc_count_add_each_types(VALUE hash, const char *name, const size_t *types)
7262 {
7263  VALUE result = rb_hash_new_with_size(T_MASK);
7264  int i;
7265  for (i=0; i<T_MASK; i++) {
7266  const char *type = type_name(i, 0);
7267  rb_hash_aset(result, ID2SYM(rb_intern(type)), SIZET2NUM(types[i]));
7268  }
7269  rb_hash_aset(hash, ID2SYM(rb_intern(name)), result);
7270 }
7271 #endif
7272 
7273 size_t
7274 rb_gc_impl_gc_count(void *objspace_ptr)
7275 {
7276  rb_objspace_t *objspace = objspace_ptr;
7277 
7278  return objspace->profile.count;
7279 }
7280 
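/* Decode GC reason flags into the values exposed by GC.latest_gc_info: when a
 * Symbol key is given the single matching value is returned, when a Hash is
 * given every key is filled in. */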
7281 static VALUE
7282 gc_info_decode(rb_objspace_t *objspace, const VALUE hash_or_key, const unsigned int orig_flags)
7283 {
7284  static VALUE sym_major_by = Qnil, sym_gc_by, sym_immediate_sweep, sym_have_finalizer, sym_state, sym_need_major_by;
7285  static VALUE sym_nofree, sym_oldgen, sym_shady, sym_force, sym_stress;
7286 #if RGENGC_ESTIMATE_OLDMALLOC
7287  static VALUE sym_oldmalloc;
7288 #endif
7289  static VALUE sym_newobj, sym_malloc, sym_method, sym_capi;
7290  static VALUE sym_none, sym_marking, sym_sweeping;
7291  static VALUE sym_weak_references_count, sym_retained_weak_references_count;
7292  VALUE hash = Qnil, key = Qnil;
7293  VALUE major_by, need_major_by;
7294  unsigned int flags = orig_flags ? orig_flags : objspace->profile.latest_gc_info;
7295 
7296  if (SYMBOL_P(hash_or_key)) {
7297  key = hash_or_key;
7298  }
7299  else if (RB_TYPE_P(hash_or_key, T_HASH)) {
7300  hash = hash_or_key;
7301  }
7302  else {
7303  rb_bug("gc_info_decode: non-hash or symbol given");
7304  }
7305 
7306  if (NIL_P(sym_major_by)) {
7307 #define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
7308  S(major_by);
7309  S(gc_by);
7310  S(immediate_sweep);
7311  S(have_finalizer);
7312  S(state);
7313  S(need_major_by);
7314 
7315  S(stress);
7316  S(nofree);
7317  S(oldgen);
7318  S(shady);
7319  S(force);
7320 #if RGENGC_ESTIMATE_OLDMALLOC
7321  S(oldmalloc);
7322 #endif
7323  S(newobj);
7324  S(malloc);
7325  S(method);
7326  S(capi);
7327 
7328  S(none);
7329  S(marking);
7330  S(sweeping);
7331 
7332  S(weak_references_count);
7333  S(retained_weak_references_count);
7334 #undef S
7335  }
7336 
7337 #define SET(name, attr) \
7338  if (key == sym_##name) \
7339  return (attr); \
7340  else if (hash != Qnil) \
7341  rb_hash_aset(hash, sym_##name, (attr));
7342 
7343  major_by =
7344  (flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7345  (flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7346  (flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7347  (flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7348 #if RGENGC_ESTIMATE_OLDMALLOC
7349  (flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7350 #endif
7351  Qnil;
7352  SET(major_by, major_by);
7353 
7354  if (orig_flags == 0) { /* set need_major_by only if flags not set explicitly */
7355  unsigned int need_major_flags = gc_needs_major_flags;
7356  need_major_by =
7357  (need_major_flags & GPR_FLAG_MAJOR_BY_NOFREE) ? sym_nofree :
7358  (need_major_flags & GPR_FLAG_MAJOR_BY_OLDGEN) ? sym_oldgen :
7359  (need_major_flags & GPR_FLAG_MAJOR_BY_SHADY) ? sym_shady :
7360  (need_major_flags & GPR_FLAG_MAJOR_BY_FORCE) ? sym_force :
7361 #if RGENGC_ESTIMATE_OLDMALLOC
7362  (need_major_flags & GPR_FLAG_MAJOR_BY_OLDMALLOC) ? sym_oldmalloc :
7363 #endif
7364  Qnil;
7365  SET(need_major_by, need_major_by);
7366  }
7367 
7368  SET(gc_by,
7369  (flags & GPR_FLAG_NEWOBJ) ? sym_newobj :
7370  (flags & GPR_FLAG_MALLOC) ? sym_malloc :
7371  (flags & GPR_FLAG_METHOD) ? sym_method :
7372  (flags & GPR_FLAG_CAPI) ? sym_capi :
7373  (flags & GPR_FLAG_STRESS) ? sym_stress :
7374  Qnil
7375  );
7376 
7377  SET(have_finalizer, (flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
7378  SET(immediate_sweep, (flags & GPR_FLAG_IMMEDIATE_SWEEP) ? Qtrue : Qfalse);
7379 
7380  if (orig_flags == 0) {
7381  SET(state, gc_mode(objspace) == gc_mode_none ? sym_none :
7382  gc_mode(objspace) == gc_mode_marking ? sym_marking : sym_sweeping);
7383  }
7384 
7385  SET(weak_references_count, LONG2FIX(objspace->profile.weak_references_count));
7386  SET(retained_weak_references_count, LONG2FIX(objspace->profile.retained_weak_references_count));
7387 #undef SET
7388 
7389  if (!NIL_P(key)) {
7390  // Matched key should return above
7391  return Qundef;
7392  }
7393 
7394  return hash;
7395 }
7396 
7397 VALUE
7398 rb_gc_impl_latest_gc_info(void *objspace_ptr, VALUE key)
7399 {
7400  rb_objspace_t *objspace = objspace_ptr;
7401 
7402  return gc_info_decode(objspace, key, 0);
7403 }
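
/* Illustrative only: gc_info_decode() above backs GC.latest_gc_info. Passing a Hash
 * (or nothing) fills in all keys; passing a Symbol returns that one value. The values
 * below are made up and depend on the most recent collection:
 *
 *   GC.latest_gc_info
 *   #=> {:major_by=>nil, :need_major_by=>nil, :gc_by=>:newobj,
 *   #    :have_finalizer=>false, :immediate_sweep=>false, :state=>:none,
 *   #    :weak_references_count=>0, :retained_weak_references_count=>0}
 *
 *   GC.latest_gc_info(:gc_by)   #=> :newobj
 */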
7404 
7405 
7406 enum gc_stat_sym {
7407  gc_stat_sym_count,
7408  gc_stat_sym_time,
7409  gc_stat_sym_marking_time,
7410  gc_stat_sym_sweeping_time,
7411  gc_stat_sym_heap_allocated_pages,
7412  gc_stat_sym_heap_empty_pages,
7413  gc_stat_sym_heap_allocatable_slots,
7414  gc_stat_sym_heap_available_slots,
7415  gc_stat_sym_heap_live_slots,
7416  gc_stat_sym_heap_free_slots,
7417  gc_stat_sym_heap_final_slots,
7418  gc_stat_sym_heap_marked_slots,
7419  gc_stat_sym_heap_eden_pages,
7420  gc_stat_sym_total_allocated_pages,
7421  gc_stat_sym_total_freed_pages,
7422  gc_stat_sym_total_allocated_objects,
7423  gc_stat_sym_total_freed_objects,
7424  gc_stat_sym_malloc_increase_bytes,
7425  gc_stat_sym_malloc_increase_bytes_limit,
7426  gc_stat_sym_minor_gc_count,
7427  gc_stat_sym_major_gc_count,
7428  gc_stat_sym_compact_count,
7429  gc_stat_sym_read_barrier_faults,
7430  gc_stat_sym_total_moved_objects,
7431  gc_stat_sym_remembered_wb_unprotected_objects,
7432  gc_stat_sym_remembered_wb_unprotected_objects_limit,
7433  gc_stat_sym_old_objects,
7434  gc_stat_sym_old_objects_limit,
7435 #if RGENGC_ESTIMATE_OLDMALLOC
7436  gc_stat_sym_oldmalloc_increase_bytes,
7437  gc_stat_sym_oldmalloc_increase_bytes_limit,
7438 #endif
7439  gc_stat_sym_weak_references_count,
7440 #if RGENGC_PROFILE
7441  gc_stat_sym_total_generated_normal_object_count,
7442  gc_stat_sym_total_generated_shady_object_count,
7443  gc_stat_sym_total_shade_operation_count,
7444  gc_stat_sym_total_promoted_count,
7445  gc_stat_sym_total_remembered_normal_object_count,
7446  gc_stat_sym_total_remembered_shady_object_count,
7447 #endif
7448  gc_stat_sym_last
7449 };
7450 
7451 static VALUE gc_stat_symbols[gc_stat_sym_last];
7452 
7453 static void
7454 setup_gc_stat_symbols(void)
7455 {
7456  if (gc_stat_symbols[0] == 0) {
7457 #define S(s) gc_stat_symbols[gc_stat_sym_##s] = ID2SYM(rb_intern_const(#s))
7458  S(count);
7459  S(time);
7460  S(marking_time);
7461  S(sweeping_time);
7462  S(heap_allocated_pages);
7463  S(heap_empty_pages);
7464  S(heap_allocatable_slots);
7465  S(heap_available_slots);
7466  S(heap_live_slots);
7467  S(heap_free_slots);
7468  S(heap_final_slots);
7469  S(heap_marked_slots);
7470  S(heap_eden_pages);
7471  S(total_allocated_pages);
7472  S(total_freed_pages);
7473  S(total_allocated_objects);
7474  S(total_freed_objects);
7475  S(malloc_increase_bytes);
7476  S(malloc_increase_bytes_limit);
7477  S(minor_gc_count);
7478  S(major_gc_count);
7479  S(compact_count);
7480  S(read_barrier_faults);
7481  S(total_moved_objects);
7482  S(remembered_wb_unprotected_objects);
7483  S(remembered_wb_unprotected_objects_limit);
7484  S(old_objects);
7485  S(old_objects_limit);
7486 #if RGENGC_ESTIMATE_OLDMALLOC
7487  S(oldmalloc_increase_bytes);
7488  S(oldmalloc_increase_bytes_limit);
7489 #endif
7490  S(weak_references_count);
7491 #if RGENGC_PROFILE
7492  S(total_generated_normal_object_count);
7493  S(total_generated_shady_object_count);
7494  S(total_shade_operation_count);
7495  S(total_promoted_count);
7496  S(total_remembered_normal_object_count);
7497  S(total_remembered_shady_object_count);
7498 #endif /* RGENGC_PROFILE */
7499 #undef S
7500  }
7501 }
7502 
7503 static uint64_t
7504 ns_to_ms(uint64_t ns)
7505 {
7506  return ns / (1000 * 1000);
7507 }
7508 
7509 VALUE
7510 rb_gc_impl_stat(void *objspace_ptr, VALUE hash_or_sym)
7511 {
7512  rb_objspace_t *objspace = objspace_ptr;
7513  VALUE hash = Qnil, key = Qnil;
7514 
7515  setup_gc_stat_symbols();
7516 
7517  if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7518  hash = hash_or_sym;
7519  }
7520  else if (SYMBOL_P(hash_or_sym)) {
7521  key = hash_or_sym;
7522  }
7523  else {
7524  rb_bug("non-hash or symbol given");
7525  }
7526 
7527 #define SET(name, attr) \
7528  if (key == gc_stat_symbols[gc_stat_sym_##name]) \
7529  return SIZET2NUM(attr); \
7530  else if (hash != Qnil) \
7531  rb_hash_aset(hash, gc_stat_symbols[gc_stat_sym_##name], SIZET2NUM(attr));
7532 
7533  SET(count, objspace->profile.count);
7534  SET(time, (size_t)ns_to_ms(objspace->profile.marking_time_ns + objspace->profile.sweeping_time_ns)); // TODO: UINT64T2NUM
7535  SET(marking_time, (size_t)ns_to_ms(objspace->profile.marking_time_ns));
7536  SET(sweeping_time, (size_t)ns_to_ms(objspace->profile.sweeping_time_ns));
7537 
7538  /* implementation dependent counters */
7539  SET(heap_allocated_pages, rb_darray_size(objspace->heap_pages.sorted));
7540  SET(heap_empty_pages, objspace->empty_pages_count);
7541  SET(heap_allocatable_slots, objspace->heap_pages.allocatable_slots);
7542  SET(heap_available_slots, objspace_available_slots(objspace));
7543  SET(heap_live_slots, objspace_live_slots(objspace));
7544  SET(heap_free_slots, objspace_free_slots(objspace));
7545  SET(heap_final_slots, total_final_slots_count(objspace));
7546  SET(heap_marked_slots, objspace->marked_slots);
7547  SET(heap_eden_pages, heap_eden_total_pages(objspace));
7548  SET(total_allocated_pages, objspace->heap_pages.allocated_pages);
7549  SET(total_freed_pages, objspace->heap_pages.freed_pages);
7550  SET(total_allocated_objects, total_allocated_objects(objspace));
7551  SET(total_freed_objects, total_freed_objects(objspace));
7552  SET(malloc_increase_bytes, malloc_increase);
7553  SET(malloc_increase_bytes_limit, malloc_limit);
7554  SET(minor_gc_count, objspace->profile.minor_gc_count);
7555  SET(major_gc_count, objspace->profile.major_gc_count);
7556  SET(compact_count, objspace->profile.compact_count);
7557  SET(read_barrier_faults, objspace->profile.read_barrier_faults);
7558  SET(total_moved_objects, objspace->rcompactor.total_moved);
7559  SET(remembered_wb_unprotected_objects, objspace->rgengc.uncollectible_wb_unprotected_objects);
7560  SET(remembered_wb_unprotected_objects_limit, objspace->rgengc.uncollectible_wb_unprotected_objects_limit);
7561  SET(old_objects, objspace->rgengc.old_objects);
7562  SET(old_objects_limit, objspace->rgengc.old_objects_limit);
7563 #if RGENGC_ESTIMATE_OLDMALLOC
7564  SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
7565  SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
7566 #endif
7567 
7568 #if RGENGC_PROFILE
7569  SET(total_generated_normal_object_count, objspace->profile.total_generated_normal_object_count);
7570  SET(total_generated_shady_object_count, objspace->profile.total_generated_shady_object_count);
7571  SET(total_shade_operation_count, objspace->profile.total_shade_operation_count);
7572  SET(total_promoted_count, objspace->profile.total_promoted_count);
7573  SET(total_remembered_normal_object_count, objspace->profile.total_remembered_normal_object_count);
7574  SET(total_remembered_shady_object_count, objspace->profile.total_remembered_shady_object_count);
7575 #endif /* RGENGC_PROFILE */
7576 #undef SET
7577 
7578  if (!NIL_P(key)) {
7579  // Matched key should return above
7580  return Qundef;
7581  }
7582 
7583 #if defined(RGENGC_PROFILE) && RGENGC_PROFILE >= 2
7584  if (hash != Qnil) {
7585  gc_count_add_each_types(hash, "generated_normal_object_count_types", objspace->profile.generated_normal_object_count_types);
7586  gc_count_add_each_types(hash, "generated_shady_object_count_types", objspace->profile.generated_shady_object_count_types);
7587  gc_count_add_each_types(hash, "shade_operation_count_types", objspace->profile.shade_operation_count_types);
7588  gc_count_add_each_types(hash, "promoted_types", objspace->profile.promoted_types);
7589  gc_count_add_each_types(hash, "remembered_normal_object_count_types", objspace->profile.remembered_normal_object_count_types);
7590  gc_count_add_each_types(hash, "remembered_shady_object_count_types", objspace->profile.remembered_shady_object_count_types);
7591  }
7592 #endif
7593 
7594  return hash;
7595 }
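
/* Illustrative only: rb_gc_impl_stat() backs GC.stat. A Symbol argument returns a
 * single counter; a Hash argument is filled in place (numbers here are made up):
 *
 *   GC.stat(:count)              #=> 12
 *   GC.stat[:heap_live_slots]    #=> 21903
 *   stats = {}; GC.stat(stats); stats[:major_gc_count]   #=> 3
 */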
7596 
7597 enum gc_stat_heap_sym {
7598  gc_stat_heap_sym_slot_size,
7599  gc_stat_heap_sym_heap_eden_pages,
7600  gc_stat_heap_sym_heap_eden_slots,
7601  gc_stat_heap_sym_total_allocated_pages,
7602  gc_stat_heap_sym_force_major_gc_count,
7603  gc_stat_heap_sym_force_incremental_marking_finish_count,
7604  gc_stat_heap_sym_total_allocated_objects,
7605  gc_stat_heap_sym_total_freed_objects,
7606  gc_stat_heap_sym_last
7607 };
7608 
7609 static VALUE gc_stat_heap_symbols[gc_stat_heap_sym_last];
7610 
7611 static void
7612 setup_gc_stat_heap_symbols(void)
7613 {
7614  if (gc_stat_heap_symbols[0] == 0) {
7615 #define S(s) gc_stat_heap_symbols[gc_stat_heap_sym_##s] = ID2SYM(rb_intern_const(#s))
7616  S(slot_size);
7617  S(heap_eden_pages);
7618  S(heap_eden_slots);
7619  S(total_allocated_pages);
7620  S(force_major_gc_count);
7621  S(force_incremental_marking_finish_count);
7622  S(total_allocated_objects);
7623  S(total_freed_objects);
7624 #undef S
7625  }
7626 }
7627 
7628 static VALUE
7629 stat_one_heap(rb_heap_t *heap, VALUE hash, VALUE key)
7630 {
7631 #define SET(name, attr) \
7632  if (key == gc_stat_heap_symbols[gc_stat_heap_sym_##name]) \
7633  return SIZET2NUM(attr); \
7634  else if (hash != Qnil) \
7635  rb_hash_aset(hash, gc_stat_heap_symbols[gc_stat_heap_sym_##name], SIZET2NUM(attr));
7636 
7637  SET(slot_size, heap->slot_size);
7638  SET(heap_eden_pages, heap->total_pages);
7639  SET(heap_eden_slots, heap->total_slots);
7640  SET(total_allocated_pages, heap->total_allocated_pages);
7641  SET(force_major_gc_count, heap->force_major_gc_count);
7642  SET(force_incremental_marking_finish_count, heap->force_incremental_marking_finish_count);
7643  SET(total_allocated_objects, heap->total_allocated_objects);
7644  SET(total_freed_objects, heap->total_freed_objects);
7645 #undef SET
7646 
7647  if (!NIL_P(key)) {
7648  // Matched key should return above
7649  return Qundef;
7650  }
7651 
7652  return hash;
7653 }
7654 
7655 VALUE
7656 rb_gc_impl_stat_heap(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym)
7657 {
7658  rb_objspace_t *objspace = objspace_ptr;
7659 
7660  setup_gc_stat_heap_symbols();
7661 
7662  if (NIL_P(heap_name)) {
7663  if (!RB_TYPE_P(hash_or_sym, T_HASH)) {
7664  rb_bug("non-hash given");
7665  }
7666 
7667  for (int i = 0; i < HEAP_COUNT; i++) {
7668  VALUE hash = rb_hash_aref(hash_or_sym, INT2FIX(i));
7669  if (NIL_P(hash)) {
7670  hash = rb_hash_new();
7671  rb_hash_aset(hash_or_sym, INT2FIX(i), hash);
7672  }
7673 
7674  stat_one_heap(&heaps[i], hash, Qnil);
7675  }
7676  }
7677  else if (FIXNUM_P(heap_name)) {
7678  int heap_idx = FIX2INT(heap_name);
7679 
7680  if (heap_idx < 0 || heap_idx >= HEAP_COUNT) {
7681  rb_raise(rb_eArgError, "size pool index out of range");
7682  }
7683 
7684  if (SYMBOL_P(hash_or_sym)) {
7685  return stat_one_heap(&heaps[heap_idx], Qnil, hash_or_sym);
7686  }
7687  else if (RB_TYPE_P(hash_or_sym, T_HASH)) {
7688  return stat_one_heap(&heaps[heap_idx], hash_or_sym, Qnil);
7689  }
7690  else {
7691  rb_bug("non-hash or symbol given");
7692  }
7693  }
7694  else {
7695  rb_bug("heap_name must be nil or an Integer");
7696  }
7697 
7698  return hash_or_sym;
7699 }
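
/* Illustrative only: rb_gc_impl_stat_heap() backs GC.stat_heap. With a heap index it
 * reports that heap; with nil it returns one sub-hash per heap (numbers made up):
 *
 *   GC.stat_heap(0, :slot_size)   #=> 40
 *   GC.stat_heap(0)               #=> {:slot_size=>40, :heap_eden_pages=>..., ...}
 *   GC.stat_heap                  #=> {0=>{...}, 1=>{...}, 2=>{...}, 3=>{...}, 4=>{...}}
 */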
7700 
7701 /* I could include internal.h for this, but doing so undefines some Array macros
7702  * necessary for initialising objects, and I don't want to include all the array
7703  * headers to get them back
7704  * TODO: Investigate why RARRAY_AREF gets undefined in internal.h
7705  */
7706 #ifndef RBOOL
7707 #define RBOOL(v) (v ? Qtrue : Qfalse)
7708 #endif
7709 
7710 VALUE
7711 rb_gc_impl_config_get(void *objspace_ptr)
7712 {
7713 #define sym(name) ID2SYM(rb_intern_const(name))
7714  rb_objspace_t *objspace = objspace_ptr;
7715  VALUE hash = rb_hash_new();
7716 
7717  rb_hash_aset(hash, sym("rgengc_allow_full_mark"), RBOOL(gc_config_full_mark_val));
7718 
7719  return hash;
7720 }
7721 
7722 static int
7723 gc_config_set_key(st_data_t key, st_data_t value, st_data_t data)
7724 {
7725  rb_objspace_t *objspace = (rb_objspace_t *)data;
7726  if (rb_sym2id(key) == rb_intern("rgengc_allow_full_mark")) {
7727  gc_rest(objspace);
7728  gc_config_full_mark_set(RTEST(value));
7729  }
7730  return ST_CONTINUE;
7731 }
7732 
7733 void
7734 rb_gc_impl_config_set(void *objspace_ptr, VALUE hash)
7735 {
7736  rb_objspace_t *objspace = objspace_ptr;
7737 
7738  if (!RB_TYPE_P(hash, T_HASH)) {
7739  rb_raise(rb_eArgError, "expected keyword arguments");
7740  }
7741 
7742  rb_hash_stlike_foreach(hash, gc_config_set_key, (st_data_t)objspace);
7743 }
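
/* Illustrative only: these two functions back GC.config, which reads and updates the
 * configuration hash exposed by rb_gc_impl_config_get() above, for example:
 *
 *   GC.config                                   #=> {:rgengc_allow_full_mark=>true, ...}
 *   GC.config(rgengc_allow_full_mark: false)    # finish pending GC work, then disallow
 *                                               # full (major) marking
 */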
7744 
7745 VALUE
7746 rb_gc_impl_stress_get(void *objspace_ptr)
7747 {
7748  rb_objspace_t *objspace = objspace_ptr;
7749  return ruby_gc_stress_mode;
7750 }
7751 
7752 void
7753 rb_gc_impl_stress_set(void *objspace_ptr, VALUE flag)
7754 {
7755  rb_objspace_t *objspace = objspace_ptr;
7756 
7757  objspace->flags.gc_stressful = RTEST(flag);
7758  objspace->gc_stress_mode = flag;
7759 }
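
/* Illustrative only: GC.stress= feeds this setter. A truthy flag forces a collection at
 * every allocation opportunity:
 *
 *   GC.stress = true
 *   GC.stress = false
 *
 * An Integer flag is kept as-is and interpreted as a bitmask of stress options, such as
 * the full-mark-after-malloc bit tested by gc_stress_full_mark_after_malloc_p() below.
 */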
7760 
7761 static int
7762 get_envparam_size(const char *name, size_t *default_value, size_t lower_bound)
7763 {
7764  const char *ptr = getenv(name);
7765  ssize_t val;
7766 
7767  if (ptr != NULL && *ptr) {
7768  size_t unit = 0;
7769  char *end;
7770 #if SIZEOF_SIZE_T == SIZEOF_LONG_LONG
7771  val = strtoll(ptr, &end, 0);
7772 #else
7773  val = strtol(ptr, &end, 0);
7774 #endif
7775  switch (*end) {
7776  case 'k': case 'K':
7777  unit = 1024;
7778  ++end;
7779  break;
7780  case 'm': case 'M':
7781  unit = 1024*1024;
7782  ++end;
7783  break;
7784  case 'g': case 'G':
7785  unit = 1024*1024*1024;
7786  ++end;
7787  break;
7788  }
7789  while (*end && isspace((unsigned char)*end)) end++;
7790  if (*end) {
7791  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7792  return 0;
7793  }
7794  if (unit > 0) {
7795  if (val < -(ssize_t)(SIZE_MAX / 2 / unit) || (ssize_t)(SIZE_MAX / 2 / unit) < val) {
7796  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%s is ignored because it overflows\n", name, ptr);
7797  return 0;
7798  }
7799  val *= unit;
7800  }
7801  if (val > 0 && (size_t)val > lower_bound) {
7802  if (RTEST(ruby_verbose)) {
7803  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE")\n", name, val, *default_value);
7804  }
7805  *default_value = (size_t)val;
7806  return 1;
7807  }
7808  else {
7809  if (RTEST(ruby_verbose)) {
7810  fprintf(stderr, "%s=%"PRIdSIZE" (default value: %"PRIuSIZE") is ignored because it must be greater than %"PRIuSIZE".\n",
7811  name, val, *default_value, lower_bound);
7812  }
7813  return 0;
7814  }
7815  }
7816  return 0;
7817 }
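
/* Worked example (illustrative) of the suffix handling above:
 *   RUBY_GC_MALLOC_LIMIT=16m   parses to 16 * 1024 * 1024 = 16777216 bytes
 *   RUBY_GC_MALLOC_LIMIT=128k  parses to 128 * 1024       = 131072 bytes
 * A value with trailing junk after the suffix, or one not greater than the lower
 * bound, is ignored (with a diagnostic on stderr when $VERBOSE is set). */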
7818 
7819 static int
7820 get_envparam_double(const char *name, double *default_value, double lower_bound, double upper_bound, int accept_zero)
7821 {
7822  const char *ptr = getenv(name);
7823  double val;
7824 
7825  if (ptr != NULL && *ptr) {
7826  char *end;
7827  val = strtod(ptr, &end);
7828  if (!*ptr || *end) {
7829  if (RTEST(ruby_verbose)) fprintf(stderr, "invalid string for %s: %s\n", name, ptr);
7830  return 0;
7831  }
7832 
7833  if (accept_zero && val == 0.0) {
7834  goto accept;
7835  }
7836  else if (val <= lower_bound) {
7837  if (RTEST(ruby_verbose)) {
7838  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be greater than %f.\n",
7839  name, val, *default_value, lower_bound);
7840  }
7841  }
7842  else if (upper_bound != 0.0 && /* ignore upper_bound if it is 0.0 */
7843  val > upper_bound) {
7844  if (RTEST(ruby_verbose)) {
7845  fprintf(stderr, "%s=%f (default value: %f) is ignored because it must be lower than %f.\n",
7846  name, val, *default_value, upper_bound);
7847  }
7848  }
7849  else {
7850  goto accept;
7851  }
7852  }
7853  return 0;
7854 
7855  accept:
7856  if (RTEST(ruby_verbose)) fprintf(stderr, "%s=%f (default value: %f)\n", name, val, *default_value);
7857  *default_value = val;
7858  return 1;
7859 }
7860 
7861 /*
7862  * GC tuning environment variables
7863  *
7864  * * RUBY_GC_HEAP_FREE_SLOTS
7865  * - Prepare at least this many free slots after GC.
7866  * - Allocate additional slots if there are not enough.
7867  * * RUBY_GC_HEAP_GROWTH_FACTOR (new from 2.1)
7868  * - Allocate slots by this factor.
7869  * - (next slots number) = (current slots number) * (this factor)
7870  * * RUBY_GC_HEAP_GROWTH_MAX_SLOTS (new from 2.1)
7871  * - The number of slots added per heap growth is capped at this value.
7872  * * RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO (new from 2.4)
7873  * - Allocate additional pages when the number of free slots is
7874  * lower than the value (total_slots * (this ratio)).
7875  * * RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO (new from 2.4)
7876  * - Allocate slots to satisfy this formula:
7877  * free_slots = total_slots * goal_ratio
7878  * - In other words, prepare (total_slots * goal_ratio) free slots.
7879  * - If this value is 0.0, then use RUBY_GC_HEAP_GROWTH_FACTOR directly.
7880  * * RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO (new from 2.4)
7881  * - Allow pages to be freed when the number of free slots is
7882  * greater than the value (total_slots * (this ratio)).
7883  * * RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR (new from 2.1.1)
7884  * - Do full GC when the number of old objects is more than R * N
7885  * where R is this factor and
7886  * N is the number of old objects just after last full GC.
7887  *
7888  * * obsolete
7889  * * RUBY_FREE_MIN -> RUBY_GC_HEAP_FREE_SLOTS (from 2.1)
7890  * * RUBY_HEAP_MIN_SLOTS -> RUBY_GC_HEAP_INIT_SLOTS (from 2.1)
7891  *
7892  * * RUBY_GC_MALLOC_LIMIT
7893  * * RUBY_GC_MALLOC_LIMIT_MAX (new from 2.1)
7894  * * RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7895  *
7896  * * RUBY_GC_OLDMALLOC_LIMIT (new from 2.1)
7897  * * RUBY_GC_OLDMALLOC_LIMIT_MAX (new from 2.1)
7898  * * RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR (new from 2.1)
7899  */
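
/* Illustrative only: these variables are read by rb_gc_impl_set_params() below during
 * GC initialization, so they must be set in the environment before the Ruby process
 * starts, e.g.:
 *
 *   RUBY_GC_HEAP_GROWTH_FACTOR=1.2 RUBY_GC_MALLOC_LIMIT=64m ruby app.rb
 */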
7900 
7901 void
7902 rb_gc_impl_set_params(void *objspace_ptr)
7903 {
7904  rb_objspace_t *objspace = objspace_ptr;
7905  /* RUBY_GC_HEAP_FREE_SLOTS */
7906  if (get_envparam_size("RUBY_GC_HEAP_FREE_SLOTS", &gc_params.heap_free_slots, 0)) {
7907  /* ok */
7908  }
7909 
7910  for (int i = 0; i < HEAP_COUNT; i++) {
7911  char env_key[sizeof("RUBY_GC_HEAP_" "_INIT_SLOTS") + DECIMAL_SIZE_OF_BITS(sizeof(int) * CHAR_BIT)];
7912  snprintf(env_key, sizeof(env_key), "RUBY_GC_HEAP_%d_INIT_SLOTS", i);
7913 
7914  get_envparam_size(env_key, &gc_params.heap_init_slots[i], 0);
7915  }
7916 
7917  get_envparam_double("RUBY_GC_HEAP_GROWTH_FACTOR", &gc_params.growth_factor, 1.0, 0.0, FALSE);
7918  get_envparam_size ("RUBY_GC_HEAP_GROWTH_MAX_SLOTS", &gc_params.growth_max_slots, 0);
7919  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MIN_RATIO", &gc_params.heap_free_slots_min_ratio,
7920  0.0, 1.0, FALSE);
7921  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_MAX_RATIO", &gc_params.heap_free_slots_max_ratio,
7922  gc_params.heap_free_slots_min_ratio, 1.0, FALSE);
7923  get_envparam_double("RUBY_GC_HEAP_FREE_SLOTS_GOAL_RATIO", &gc_params.heap_free_slots_goal_ratio,
7924  gc_params.heap_free_slots_min_ratio, gc_params.heap_free_slots_max_ratio, TRUE);
7925  get_envparam_double("RUBY_GC_HEAP_OLDOBJECT_LIMIT_FACTOR", &gc_params.oldobject_limit_factor, 0.0, 0.0, TRUE);
7926  get_envparam_double("RUBY_GC_HEAP_REMEMBERED_WB_UNPROTECTED_OBJECTS_LIMIT_RATIO", &gc_params.uncollectible_wb_unprotected_objects_limit_ratio, 0.0, 0.0, TRUE);
7927 
7928  if (get_envparam_size("RUBY_GC_MALLOC_LIMIT", &gc_params.malloc_limit_min, 0)) {
7929  malloc_limit = gc_params.malloc_limit_min;
7930  }
7931  get_envparam_size ("RUBY_GC_MALLOC_LIMIT_MAX", &gc_params.malloc_limit_max, 0);
7932  if (!gc_params.malloc_limit_max) { /* ignore max-check if 0 */
7933  gc_params.malloc_limit_max = SIZE_MAX;
7934  }
7935  get_envparam_double("RUBY_GC_MALLOC_LIMIT_GROWTH_FACTOR", &gc_params.malloc_limit_growth_factor, 1.0, 0.0, FALSE);
7936 
7937 #if RGENGC_ESTIMATE_OLDMALLOC
7938  if (get_envparam_size("RUBY_GC_OLDMALLOC_LIMIT", &gc_params.oldmalloc_limit_min, 0)) {
7939  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
7940  }
7941  get_envparam_size ("RUBY_GC_OLDMALLOC_LIMIT_MAX", &gc_params.oldmalloc_limit_max, 0);
7942  get_envparam_double("RUBY_GC_OLDMALLOC_LIMIT_GROWTH_FACTOR", &gc_params.oldmalloc_limit_growth_factor, 1.0, 0.0, FALSE);
7943 #endif
7944 }
7945 
7946 static inline size_t
7947 objspace_malloc_size(rb_objspace_t *objspace, void *ptr, size_t hint)
7948 {
7949 #ifdef HAVE_MALLOC_USABLE_SIZE
7950  return malloc_usable_size(ptr);
7951 #else
7952  return hint;
7953 #endif
7954 }
7955 
7956 enum memop_type {
7957  MEMOP_TYPE_MALLOC = 0,
7958  MEMOP_TYPE_FREE,
7959  MEMOP_TYPE_REALLOC
7960 };
7961 
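/* Subtracts `sub` from `*var` without letting the counter wrap below zero: the
 * subtrahend is clamped to the current value and the compare-and-swap is retried
 * until it wins against concurrent updates. */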
7962 static inline void
7963 atomic_sub_nounderflow(size_t *var, size_t sub)
7964 {
7965  if (sub == 0) return;
7966 
7967  while (1) {
7968  size_t val = *var;
7969  if (val < sub) sub = val;
7970  if (RUBY_ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
7971  }
7972 }
7973 
7974 #define gc_stress_full_mark_after_malloc_p() \
7975  (FIXNUM_P(ruby_gc_stress_mode) && (FIX2LONG(ruby_gc_stress_mode) & (1<<gc_stress_full_mark_after_malloc)))
7976 
7977 static void
7978 objspace_malloc_gc_stress(rb_objspace_t *objspace)
7979 {
7980  if (ruby_gc_stressful && ruby_native_thread_p()) {
7981  unsigned int reason = (GPR_FLAG_IMMEDIATE_MARK | GPR_FLAG_IMMEDIATE_SWEEP |
7982  GPR_FLAG_STRESS | GPR_FLAG_MALLOC);
7983 
7984  if (gc_stress_full_mark_after_malloc_p()) {
7985  reason |= GPR_FLAG_FULL_MARK;
7986  }
7987  garbage_collect_with_gvl(objspace, reason);
7988  }
7989 }
7990 
7991 static inline bool
7992 objspace_malloc_increase_report(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
7993 {
7994  if (0) fprintf(stderr, "increase - ptr: %p, type: %s, new_size: %"PRIdSIZE", old_size: %"PRIdSIZE"\n",
7995  mem,
7996  type == MEMOP_TYPE_MALLOC ? "malloc" :
7997  type == MEMOP_TYPE_FREE ? "free " :
7998  type == MEMOP_TYPE_REALLOC ? "realloc": "error",
7999  new_size, old_size);
8000  return false;
8001 }
8002 
8003 static bool
8004 objspace_malloc_increase_body(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
8005 {
8006  if (new_size > old_size) {
8007  RUBY_ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
8008 #if RGENGC_ESTIMATE_OLDMALLOC
8009  RUBY_ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
8010 #endif
8011  }
8012  else {
8013  atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
8014 #if RGENGC_ESTIMATE_OLDMALLOC
8015  atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
8016 #endif
8017  }
8018 
8019  if (type == MEMOP_TYPE_MALLOC) {
8020  retry:
8021  if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc_val()) {
8022  if (ruby_thread_has_gvl_p() && is_lazy_sweeping(objspace)) {
8023  gc_rest(objspace); /* gc_rest can reduce malloc_increase */
8024  goto retry;
8025  }
8026  garbage_collect_with_gvl(objspace, GPR_FLAG_MALLOC);
8027  }
8028  }
8029 
8030 #if MALLOC_ALLOCATED_SIZE
8031  if (new_size >= old_size) {
8032  RUBY_ATOMIC_SIZE_ADD(objspace->malloc_params.allocated_size, new_size - old_size);
8033  }
8034  else {
8035  size_t dec_size = old_size - new_size;
8036  size_t allocated_size = objspace->malloc_params.allocated_size;
8037 
8038 #if MALLOC_ALLOCATED_SIZE_CHECK
8039  if (allocated_size < dec_size) {
8040  rb_bug("objspace_malloc_increase: underflow malloc_params.allocated_size.");
8041  }
8042 #endif
8043  atomic_sub_nounderflow(&objspace->malloc_params.allocated_size, dec_size);
8044  }
8045 
8046  switch (type) {
8047  case MEMOP_TYPE_MALLOC:
8048  RUBY_ATOMIC_SIZE_INC(objspace->malloc_params.allocations);
8049  break;
8050  case MEMOP_TYPE_FREE:
8051  {
8052  size_t allocations = objspace->malloc_params.allocations;
8053  if (allocations > 0) {
8054  atomic_sub_nounderflow(&objspace->malloc_params.allocations, 1);
8055  }
8056 #if MALLOC_ALLOCATED_SIZE_CHECK
8057  else {
8058  GC_ASSERT(objspace->malloc_params.allocations > 0);
8059  }
8060 #endif
8061  }
8062  break;
8063  case MEMOP_TYPE_REALLOC: /* ignore */ break;
8064  }
8065 #endif
8066  return true;
8067 }
8068 
8069 #define objspace_malloc_increase(...) \
8070  for (bool malloc_increase_done = objspace_malloc_increase_report(__VA_ARGS__); \
8071  !malloc_increase_done; \
8072  malloc_increase_done = objspace_malloc_increase_body(__VA_ARGS__))
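
/* The degenerate for-loop above lets objspace_malloc_increase() be used either as a
 * plain statement or with an attached block: objspace_malloc_increase_report() (a
 * no-op that returns false) runs first, the caller's block runs exactly once, and
 * objspace_malloc_increase_body() runs when the loop re-evaluates its condition,
 * i.e. after the block -- see the free() wrapped in it inside rb_gc_impl_free(). */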
8073 
8074 struct malloc_obj_info { /* 4 words */
8075  size_t size;
8076 };
8077 
8078 static inline size_t
8079 objspace_malloc_prepare(rb_objspace_t *objspace, size_t size)
8080 {
8081  if (size == 0) size = 1;
8082 
8083 #if CALC_EXACT_MALLOC_SIZE
8084  size += sizeof(struct malloc_obj_info);
8085 #endif
8086 
8087  return size;
8088 }
8089 
8090 static bool
8091 malloc_during_gc_p(rb_objspace_t *objspace)
8092 {
8093  /* malloc is not allowed during GC when we're not using multiple ractors
8094  * (since ractors can run while another thread is sweeping) and when we
8095  * have the GVL (since if we don't have the GVL, we'll try to acquire the
8096  * GVL which will block and ensure the other thread finishes GC). */
8097  return during_gc && !dont_gc_val() && !rb_gc_multi_ractor_p() && ruby_thread_has_gvl_p();
8098 }
8099 
8100 static inline void *
8101 objspace_malloc_fixup(rb_objspace_t *objspace, void *mem, size_t size)
8102 {
8103  size = objspace_malloc_size(objspace, mem, size);
8104  objspace_malloc_increase(objspace, mem, size, 0, MEMOP_TYPE_MALLOC) {}
8105 
8106 #if CALC_EXACT_MALLOC_SIZE
8107  {
8108  struct malloc_obj_info *info = mem;
8109  info->size = size;
8110  mem = info + 1;
8111  }
8112 #endif
8113 
8114  return mem;
8115 }
8116 
8117 #if defined(__GNUC__) && RUBY_DEBUG
8118 #define RB_BUG_INSTEAD_OF_RB_MEMERROR 1
8119 #endif
8120 
8121 #ifndef RB_BUG_INSTEAD_OF_RB_MEMERROR
8122 # define RB_BUG_INSTEAD_OF_RB_MEMERROR 0
8123 #endif
8124 
8125 #define GC_MEMERROR(...) \
8126  ((RB_BUG_INSTEAD_OF_RB_MEMERROR+0) ? rb_bug("" __VA_ARGS__) : rb_memerror())
8127 
8128 #define TRY_WITH_GC(siz, expr) do { \
8129  const gc_profile_record_flag gpr = \
8130  GPR_FLAG_FULL_MARK | \
8131  GPR_FLAG_IMMEDIATE_MARK | \
8132  GPR_FLAG_IMMEDIATE_SWEEP | \
8133  GPR_FLAG_MALLOC; \
8134  objspace_malloc_gc_stress(objspace); \
8135  \
8136  if (RB_LIKELY((expr))) { \
8137  /* Success on 1st try */ \
8138  } \
8139  else if (!garbage_collect_with_gvl(objspace, gpr)) { \
8140  /* @shyouhei thinks this doesn't happen */ \
8141  GC_MEMERROR("TRY_WITH_GC: could not GC"); \
8142  } \
8143  else if ((expr)) { \
8144  /* Success on 2nd try */ \
8145  } \
8146  else { \
8147  GC_MEMERROR("TRY_WITH_GC: could not allocate:" \
8148  "%"PRIdSIZE" bytes for %s", \
8149  siz, # expr); \
8150  } \
8151  } while (0)
8152 
8153 static void
8154 check_malloc_not_in_gc(rb_objspace_t *objspace, const char *msg)
8155 {
8156  if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
8157  dont_gc_on();
8158  during_gc = false;
8159  rb_bug("Cannot %s during GC", msg);
8160  }
8161 }
8162 
8163 void
8164 rb_gc_impl_free(void *objspace_ptr, void *ptr, size_t old_size)
8165 {
8166  rb_objspace_t *objspace = objspace_ptr;
8167 
8168  if (!ptr) {
8169  /*
8170  * ISO/IEC 9899 says "If ptr is a null pointer, no action occurs" since
8171  * its first version, so we follow suit here.
8172  */
8173  return;
8174  }
8175 #if CALC_EXACT_MALLOC_SIZE
8176  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8177  ptr = info;
8178  old_size = info->size;
8179 #endif
8180  old_size = objspace_malloc_size(objspace, ptr, old_size);
8181 
8182  objspace_malloc_increase(objspace, ptr, 0, old_size, MEMOP_TYPE_FREE) {
8183  free(ptr);
8184  ptr = NULL;
8185  RB_DEBUG_COUNTER_INC(heap_xfree);
8186  }
8187 }
8188 
8189 void *
8190 rb_gc_impl_malloc(void *objspace_ptr, size_t size)
8191 {
8192  rb_objspace_t *objspace = objspace_ptr;
8193  check_malloc_not_in_gc(objspace, "malloc");
8194 
8195  void *mem;
8196 
8197  size = objspace_malloc_prepare(objspace, size);
8198  TRY_WITH_GC(size, mem = malloc(size));
8199  RB_DEBUG_COUNTER_INC(heap_xmalloc);
8200  return objspace_malloc_fixup(objspace, mem, size);
8201 }
8202 
8203 void *
8204 rb_gc_impl_calloc(void *objspace_ptr, size_t size)
8205 {
8206  rb_objspace_t *objspace = objspace_ptr;
8207 
8208  if (RB_UNLIKELY(malloc_during_gc_p(objspace))) {
8209  rb_warn("calloc during GC detected, this could cause crashes if it triggers another GC");
8210 #if RGENGC_CHECK_MODE || RUBY_DEBUG
8211  rb_bug("Cannot calloc during GC");
8212 #endif
8213  }
8214 
8215  void *mem;
8216 
8217  size = objspace_malloc_prepare(objspace, size);
8218  TRY_WITH_GC(size, mem = calloc1(size));
8219  return objspace_malloc_fixup(objspace, mem, size);
8220 }
8221 
8222 void *
8223 rb_gc_impl_realloc(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size)
8224 {
8225  rb_objspace_t *objspace = objspace_ptr;
8226 
8227  check_malloc_not_in_gc(objspace, "realloc");
8228 
8229  void *mem;
8230 
8231  if (!ptr) return rb_gc_impl_malloc(objspace, new_size);
8232 
8233  /*
8234  * The behavior of realloc(ptr, 0) is implementation defined.
8235  * Therefore we don't use realloc(ptr, 0), for portability reasons.
8236  * see http://www.open-std.org/jtc1/sc22/wg14/www/docs/dr_400.htm
8237  */
8238  if (new_size == 0) {
8239  if ((mem = rb_gc_impl_malloc(objspace, 0)) != NULL) {
8240  /*
8241  * - OpenBSD's malloc(3) man page says that when 0 is passed, it
8242  * returns a non-NULL pointer to an access-protected memory page.
8243  * The returned pointer cannot be read from or written to at all,
8244  * but it is still a valid argument to free().
8245  *
8246  * https://man.openbsd.org/malloc.3
8247  *
8248  * - Linux's malloc(3) man page says that it _may_ return
8249  * a non-NULL pointer when its argument is 0. That return value
8250  * is safe (and expected) to be passed to free().
8251  *
8252  * https://man7.org/linux/man-pages/man3/malloc.3.html
8253  *
8254  * - As far as we can tell from its implementation, jemalloc's malloc()
8255  * returns an ordinary 16-byte memory region when its argument is 0.
8256  *
8257  * - As far as we can tell from its implementation, musl libc's malloc()
8258  * returns an ordinary 32-byte memory region when its argument is 0.
8259  *
8260  * - Other malloc implementations can also return non-NULL.
8261  */
8262  rb_gc_impl_free(objspace, ptr, old_size);
8263  return mem;
8264  }
8265  else {
8266  /*
8267  * It is dangerous to return NULL here, because that could lead to
8268  * RCE. Fallback to 1 byte instead of zero.
8269  *
8270  * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-11932
8271  */
8272  new_size = 1;
8273  }
8274  }
8275 
8276 #if CALC_EXACT_MALLOC_SIZE
8277  {
8278  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
8279  new_size += sizeof(struct malloc_obj_info);
8280  ptr = info;
8281  old_size = info->size;
8282  }
8283 #endif
8284 
8285  old_size = objspace_malloc_size(objspace, ptr, old_size);
8286  TRY_WITH_GC(new_size, mem = RB_GNUC_EXTENSION_BLOCK(realloc(ptr, new_size)));
8287  new_size = objspace_malloc_size(objspace, mem, new_size);
8288 
8289 #if CALC_EXACT_MALLOC_SIZE
8290  {
8291  struct malloc_obj_info *info = mem;
8292  info->size = new_size;
8293  mem = info + 1;
8294  }
8295 #endif
8296 
8297  objspace_malloc_increase(objspace, mem, new_size, old_size, MEMOP_TYPE_REALLOC);
8298 
8299  RB_DEBUG_COUNTER_INC(heap_xrealloc);
8300  return mem;
8301 }
8302 
8303 void
8304 rb_gc_impl_adjust_memory_usage(void *objspace_ptr, ssize_t diff)
8305 {
8306  rb_objspace_t *objspace = objspace_ptr;
8307 
8308  if (diff > 0) {
8309  objspace_malloc_increase(objspace, 0, diff, 0, MEMOP_TYPE_REALLOC);
8310  }
8311  else if (diff < 0) {
8312  objspace_malloc_increase(objspace, 0, 0, -diff, MEMOP_TYPE_REALLOC);
8313  }
8314 }
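
/* Illustrative only: this is the backend of the public C API rb_gc_adjust_memory_usage(),
 * which extensions call to report off-heap memory they manage themselves so that it is
 * counted towards malloc_increase, e.g. (buf_len being whatever the extension allocated):
 *
 *   rb_gc_adjust_memory_usage((ssize_t)buf_len);     // memory acquired
 *   rb_gc_adjust_memory_usage(-(ssize_t)buf_len);    // memory released
 */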
8315 
8316 // TODO: move GC profiler stuff back into gc.c
8317 /*
8318  ------------------------------ GC profiler ------------------------------
8319 */
8320 
8321 #define GC_PROFILE_RECORD_DEFAULT_SIZE 100
8322 
8323 static bool
8324 current_process_time(struct timespec *ts)
8325 {
8326 #if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_PROCESS_CPUTIME_ID)
8327  {
8328  static int try_clock_gettime = 1;
8329  if (try_clock_gettime && clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ts) == 0) {
8330  return true;
8331  }
8332  else {
8333  try_clock_gettime = 0;
8334  }
8335  }
8336 #endif
8337 
8338 #ifdef RUSAGE_SELF
8339  {
8340  struct rusage usage;
8341  struct timeval time;
8342  if (getrusage(RUSAGE_SELF, &usage) == 0) {
8343  time = usage.ru_utime;
8344  ts->tv_sec = time.tv_sec;
8345  ts->tv_nsec = (int32_t)time.tv_usec * 1000;
8346  return true;
8347  }
8348  }
8349 #endif
8350 
8351 #ifdef _WIN32
8352  {
8353  FILETIME creation_time, exit_time, kernel_time, user_time;
8354  ULARGE_INTEGER ui;
8355 
8356  if (GetProcessTimes(GetCurrentProcess(),
8357  &creation_time, &exit_time, &kernel_time, &user_time) != 0) {
8358  memcpy(&ui, &user_time, sizeof(FILETIME));
8359 #define PER100NSEC (uint64_t)(1000 * 1000 * 10)
8360  ts->tv_nsec = (long)((ui.QuadPart % PER100NSEC) * 100); /* FILETIME counts 100ns ticks */
8361  ts->tv_sec = (time_t)(ui.QuadPart / PER100NSEC);
8362  return true;
8363  }
8364  }
8365 #endif
8366 
8367  return false;
8368 }
8369 
8370 static double
8371 getrusage_time(void)
8372 {
8373  struct timespec ts;
8374  if (current_process_time(&ts)) {
8375  return ts.tv_sec + ts.tv_nsec * 1e-9;
8376  }
8377  else {
8378  return 0.0;
8379  }
8380 }
8381 
8382 
8383 static inline void
8384 gc_prof_setup_new_record(rb_objspace_t *objspace, unsigned int reason)
8385 {
8386  if (objspace->profile.run) {
8387  size_t index = objspace->profile.next_index;
8388  gc_profile_record *record;
8389 
8390  /* create new record */
8391  objspace->profile.next_index++;
8392 
8393  if (!objspace->profile.records) {
8394  objspace->profile.size = GC_PROFILE_RECORD_DEFAULT_SIZE;
8395  objspace->profile.records = malloc(xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
8396  }
8397  if (index >= objspace->profile.size) {
8398  void *ptr;
8399  objspace->profile.size += 1000;
8400  ptr = realloc(objspace->profile.records, xmalloc2_size(sizeof(gc_profile_record), objspace->profile.size));
8401  if (!ptr) rb_memerror();
8402  objspace->profile.records = ptr;
8403  }
8404  if (!objspace->profile.records) {
8405  rb_bug("gc_profile malloc or realloc miss");
8406  }
8407  record = objspace->profile.current_record = &objspace->profile.records[objspace->profile.next_index - 1];
8408  MEMZERO(record, gc_profile_record, 1);
8409 
8410  /* setup before-GC parameter */
8411  record->flags = reason | (ruby_gc_stressful ? GPR_FLAG_STRESS : 0);
8412 #if MALLOC_ALLOCATED_SIZE
8413  record->allocated_size = malloc_allocated_size;
8414 #endif
8415 #if GC_PROFILE_MORE_DETAIL && GC_PROFILE_DETAIL_MEMORY
8416 #ifdef RUSAGE_SELF
8417  {
8418  struct rusage usage;
8419  if (getrusage(RUSAGE_SELF, &usage) == 0) {
8420  record->maxrss = usage.ru_maxrss;
8421  record->minflt = usage.ru_minflt;
8422  record->majflt = usage.ru_majflt;
8423  }
8424  }
8425 #endif
8426 #endif
8427  }
8428 }
8429 
8430 static inline void
8431 gc_prof_timer_start(rb_objspace_t *objspace)
8432 {
8433  if (gc_prof_enabled(objspace)) {
8434  gc_profile_record *record = gc_prof_record(objspace);
8435 #if GC_PROFILE_MORE_DETAIL
8436  record->prepare_time = objspace->profile.prepare_time;
8437 #endif
8438  record->gc_time = 0;
8439  record->gc_invoke_time = getrusage_time();
8440  }
8441 }
8442 
8443 static double
8444 elapsed_time_from(double time)
8445 {
8446  double now = getrusage_time();
8447  if (now > time) {
8448  return now - time;
8449  }
8450  else {
8451  return 0;
8452  }
8453 }
8454 
8455 static inline void
8456 gc_prof_timer_stop(rb_objspace_t *objspace)
8457 {
8458  if (gc_prof_enabled(objspace)) {
8459  gc_profile_record *record = gc_prof_record(objspace);
8460  record->gc_time = elapsed_time_from(record->gc_invoke_time);
8461  record->gc_invoke_time -= objspace->profile.invoke_time;
8462  }
8463 }
8464 
8465 #ifdef BUILDING_SHARED_GC
8466 # define RUBY_DTRACE_GC_HOOK(name)
8467 #else
8468 # define RUBY_DTRACE_GC_HOOK(name) \
8469  do {if (RUBY_DTRACE_GC_##name##_ENABLED()) RUBY_DTRACE_GC_##name();} while (0)
8470 #endif
8471 
8472 static inline void
8473 gc_prof_mark_timer_start(rb_objspace_t *objspace)
8474 {
8475  RUBY_DTRACE_GC_HOOK(MARK_BEGIN);
8476 #if GC_PROFILE_MORE_DETAIL
8477  if (gc_prof_enabled(objspace)) {
8478  gc_prof_record(objspace)->gc_mark_time = getrusage_time();
8479  }
8480 #endif
8481 }
8482 
8483 static inline void
8484 gc_prof_mark_timer_stop(rb_objspace_t *objspace)
8485 {
8486  RUBY_DTRACE_GC_HOOK(MARK_END);
8487 #if GC_PROFILE_MORE_DETAIL
8488  if (gc_prof_enabled(objspace)) {
8489  gc_profile_record *record = gc_prof_record(objspace);
8490  record->gc_mark_time = elapsed_time_from(record->gc_mark_time);
8491  }
8492 #endif
8493 }
8494 
8495 static inline void
8496 gc_prof_sweep_timer_start(rb_objspace_t *objspace)
8497 {
8498  RUBY_DTRACE_GC_HOOK(SWEEP_BEGIN);
8499  if (gc_prof_enabled(objspace)) {
8500  gc_profile_record *record = gc_prof_record(objspace);
8501 
8502  if (record->gc_time > 0 || GC_PROFILE_MORE_DETAIL) {
8503  objspace->profile.gc_sweep_start_time = getrusage_time();
8504  }
8505  }
8506 }
8507 
8508 static inline void
8509 gc_prof_sweep_timer_stop(rb_objspace_t *objspace)
8510 {
8511  RUBY_DTRACE_GC_HOOK(SWEEP_END);
8512 
8513  if (gc_prof_enabled(objspace)) {
8514  double sweep_time;
8515  gc_profile_record *record = gc_prof_record(objspace);
8516 
8517  if (record->gc_time > 0) {
8518  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
8519  /* need to accumulate GC time for lazy sweep after gc() */
8520  record->gc_time += sweep_time;
8521  }
8522  else if (GC_PROFILE_MORE_DETAIL) {
8523  sweep_time = elapsed_time_from(objspace->profile.gc_sweep_start_time);
8524  }
8525 
8526 #if GC_PROFILE_MORE_DETAIL
8527  record->gc_sweep_time += sweep_time;
8528  if (heap_pages_deferred_final) record->flags |= GPR_FLAG_HAVE_FINALIZE;
8529 #endif
8530  if (heap_pages_deferred_final) objspace->profile.latest_gc_info |= GPR_FLAG_HAVE_FINALIZE;
8531  }
8532 }
8533 
8534 static inline void
8535 gc_prof_set_malloc_info(rb_objspace_t *objspace)
8536 {
8537 #if GC_PROFILE_MORE_DETAIL
8538  if (gc_prof_enabled(objspace)) {
8539  gc_profile_record *record = gc_prof_record(objspace);
8540  record->allocate_increase = malloc_increase;
8541  record->allocate_limit = malloc_limit;
8542  }
8543 #endif
8544 }
8545 
8546 static inline void
8547 gc_prof_set_heap_info(rb_objspace_t *objspace)
8548 {
8549  if (gc_prof_enabled(objspace)) {
8550  gc_profile_record *record = gc_prof_record(objspace);
8551  size_t live = objspace->profile.total_allocated_objects_at_gc_start - total_freed_objects(objspace);
8552  size_t total = objspace->profile.heap_used_at_gc_start * HEAP_PAGE_OBJ_LIMIT;
8553 
8554 #if GC_PROFILE_MORE_DETAIL
8555  record->heap_use_pages = objspace->profile.heap_used_at_gc_start;
8556  record->heap_live_objects = live;
8557  record->heap_free_objects = total - live;
8558 #endif
8559 
8560  record->heap_total_objects = total;
8561  record->heap_use_size = live * BASE_SLOT_SIZE;
8562  record->heap_total_size = total * BASE_SLOT_SIZE;
8563  }
8564 }
8565 
8566 /*
8567  * call-seq:
8568  * GC::Profiler.clear -> nil
8569  *
8570  * Clears the \GC profiler data.
8571  *
8572  */
8573 
8574 static VALUE
8575 gc_profile_clear(VALUE _)
8576 {
8577  rb_objspace_t *objspace = rb_gc_get_objspace();
8578  void *p = objspace->profile.records;
8579  objspace->profile.records = NULL;
8580  objspace->profile.size = 0;
8581  objspace->profile.next_index = 0;
8582  objspace->profile.current_record = 0;
8583  free(p);
8584  return Qnil;
8585 }
8586 
8587 /*
8588  * call-seq:
8589  * GC::Profiler.raw_data -> [Hash, ...]
8590  *
8591  * Returns an Array of individual raw profile data Hashes ordered
8592  * from earliest to latest by +:GC_INVOKE_TIME+.
8593  *
8594  * For example:
8595  *
8596  * [
8597  * {
8598  * :GC_TIME=>1.3000000000000858e-05,
8599  * :GC_INVOKE_TIME=>0.010634999999999999,
8600  * :HEAP_USE_SIZE=>289640,
8601  * :HEAP_TOTAL_SIZE=>588960,
8602  * :HEAP_TOTAL_OBJECTS=>14724,
8603  * :GC_IS_MARKED=>false
8604  * },
8605  * # ...
8606  * ]
8607  *
8608  * The keys mean:
8609  *
8610  * +:GC_TIME+::
8611  * Time elapsed in seconds for this GC run
8612  * +:GC_INVOKE_TIME+::
8613  * Time elapsed in seconds from startup to when the GC was invoked
8614  * +:HEAP_USE_SIZE+::
8615  * Total bytes of heap used
8616  * +:HEAP_TOTAL_SIZE+::
8617  * Total size of heap in bytes
8618  * +:HEAP_TOTAL_OBJECTS+::
8619  * Total number of objects
8620  * +:GC_IS_MARKED+::
8621  * Returns +true+ if the GC is in mark phase
8622  *
8623  * If ruby was built with +GC_PROFILE_MORE_DETAIL+, you will also have access
8624  * to the following hash keys:
8625  *
8626  * +:GC_MARK_TIME+::
8627  * +:GC_SWEEP_TIME+::
8628  * +:ALLOCATE_INCREASE+::
8629  * +:ALLOCATE_LIMIT+::
8630  * +:HEAP_USE_PAGES+::
8631  * +:HEAP_LIVE_OBJECTS+::
8632  * +:HEAP_FREE_OBJECTS+::
8633  * +:HAVE_FINALIZE+::
8634  *
8635  */
8636 
8637 static VALUE
8638 gc_profile_record_get(VALUE _)
8639 {
8640  VALUE prof;
8641  VALUE gc_profile = rb_ary_new();
8642  size_t i;
8643  rb_objspace_t *objspace = rb_gc_get_objspace();
8644 
8645  if (!objspace->profile.run) {
8646  return Qnil;
8647  }
8648 
8649  for (i =0; i < objspace->profile.next_index; i++) {
8650  gc_profile_record *record = &objspace->profile.records[i];
8651 
8652  prof = rb_hash_new();
8653  rb_hash_aset(prof, ID2SYM(rb_intern("GC_FLAGS")), gc_info_decode(objspace, rb_hash_new(), record->flags));
8654  rb_hash_aset(prof, ID2SYM(rb_intern("GC_TIME")), DBL2NUM(record->gc_time));
8655  rb_hash_aset(prof, ID2SYM(rb_intern("GC_INVOKE_TIME")), DBL2NUM(record->gc_invoke_time));
8656  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_SIZE")), SIZET2NUM(record->heap_use_size));
8657  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_SIZE")), SIZET2NUM(record->heap_total_size));
8658  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_TOTAL_OBJECTS")), SIZET2NUM(record->heap_total_objects));
8659  rb_hash_aset(prof, ID2SYM(rb_intern("MOVED_OBJECTS")), SIZET2NUM(record->moved_objects));
8660  rb_hash_aset(prof, ID2SYM(rb_intern("GC_IS_MARKED")), Qtrue);
8661 #if GC_PROFILE_MORE_DETAIL
8662  rb_hash_aset(prof, ID2SYM(rb_intern("GC_MARK_TIME")), DBL2NUM(record->gc_mark_time));
8663  rb_hash_aset(prof, ID2SYM(rb_intern("GC_SWEEP_TIME")), DBL2NUM(record->gc_sweep_time));
8664  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_INCREASE")), SIZET2NUM(record->allocate_increase));
8665  rb_hash_aset(prof, ID2SYM(rb_intern("ALLOCATE_LIMIT")), SIZET2NUM(record->allocate_limit));
8666  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_USE_PAGES")), SIZET2NUM(record->heap_use_pages));
8667  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_LIVE_OBJECTS")), SIZET2NUM(record->heap_live_objects));
8668  rb_hash_aset(prof, ID2SYM(rb_intern("HEAP_FREE_OBJECTS")), SIZET2NUM(record->heap_free_objects));
8669 
8670  rb_hash_aset(prof, ID2SYM(rb_intern("REMOVING_OBJECTS")), SIZET2NUM(record->removing_objects));
8671  rb_hash_aset(prof, ID2SYM(rb_intern("EMPTY_OBJECTS")), SIZET2NUM(record->empty_objects));
8672 
8673  rb_hash_aset(prof, ID2SYM(rb_intern("HAVE_FINALIZE")), (record->flags & GPR_FLAG_HAVE_FINALIZE) ? Qtrue : Qfalse);
8674 #endif
8675 
8676 #if RGENGC_PROFILE > 0
8677  rb_hash_aset(prof, ID2SYM(rb_intern("OLD_OBJECTS")), SIZET2NUM(record->old_objects));
8678  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_NORMAL_OBJECTS")), SIZET2NUM(record->remembered_normal_objects));
8679  rb_hash_aset(prof, ID2SYM(rb_intern("REMEMBERED_SHADY_OBJECTS")), SIZET2NUM(record->remembered_shady_objects));
8680 #endif
8681  rb_ary_push(gc_profile, prof);
8682  }
8683 
8684  return gc_profile;
8685 }
8686 
8687 #if GC_PROFILE_MORE_DETAIL
8688 #define MAJOR_REASON_MAX 0x10
8689 
8690 static char *
8691 gc_profile_dump_major_reason(unsigned int flags, char *buff)
8692 {
8693  unsigned int reason = flags & GPR_FLAG_MAJOR_MASK;
8694  int i = 0;
8695 
8696  if (reason == GPR_FLAG_NONE) {
8697  buff[0] = '-';
8698  buff[1] = 0;
8699  }
8700  else {
8701 #define C(x, s) \
8702  if (reason & GPR_FLAG_MAJOR_BY_##x) { \
8703  buff[i++] = #x[0]; \
8704  if (i >= MAJOR_REASON_MAX) rb_bug("gc_profile_dump_major_reason: overflow"); \
8705  buff[i] = 0; \
8706  }
8707  C(NOFREE, N);
8708  C(OLDGEN, O);
8709  C(SHADY, S);
8710 #if RGENGC_ESTIMATE_OLDMALLOC
8711  C(OLDMALLOC, M);
8712 #endif
8713 #undef C
8714  }
8715  return buff;
8716 }
8717 #endif
8718 
8719 
8720 
8721 static void
8722 gc_profile_dump_on(VALUE out, VALUE (*append)(VALUE, VALUE))
8723 {
8724  rb_objspace_t *objspace = rb_gc_get_objspace();
8725  size_t count = objspace->profile.next_index;
8726 #ifdef MAJOR_REASON_MAX
8727  char reason_str[MAJOR_REASON_MAX];
8728 #endif
8729 
8730  if (objspace->profile.run && count /* > 1 */) {
8731  size_t i;
8732  const gc_profile_record *record;
8733 
8734  append(out, rb_sprintf("GC %"PRIuSIZE" invokes.\n", objspace->profile.count));
8735  append(out, rb_str_new_cstr("Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC Time(ms)\n"));
8736 
8737  for (i = 0; i < count; i++) {
8738  record = &objspace->profile.records[i];
8739  append(out, rb_sprintf("%5"PRIuSIZE" %19.3f %20"PRIuSIZE" %20"PRIuSIZE" %20"PRIuSIZE" %30.20f\n",
8740  i+1, record->gc_invoke_time, record->heap_use_size,
8741  record->heap_total_size, record->heap_total_objects, record->gc_time*1000));
8742  }
8743 
8744 #if GC_PROFILE_MORE_DETAIL
8745  const char *str = "\n\n" \
8746  "More detail.\n" \
8747  "Prepare Time = Previous GC's rest sweep time\n"
8748  "Index Flags Allocate Inc. Allocate Limit"
8749 #if CALC_EXACT_MALLOC_SIZE
8750  " Allocated Size"
8751 #endif
8752  " Use Page Mark Time(ms) Sweep Time(ms) Prepare Time(ms) LivingObj FreeObj RemovedObj EmptyObj"
8753 #if RGENGC_PROFILE
8754  " OldgenObj RemNormObj RemShadObj"
8755 #endif
8756 #if GC_PROFILE_DETAIL_MEMORY
8757  " MaxRSS(KB) MinorFLT MajorFLT"
8758 #endif
8759  "\n";
8760  append(out, rb_str_new_cstr(str));
8761 
8762  for (i = 0; i < count; i++) {
8763  record = &objspace->profile.records[i];
8764  append(out, rb_sprintf("%5"PRIuSIZE" %4s/%c/%6s%c %13"PRIuSIZE" %15"PRIuSIZE
8765 #if CALC_EXACT_MALLOC_SIZE
8766  " %15"PRIuSIZE
8767 #endif
8768  " %9"PRIuSIZE" %17.12f %17.12f %17.12f %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
8769 #if RGENGC_PROFILE
8770  "%10"PRIuSIZE" %10"PRIuSIZE" %10"PRIuSIZE
8771 #endif
8772 #if GC_PROFILE_DETAIL_MEMORY
8773  "%11ld %8ld %8ld"
8774 #endif
8775 
8776  "\n",
8777  i+1,
8778  gc_profile_dump_major_reason(record->flags, reason_str),
8779  (record->flags & GPR_FLAG_HAVE_FINALIZE) ? 'F' : '.',
8780  (record->flags & GPR_FLAG_NEWOBJ) ? "NEWOBJ" :
8781  (record->flags & GPR_FLAG_MALLOC) ? "MALLOC" :
8782  (record->flags & GPR_FLAG_METHOD) ? "METHOD" :
8783  (record->flags & GPR_FLAG_CAPI) ? "CAPI__" : "??????",
8784  (record->flags & GPR_FLAG_STRESS) ? '!' : ' ',
8785  record->allocate_increase, record->allocate_limit,
8786 #if CALC_EXACT_MALLOC_SIZE
8787  record->allocated_size,
8788 #endif
8789  record->heap_use_pages,
8790  record->gc_mark_time*1000,
8791  record->gc_sweep_time*1000,
8792  record->prepare_time*1000,
8793 
8794  record->heap_live_objects,
8795  record->heap_free_objects,
8796  record->removing_objects,
8797  record->empty_objects
8798 #if RGENGC_PROFILE
8799  ,
8800  record->old_objects,
8801  record->remembered_normal_objects,
8802  record->remembered_shady_objects
8803 #endif
8804 #if GC_PROFILE_DETAIL_MEMORY
8805  ,
8806  record->maxrss / 1024,
8807  record->minflt,
8808  record->majflt
8809 #endif
8810 
8811  ));
8812  }
8813 #endif
8814  }
8815 }
8816 
8817 /*
8818  * call-seq:
8819  * GC::Profiler.result -> String
8820  *
8821  * Returns a profile data report such as:
8822  *
8823  * GC 1 invokes.
8824  * Index Invoke Time(sec) Use Size(byte) Total Size(byte) Total Object GC time(ms)
8825  * 1 0.012 159240 212940 10647 0.00000000000001530000
8826  */
8827 
8828 static VALUE
8829 gc_profile_result(VALUE _)
8830 {
8831  VALUE str = rb_str_buf_new(0);
8832  gc_profile_dump_on(str, rb_str_buf_append);
8833  return str;
8834 }
8835 
8836 /*
8837  * call-seq:
8838  * GC::Profiler.report
8839  * GC::Profiler.report(io)
8840  *
8841  * Writes the GC::Profiler.result to <tt>$stdout</tt> or the given IO object.
8842  *
8843  */
8844 
8845 static VALUE
8846 gc_profile_report(int argc, VALUE *argv, VALUE self)
8847 {
8848  VALUE out;
8849 
8850  out = (!rb_check_arity(argc, 0, 1) ? rb_stdout : argv[0]);
8851  gc_profile_dump_on(out, rb_io_write);
8852 
8853  return Qnil;
8854 }
8855 
8856 /*
8857  * call-seq:
8858  * GC::Profiler.total_time -> float
8859  *
8860  * The total time used for garbage collection in seconds
8861  */
8862 
8863 static VALUE
8864 gc_profile_total_time(VALUE self)
8865 {
8866  double time = 0;
8867  rb_objspace_t *objspace = rb_gc_get_objspace();
8868 
8869  if (objspace->profile.run && objspace->profile.next_index > 0) {
8870  size_t i;
8871  size_t count = objspace->profile.next_index;
8872 
8873  for (i = 0; i < count; i++) {
8874  time += objspace->profile.records[i].gc_time;
8875  }
8876  }
8877  return DBL2NUM(time);
8878 }
8879 
8880 /*
8881  * call-seq:
8882  * GC::Profiler.enabled? -> true or false
8883  *
8884  * The current status of \GC profile mode.
8885  */
8886 
8887 static VALUE
8888 gc_profile_enable_get(VALUE self)
8889 {
8890  rb_objspace_t *objspace = rb_gc_get_objspace();
8891  return objspace->profile.run ? Qtrue : Qfalse;
8892 }
8893 
8894 /*
8895  * call-seq:
8896  * GC::Profiler.enable -> nil
8897  *
8898  * Starts the \GC profiler.
8899  *
8900  */
8901 
8902 static VALUE
8903 gc_profile_enable(VALUE _)
8904 {
8905  rb_objspace_t *objspace = rb_gc_get_objspace();
8906  objspace->profile.run = TRUE;
8907  objspace->profile.current_record = 0;
8908  return Qnil;
8909 }
8910 
8911 /*
8912  * call-seq:
8913  * GC::Profiler.disable -> nil
8914  *
8915  * Stops the \GC profiler.
8916  *
8917  */
8918 
8919 static VALUE
8920 gc_profile_disable(VALUE _)
8921 {
8922  rb_objspace_t *objspace = rb_gc_get_objspace();
8923 
8924  objspace->profile.run = FALSE;
8925  objspace->profile.current_record = 0;
8926  return Qnil;
8927 }
8928 
8929 /*
8930  * call-seq:
8931  * GC.verify_internal_consistency -> nil
8932  *
8933  * Verify internal consistency.
8934  *
8935  * This method is implementation specific.
8936  * Currently, this method checks generational consistency
8937  * if RGenGC is supported.
8938  */
8939 static VALUE
8940 gc_verify_internal_consistency_m(VALUE dummy)
8941 {
8942  gc_verify_internal_consistency(rb_gc_get_objspace());
8943  return Qnil;
8944 }
8945 
8946 #if GC_CAN_COMPILE_COMPACTION
8947 /*
8948  * call-seq:
8949  * GC.auto_compact = flag
8950  *
8951  * Updates automatic compaction mode.
8952  *
8953  * When enabled, the compactor will execute on every major collection.
8954  *
8955  * Enabling compaction will degrade performance on major collections.
8956  */
8957 static VALUE
8958 gc_set_auto_compact(VALUE _, VALUE v)
8959 {
8960  GC_ASSERT(GC_COMPACTION_SUPPORTED);
8961 
8962  ruby_enable_autocompact = RTEST(v);
8963 
8964 #if RGENGC_CHECK_MODE
8965  ruby_autocompact_compare_func = NULL;
8966 
8967  if (SYMBOL_P(v)) {
8968  ID id = RB_SYM2ID(v);
8969  if (id == rb_intern("empty")) {
8970  ruby_autocompact_compare_func = compare_free_slots;
8971  }
8972  }
8973 #endif
8974 
8975  return v;
8976 }
8977 #else
8978 # define gc_set_auto_compact rb_f_notimplement
8979 #endif
8980 
8981 #if GC_CAN_COMPILE_COMPACTION
8982 /*
8983  * call-seq:
8984  * GC.auto_compact -> true or false
8985  *
8986  * Returns whether or not automatic compaction has been enabled.
8987  */
8988 static VALUE
8989 gc_get_auto_compact(VALUE _)
8990 {
8991  return ruby_enable_autocompact ? Qtrue : Qfalse;
8992 }
8993 #else
8994 # define gc_get_auto_compact rb_f_notimplement
8995 #endif
8996 
8997 #if GC_CAN_COMPILE_COMPACTION
8998 /*
8999  * call-seq:
9000  * GC.latest_compact_info -> hash
9001  *
9002  * Returns information about objects moved in the most recent \GC compaction.
9003  *
9004  * The returned +hash+ contains the following keys:
9005  *
9006  * [considered]
9007  * Hash containing the type of the object as the key and the number of
9008  * objects of that type that were considered for movement.
9009  * [moved]
9010  * Hash containing the type of the object as the key and the number of
9011  * objects of that type that were actually moved.
9012  * [moved_up]
9013  * Hash containing the type of the object as the key and the number of
9014  * objects of that type that were increased in size.
9015  * [moved_down]
9016  * Hash containing the type of the object as the key and the number of
9017  * objects of that type that were decreased in size.
9018  *
9019  * Some objects can't be moved (due to pinning) so these numbers can be used to
9020  * calculate compaction efficiency.
9021  */
9022 static VALUE
9023 gc_compact_stats(VALUE self)
9024 {
9025  rb_objspace_t *objspace = rb_gc_get_objspace();
9026  VALUE h = rb_hash_new();
9027  VALUE considered = rb_hash_new();
9028  VALUE moved = rb_hash_new();
9029  VALUE moved_up = rb_hash_new();
9030  VALUE moved_down = rb_hash_new();
9031 
9032  for (size_t i = 0; i < T_MASK; i++) {
9033  if (objspace->rcompactor.considered_count_table[i]) {
9034  rb_hash_aset(considered, type_sym(i), SIZET2NUM(objspace->rcompactor.considered_count_table[i]));
9035  }
9036 
9037  if (objspace->rcompactor.moved_count_table[i]) {
9038  rb_hash_aset(moved, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_count_table[i]));
9039  }
9040 
9041  if (objspace->rcompactor.moved_up_count_table[i]) {
9042  rb_hash_aset(moved_up, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_up_count_table[i]));
9043  }
9044 
9045  if (objspace->rcompactor.moved_down_count_table[i]) {
9046  rb_hash_aset(moved_down, type_sym(i), SIZET2NUM(objspace->rcompactor.moved_down_count_table[i]));
9047  }
9048  }
9049 
9050  rb_hash_aset(h, ID2SYM(rb_intern("considered")), considered);
9051  rb_hash_aset(h, ID2SYM(rb_intern("moved")), moved);
9052  rb_hash_aset(h, ID2SYM(rb_intern("moved_up")), moved_up);
9053  rb_hash_aset(h, ID2SYM(rb_intern("moved_down")), moved_down);
9054 
9055  return h;
9056 }
9057 #else
9058 # define gc_compact_stats rb_f_notimplement
9059 #endif
9060 
9061 #if GC_CAN_COMPILE_COMPACTION
9062 /*
9063  * call-seq:
9064  * GC.compact -> hash
9065  *
9066  * This function compacts objects together in Ruby's heap. It eliminates
9067  * unused space (or fragmentation) in the heap by moving objects into that
9068  * unused space.
9069  *
9070  * The returned +hash+ contains statistics about the objects that were moved;
9071  * see GC.latest_compact_info.
9072  *
9073  * This method is only expected to work on CRuby.
9074  *
9075  * To test whether \GC compaction is supported, use the idiom:
9076  *
9077  * GC.respond_to?(:compact)
9078  */
9079 static VALUE
9080 gc_compact(VALUE self)
9081 {
9082  rb_objspace_t *objspace = rb_gc_get_objspace();
9083  int full_marking_p = gc_config_full_mark_val;
9084  gc_config_full_mark_set(TRUE);
9085 
9086  /* Run GC with compaction enabled */
9087  rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
9088  gc_config_full_mark_set(full_marking_p);
9089 
9090  return gc_compact_stats(self);
9091 }
9092 #else
9093 # define gc_compact rb_f_notimplement
9094 #endif
9095 
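The GC.respond_to?(:compact) idiom recommended above has a direct C-level counterpart, because methods defined as rb_f_notimplement report false from respond_to?. A minimal sketch; the compact_if_supported helper is hypothetical and not part of this file.

#include "ruby/ruby.h"

/* Illustrative sketch only: run compaction when supported, otherwise no-op. */
static VALUE
compact_if_supported(void)
{
    if (rb_respond_to(rb_mGC, rb_intern("compact"))) {
        return rb_funcall(rb_mGC, rb_intern("compact"), 0);  /* stats hash */
    }
    return Qnil;
}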
9096 #if GC_CAN_COMPILE_COMPACTION
9097 struct desired_compaction_pages_i_data {
9098  rb_objspace_t *objspace;
9099  size_t required_slots[HEAP_COUNT];
9100 };
9101 
9102 static int
9103 desired_compaction_pages_i(struct heap_page *page, void *data)
9104 {
9105  struct desired_compaction_pages_i_data *tdata = data;
9106  rb_objspace_t *objspace = tdata->objspace;
9107  VALUE vstart = (VALUE)page->start;
9108  VALUE vend = vstart + (VALUE)(page->total_slots * page->heap->slot_size);
9109 
9110 
9111  for (VALUE v = vstart; v != vend; v += page->heap->slot_size) {
9112  asan_unpoisoning_object(v) {
9113  /* skip T_NONEs; they won't be moved */
9114  if (BUILTIN_TYPE(v) != T_NONE) {
9115  rb_heap_t *dest_pool = gc_compact_destination_pool(objspace, page->heap, v);
9116  size_t dest_pool_idx = dest_pool - heaps;
9117  tdata->required_slots[dest_pool_idx]++;
9118  }
9119  }
9120  }
9121 
9122  return 0;
9123 }
9124 
9125 /* call-seq:
9126  * GC.verify_compaction_references(toward: nil, double_heap: false, expand_heap: false) -> hash
9127  *
9128  * Verify compaction reference consistency.
9129  *
9130  * This method is implementation specific. During compaction, objects that
9131  * were moved are replaced with T_MOVED objects. No object should have a
9132  * reference to a T_MOVED object after compaction.
9133  *
9134  * This function expands the heap to ensure room to move all objects,
9135  * compacts the heap to make sure everything moves, updates all references,
9136  * then performs a full \GC. If any object contains a reference to a T_MOVED
9137  * object, that object will be pushed onto the mark stack and will
9138  * trigger a SEGV.
9139  */
9140 static VALUE
9141 gc_verify_compaction_references(int argc, VALUE* argv, VALUE self)
9142 {
9143  static ID keywords[3] = {0};
9144  if (!keywords[0]) {
9145  keywords[0] = rb_intern("toward");
9146  keywords[1] = rb_intern("double_heap");
9147  keywords[2] = rb_intern("expand_heap");
9148  }
9149 
9150  VALUE options;
9151  rb_scan_args_kw(rb_keyword_given_p(), argc, argv, ":", &options);
9152 
9153  VALUE arguments[3] = { Qnil, Qfalse, Qfalse };
9154  int kwarg_count = rb_get_kwargs(options, keywords, 0, 3, arguments);
9155  bool toward_empty = kwarg_count > 0 && SYMBOL_P(arguments[0]) && SYM2ID(arguments[0]) == rb_intern("empty");
9156  bool expand_heap = (kwarg_count > 1 && RTEST(arguments[1])) || (kwarg_count > 2 && RTEST(arguments[2]));
9157 
9158  rb_objspace_t *objspace = rb_gc_get_objspace();
9159 
9160  /* Clear the heap. */
9161  rb_gc_impl_start(objspace, true, true, true, false);
9162 
9163  unsigned int lev = rb_gc_vm_lock();
9164  {
9165  gc_rest(objspace);
9166 
9167  /* if both double_heap and expand_heap are set, expand_heap takes precedence */
9168  if (expand_heap) {
9169  struct desired_compaction_pages_i_data desired_compaction = {
9170  .objspace = objspace,
9171  .required_slots = {0},
9172  };
9173  /* Work out how many objects want to be in each size pool, taking account of moves */
9174  objspace_each_pages(objspace, desired_compaction_pages_i, &desired_compaction, TRUE);
9175 
9176  /* Find out which pool has the most pages */
9177  size_t max_existing_pages = 0;
9178  for (int i = 0; i < HEAP_COUNT; i++) {
9179  rb_heap_t *heap = &heaps[i];
9180  max_existing_pages = MAX(max_existing_pages, heap->total_pages);
9181  }
9182 
9183  /* Add pages to each size pool so that compaction is guaranteed to move every object */
9184  for (int i = 0; i < HEAP_COUNT; i++) {
9185  rb_heap_t *heap = &heaps[i];
9186 
9187  size_t pages_to_add = 0;
9188  /*
9189  * Step 1: Make sure every pool has the same number of pages, by adding empty pages
9190  * to smaller pools. This is required to make sure the compact cursor can advance
9191  * through all of the pools in `gc_sweep_compact` without hitting the "sweep &
9192  * compact cursors met" condition on some pools before fully compacting others
9193  */
9194  pages_to_add += max_existing_pages - heap->total_pages;
9195  /*
9196  * Step 2: Now add additional free pages to each size pool sufficient to hold all objects
9197  * that want to be in that size pool, whether moved into it or moved within it
9198  */
9199  objspace->heap_pages.allocatable_slots = desired_compaction.required_slots[i];
9200  while (objspace->heap_pages.allocatable_slots > 0) {
9201  heap_page_allocate_and_initialize(objspace, heap);
9202  }
9203  /*
9204  * Step 3: Add two more pages so that the compact & sweep cursors will meet _after_ all objects
9205  * have been moved, and not on the last iteration of the `gc_sweep_compact` loop
9206  */
9207  pages_to_add += 2;
9208 
9209  for (; pages_to_add > 0; pages_to_add--) {
9210  heap_page_allocate_and_initialize_force(objspace, heap);
9211  }
9212  }
9213  }
9214 
9215  if (toward_empty) {
9216  objspace->rcompactor.compare_func = compare_free_slots;
9217  }
9218  }
9219  rb_gc_vm_unlock(lev);
9220 
9221  rb_gc_impl_start(rb_gc_get_objspace(), true, true, true, true);
9222 
9223  rb_objspace_reachable_objects_from_root(root_obj_check_moved_i, objspace);
9224  objspace_each_objects(objspace, heap_check_moved_i, objspace, TRUE);
9225 
9226  objspace->rcompactor.compare_func = NULL;
9227 
9228  return gc_compact_stats(self);
9229 }
9230 #else
9231 # define gc_verify_compaction_references rb_f_notimplement
9232 #endif
9233 
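Calling the verifier above from C with its toward: :empty keyword goes through the generic keyword-passing API. A minimal sketch using rb_funcallv_kw and RB_PASS_KEYWORDS; the verify_compaction_toward_empty helper is hypothetical, and the call is intentionally heavy (it triggers full GCs).

#include "ruby/ruby.h"

/* Illustrative sketch only: GC.verify_compaction_references(toward: :empty),
 * which (per the toward_empty branch above) packs objects toward the pages
 * with the most free slots. Returns the same stats hash as GC.compact. */
static VALUE
verify_compaction_toward_empty(void)
{
    VALUE kwargs = rb_hash_new();
    rb_hash_aset(kwargs, ID2SYM(rb_intern("toward")), ID2SYM(rb_intern("empty")));

    VALUE argv[1] = { kwargs };
    return rb_funcallv_kw(rb_mGC, rb_intern("verify_compaction_references"),
                          1, argv, RB_PASS_KEYWORDS);
}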
9234 void
9235 rb_gc_impl_objspace_free(void *objspace_ptr)
9236 {
9237  rb_objspace_t *objspace = objspace_ptr;
9238 
9239  if (is_lazy_sweeping(objspace))
9240  rb_bug("lazy sweeping underway when freeing object space");
9241 
9242  free(objspace->profile.records);
9243  objspace->profile.records = NULL;
9244 
9245  for (size_t i = 0; i < rb_darray_size(objspace->heap_pages.sorted); i++) {
9246  heap_page_free(objspace, rb_darray_get(objspace->heap_pages.sorted, i));
9247  }
9248  rb_darray_free(objspace->heap_pages.sorted);
9249  heap_pages_lomem = 0;
9250  heap_pages_himem = 0;
9251 
9252  for (int i = 0; i < HEAP_COUNT; i++) {
9253  rb_heap_t *heap = &heaps[i];
9254  heap->total_pages = 0;
9255  heap->total_slots = 0;
9256  }
9257 
9258  st_free_table(objspace->id_to_obj_tbl);
9259  st_free_table(objspace->obj_to_id_tbl);
9260 
9261  free_stack_chunks(&objspace->mark_stack);
9262  mark_stack_free_cache(&objspace->mark_stack);
9263 
9264  rb_darray_free(objspace->weak_references);
9265 
9266  free(objspace);
9267 }
9268 
9269 #if MALLOC_ALLOCATED_SIZE
9270 /*
9271  * call-seq:
9272  * GC.malloc_allocated_size -> Integer
9273  *
9274  * Returns the size of memory allocated by malloc().
9275  *
9276  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
9277  */
9278 
9279 static VALUE
9280 gc_malloc_allocated_size(VALUE self)
9281 {
9282  return UINT2NUM(rb_objspace.malloc_params.allocated_size);
9283 }
9284 
9285 /*
9286  * call-seq:
9287  * GC.malloc_allocations -> Integer
9288  *
9289  * Returns the number of malloc() allocations.
9290  *
9291  * Only available if ruby was built with +CALC_EXACT_MALLOC_SIZE+.
9292  */
9293 
9294 static VALUE
9295 gc_malloc_allocations(VALUE self)
9296 {
9297  return UINT2NUM(rb_objspace.malloc_params.allocations);
9298 }
9299 #endif
9300 
9301 void rb_gc_impl_before_fork(void *objspace_ptr) { /* no-op */ }
9302 void rb_gc_impl_after_fork(void *objspace_ptr, rb_pid_t pid) { /* no-op */ }
9303 
9304 void *
9305 rb_gc_impl_objspace_alloc(void)
9306 {
9307  rb_objspace_t *objspace = calloc1(sizeof(rb_objspace_t));
9308 
9309  return objspace;
9310 }
9311 
9312 void
9313 rb_gc_impl_objspace_init(void *objspace_ptr)
9314 {
9315  rb_objspace_t *objspace = objspace_ptr;
9316 
9317  gc_config_full_mark_set(TRUE);
9318 
9319  objspace->flags.measure_gc = true;
9320  malloc_limit = gc_params.malloc_limit_min;
9321  objspace->finalize_deferred_pjob = rb_postponed_job_preregister(0, gc_finalize_deferred, objspace);
9322  if (objspace->finalize_deferred_pjob == POSTPONED_JOB_HANDLE_INVALID) {
9323  rb_bug("Could not preregister postponed job for GC");
9324  }
9325 
9326  for (int i = 0; i < HEAP_COUNT; i++) {
9327  rb_heap_t *heap = &heaps[i];
9328 
9329  heap->slot_size = (1 << i) * BASE_SLOT_SIZE;
9330 
9331  ccan_list_head_init(&heap->pages);
9332  }
9333 
9334  rb_darray_make(&objspace->heap_pages.sorted, 0);
9335  rb_darray_make(&objspace->weak_references, 0);
9336 
9337  // TODO: debug why Ruby crashes on boot on Windows when GC is on.
9338 #ifdef _WIN32
9339  dont_gc_on();
9340 #endif
9341 
9342 #if defined(INIT_HEAP_PAGE_ALLOC_USE_MMAP)
9343  /* Need to determine if we can use mmap at runtime. */
9344  heap_page_alloc_use_mmap = INIT_HEAP_PAGE_ALLOC_USE_MMAP;
9345 #endif
9346  objspace->next_object_id = OBJ_ID_INITIAL;
9347  objspace->id_to_obj_tbl = st_init_table(&object_id_hash_type);
9348  objspace->obj_to_id_tbl = st_init_numtable();
9349 #if RGENGC_ESTIMATE_OLDMALLOC
9350  objspace->rgengc.oldmalloc_increase_limit = gc_params.oldmalloc_limit_min;
9351 #endif
9352  /* Set size pools allocatable pages. */
9353  for (int i = 0; i < HEAP_COUNT; i++) {
9354  /* Set the default value of heap_init_slots. */
9355  gc_params.heap_init_slots[i] = GC_HEAP_INIT_SLOTS;
9356  }
9357 
9358  init_mark_stack(&objspace->mark_stack);
9359 
9360  objspace->profile.invoke_time = getrusage_time();
9361  finalizer_table = st_init_numtable();
9362 }
9363 
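Taken together, rb_gc_impl_objspace_alloc, rb_gc_impl_objspace_init, and rb_gc_impl_objspace_free above form the objspace lifecycle of the gc/gc_impl.h interface. A minimal sketch of the expected ordering; objspace_lifecycle_sketch is hypothetical and only compiles inside the ruby source tree where the internal header is available.

#include "gc/gc_impl.h"  /* internal header declaring the rb_gc_impl_* API */

/* Illustrative sketch only: allocate a zeroed objspace, initialize its heaps,
 * mark stack, and tables, then tear everything down again. */
static void
objspace_lifecycle_sketch(void)
{
    void *objspace = rb_gc_impl_objspace_alloc();   /* calloc'd, no heaps yet */
    rb_gc_impl_objspace_init(objspace);             /* size pools, mark stack, ID tables */
    /* ... the VM allocates objects and runs collections here ... */
    rb_gc_impl_objspace_free(objspace);             /* frees pages, tables, and the objspace */
}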
9364 void
9365 rb_gc_impl_init(void)
9366 {
9367  VALUE gc_constants = rb_hash_new();
9368  rb_hash_aset(gc_constants, ID2SYM(rb_intern("DEBUG")), GC_DEBUG ? Qtrue : Qfalse);
9369  rb_hash_aset(gc_constants, ID2SYM(rb_intern("BASE_SLOT_SIZE")), SIZET2NUM(BASE_SLOT_SIZE - RVALUE_OVERHEAD));
9370  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OVERHEAD")), SIZET2NUM(RVALUE_OVERHEAD));
9371  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_OBJ_LIMIT")), SIZET2NUM(HEAP_PAGE_OBJ_LIMIT));
9372  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_BITMAP_SIZE")), SIZET2NUM(HEAP_PAGE_BITMAP_SIZE));
9373  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_PAGE_SIZE")), SIZET2NUM(HEAP_PAGE_SIZE));
9374  rb_hash_aset(gc_constants, ID2SYM(rb_intern("HEAP_COUNT")), LONG2FIX(HEAP_COUNT));
9375  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVARGC_MAX_ALLOCATE_SIZE")), LONG2FIX(heap_slot_size(HEAP_COUNT - 1)));
9376  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RVALUE_OLD_AGE")), LONG2FIX(RVALUE_OLD_AGE));
9377  if (RB_BUG_INSTEAD_OF_RB_MEMERROR+0) {
9378  rb_hash_aset(gc_constants, ID2SYM(rb_intern("RB_BUG_INSTEAD_OF_RB_MEMERROR")), Qtrue);
9379  }
9380  OBJ_FREEZE(gc_constants);
9381  /* Internal constants in the garbage collector. */
9382  rb_define_const(rb_mGC, "INTERNAL_CONSTANTS", gc_constants);
9383 
9384  if (GC_COMPACTION_SUPPORTED) {
9385  rb_define_singleton_method(rb_mGC, "compact", gc_compact, 0);
9386  rb_define_singleton_method(rb_mGC, "auto_compact", gc_get_auto_compact, 0);
9387  rb_define_singleton_method(rb_mGC, "auto_compact=", gc_set_auto_compact, 1);
9388  rb_define_singleton_method(rb_mGC, "latest_compact_info", gc_compact_stats, 0);
9389  rb_define_singleton_method(rb_mGC, "verify_compaction_references", gc_verify_compaction_references, -1);
9390  }
9391  else {
9392  rb_define_singleton_method(rb_mGC, "compact", rb_f_notimplement, 0);
9393  rb_define_singleton_method(rb_mGC, "auto_compact", rb_f_notimplement, 0);
9394  rb_define_singleton_method(rb_mGC, "auto_compact=", rb_f_notimplement, 1);
9395  rb_define_singleton_method(rb_mGC, "latest_compact_info", rb_f_notimplement, 0);
9396  rb_define_singleton_method(rb_mGC, "verify_compaction_references", rb_f_notimplement, -1);
9397  }
9398 
9399  /* internal methods */
9400  rb_define_singleton_method(rb_mGC, "verify_internal_consistency", gc_verify_internal_consistency_m, 0);
9401 
9402 #if MALLOC_ALLOCATED_SIZE
9403  rb_define_singleton_method(rb_mGC, "malloc_allocated_size", gc_malloc_allocated_size, 0);
9404  rb_define_singleton_method(rb_mGC, "malloc_allocations", gc_malloc_allocations, 0);
9405 #endif
9406 
9407  VALUE rb_mProfiler = rb_define_module_under(rb_mGC, "Profiler");
9408  rb_define_singleton_method(rb_mProfiler, "enabled?", gc_profile_enable_get, 0);
9409  rb_define_singleton_method(rb_mProfiler, "enable", gc_profile_enable, 0);
9410  rb_define_singleton_method(rb_mProfiler, "raw_data", gc_profile_record_get, 0);
9411  rb_define_singleton_method(rb_mProfiler, "disable", gc_profile_disable, 0);
9412  rb_define_singleton_method(rb_mProfiler, "clear", gc_profile_clear, 0);
9413  rb_define_singleton_method(rb_mProfiler, "result", gc_profile_result, 0);
9414  rb_define_singleton_method(rb_mProfiler, "report", gc_profile_report, -1);
9415  rb_define_singleton_method(rb_mProfiler, "total_time", gc_profile_total_time, 0);
9416 
9417  {
9418  VALUE opts;
9419  /* \GC build options */
9420  rb_define_const(rb_mGC, "OPTS", opts = rb_ary_new());
9421 #define OPT(o) if (o) rb_ary_push(opts, rb_interned_str(#o, sizeof(#o) - 1))
9422  OPT(GC_DEBUG);
9423  OPT(USE_RGENGC);
9424  OPT(RGENGC_DEBUG);
9425  OPT(RGENGC_CHECK_MODE);
9426  OPT(RGENGC_PROFILE);
9427  OPT(RGENGC_ESTIMATE_OLDMALLOC);
9428  OPT(GC_PROFILE_MORE_DETAIL);
9429  OPT(GC_ENABLE_LAZY_SWEEP);
9430  OPT(CALC_EXACT_MALLOC_SIZE);
9431  OPT(MALLOC_ALLOCATED_SIZE);
9432  OPT(MALLOC_ALLOCATED_SIZE_CHECK);
9433  OPT(GC_PROFILE_DETAIL_MEMORY);
9434  OPT(GC_COMPACTION_SUPPORTED);
9435 #undef OPT
9436  OBJ_FREEZE(opts);
9437  }
9438 }
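GC::INTERNAL_CONSTANTS and GC::OPTS, registered by rb_gc_impl_init above, are ordinary frozen Ruby objects and can be read back through the C API. A minimal sketch; the inspect_gc_build helper is hypothetical and not part of this file.

#include <stdio.h>
#include "ruby/ruby.h"

/* Illustrative sketch only: report the number of size pools and whether the
 * build keeps exact malloc accounting, using the constants defined above. */
static void
inspect_gc_build(void)
{
    VALUE consts = rb_const_get(rb_mGC, rb_intern("INTERNAL_CONSTANTS"));
    VALUE heap_count = rb_hash_aref(consts, ID2SYM(rb_intern("HEAP_COUNT")));

    VALUE opts = rb_const_get(rb_mGC, rb_intern("OPTS"));
    VALUE exact = rb_ary_includes(opts, rb_str_new_cstr("CALC_EXACT_MALLOC_SIZE"));

    printf("size pools: %d, exact malloc accounting: %s\n",
           FIX2INT(heap_count), RTEST(exact) ? "yes" : "no");
}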