Ruby 3.4.0dev (2024-12-06 revision 892c46283a5ea4179500d951c9d4866c0051f27b)
gc.c (892c46283a5ea4179500d951c9d4866c0051f27b)
1 /**********************************************************************
2 
3  gc.c -
4 
5  $Author$
6  created at: Tue Oct 5 09:44:46 JST 1993
7 
8  Copyright (C) 1993-2007 Yukihiro Matsumoto
9  Copyright (C) 2000 Network Applied Communication Laboratory, Inc.
10  Copyright (C) 2000 Information-technology Promotion Agency, Japan
11 
12 **********************************************************************/
13 
14 #define rb_data_object_alloc rb_data_object_alloc
15 #define rb_data_typed_object_alloc rb_data_typed_object_alloc
16 
17 #include "ruby/internal/config.h"
18 #ifdef _WIN32
19 # include "ruby/ruby.h"
20 #endif
21 
22 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
23 # include "wasm/setjmp.h"
24 # include "wasm/machine.h"
25 #else
26 # include <setjmp.h>
27 #endif
28 #include <stdarg.h>
29 #include <stdio.h>
30 
31 /* MALLOC_HEADERS_BEGIN */
32 #ifndef HAVE_MALLOC_USABLE_SIZE
33 # ifdef _WIN32
34 # define HAVE_MALLOC_USABLE_SIZE
35 # define malloc_usable_size(a) _msize(a)
36 # elif defined HAVE_MALLOC_SIZE
37 # define HAVE_MALLOC_USABLE_SIZE
38 # define malloc_usable_size(a) malloc_size(a)
39 # endif
40 #endif
41 
42 #ifdef HAVE_MALLOC_USABLE_SIZE
43 # ifdef RUBY_ALTERNATIVE_MALLOC_HEADER
44 /* Alternative malloc header is included in ruby/missing.h */
45 # elif defined(HAVE_MALLOC_H)
46 # include <malloc.h>
47 # elif defined(HAVE_MALLOC_NP_H)
48 # include <malloc_np.h>
49 # elif defined(HAVE_MALLOC_MALLOC_H)
50 # include <malloc/malloc.h>
51 # endif
52 #endif
53 
54 /* MALLOC_HEADERS_END */
55 
56 #ifdef HAVE_SYS_TIME_H
57 # include <sys/time.h>
58 #endif
59 
60 #ifdef HAVE_SYS_RESOURCE_H
61 # include <sys/resource.h>
62 #endif
63 
64 #if defined _WIN32 || defined __CYGWIN__
65 # include <windows.h>
66 #elif defined(HAVE_POSIX_MEMALIGN)
67 #elif defined(HAVE_MEMALIGN)
68 # include <malloc.h>
69 #endif
70 
71 #include <sys/types.h>
72 
73 #ifdef __EMSCRIPTEN__
74 #include <emscripten.h>
75 #endif
76 
77 /* For ruby_annotate_mmap */
78 #ifdef HAVE_SYS_PRCTL_H
79 #include <sys/prctl.h>
80 #endif
81 
82 #undef LIST_HEAD /* ccan/list conflicts with BSD-origin sys/queue.h. */
83 
84 #include "constant.h"
85 #include "darray.h"
86 #include "debug_counter.h"
87 #include "eval_intern.h"
88 #include "gc/gc.h"
89 #include "id_table.h"
90 #include "internal.h"
91 #include "internal/class.h"
92 #include "internal/compile.h"
93 #include "internal/complex.h"
94 #include "internal/cont.h"
95 #include "internal/error.h"
96 #include "internal/eval.h"
97 #include "internal/gc.h"
98 #include "internal/hash.h"
99 #include "internal/imemo.h"
100 #include "internal/io.h"
101 #include "internal/numeric.h"
102 #include "internal/object.h"
103 #include "internal/proc.h"
104 #include "internal/rational.h"
105 #include "internal/sanitizers.h"
106 #include "internal/struct.h"
107 #include "internal/symbol.h"
108 #include "internal/thread.h"
109 #include "internal/variable.h"
110 #include "internal/warnings.h"
111 #include "rjit.h"
112 #include "probes.h"
113 #include "regint.h"
114 #include "ruby/debug.h"
115 #include "ruby/io.h"
116 #include "ruby/re.h"
117 #include "ruby/st.h"
118 #include "ruby/thread.h"
119 #include "ruby/util.h"
120 #include "ruby/vm.h"
121 #include "ruby_assert.h"
122 #include "ruby_atomic.h"
123 #include "symbol.h"
124 #include "vm_core.h"
125 #include "vm_sync.h"
126 #include "vm_callinfo.h"
127 #include "ractor_core.h"
128 #include "yjit.h"
129 
130 #include "builtin.h"
131 #include "shape.h"
132 
133 unsigned int
134 rb_gc_vm_lock(void)
135 {
136  unsigned int lev;
137  RB_VM_LOCK_ENTER_LEV(&lev);
138  return lev;
139 }
140 
141 void
142 rb_gc_vm_unlock(unsigned int lev)
143 {
144  RB_VM_LOCK_LEAVE_LEV(&lev);
145 }
146 
147 unsigned int
148 rb_gc_cr_lock(void)
149 {
150  unsigned int lev;
151  RB_VM_LOCK_ENTER_CR_LEV(GET_RACTOR(), &lev);
152  return lev;
153 }
154 
155 void
156 rb_gc_cr_unlock(unsigned int lev)
157 {
158  RB_VM_LOCK_LEAVE_CR_LEV(GET_RACTOR(), &lev);
159 }
160 
161 unsigned int
162 rb_gc_vm_lock_no_barrier(void)
163 {
164  unsigned int lev = 0;
165  RB_VM_LOCK_ENTER_LEV_NB(&lev);
166  return lev;
167 }
168 
169 void
170 rb_gc_vm_unlock_no_barrier(unsigned int lev)
171 {
172  RB_VM_LOCK_LEAVE_LEV(&lev);
173 }
174 
175 void
176 rb_gc_vm_barrier(void)
177 {
178  rb_vm_barrier();
179 }
180 
181 #if USE_MODULAR_GC
182 void *
183 rb_gc_get_ractor_newobj_cache(void)
184 {
185  return GET_RACTOR()->newobj_cache;
186 }
187 
188 void
189 rb_gc_initialize_vm_context(struct rb_gc_vm_context *context)
190 {
191  rb_native_mutex_initialize(&context->lock);
192  context->ec = GET_EC();
193 }
194 
195 void
196 rb_gc_worker_thread_set_vm_context(struct rb_gc_vm_context *context)
197 {
198  rb_native_mutex_lock(&context->lock);
199 
200  GC_ASSERT(rb_current_execution_context(false) == NULL);
201 
202 #ifdef RB_THREAD_LOCAL_SPECIFIER
203  rb_current_ec_set(context->ec);
204 #else
205  native_tls_set(ruby_current_ec_key, context->ec);
206 #endif
207 }
208 
209 void
210 rb_gc_worker_thread_unset_vm_context(struct rb_gc_vm_context *context)
211 {
212  rb_native_mutex_unlock(&context->lock);
213 
214  GC_ASSERT(rb_current_execution_context(true) == context->ec);
215 
216 #ifdef RB_THREAD_LOCAL_SPECIFIER
217  rb_current_ec_set(NULL);
218 #else
219  native_tls_set(ruby_current_ec_key, NULL);
220 #endif
221 }
222 #endif
223 
224 bool
225 rb_gc_event_hook_required_p(rb_event_flag_t event)
226 {
227  return ruby_vm_event_flags & event;
228 }
229 
230 void
231 rb_gc_event_hook(VALUE obj, rb_event_flag_t event)
232 {
233  if (LIKELY(!rb_gc_event_hook_required_p(event))) return;
234 
235  rb_execution_context_t *ec = GET_EC();
236  if (!ec->cfp) return;
237 
238  EXEC_EVENT_HOOK(ec, event, ec->cfp->self, 0, 0, 0, obj);
239 }
240 
241 void *
242 rb_gc_get_objspace(void)
243 {
244  return GET_VM()->gc.objspace;
245 }
246 
247 
248 void
249 rb_gc_ractor_newobj_cache_foreach(void (*func)(void *cache, void *data), void *data)
250 {
251  rb_ractor_t *r = NULL;
252  ccan_list_for_each(&GET_VM()->ractor.set, r, vmlr_node) {
253  func(r->newobj_cache, data);
254  }
255 }
256 
257 void
258 rb_gc_run_obj_finalizer(VALUE objid, long count, VALUE (*callback)(long i, void *data), void *data)
259 {
260  volatile struct {
261  VALUE errinfo;
262  VALUE final;
263  rb_control_frame_t *cfp;
264  VALUE *sp;
265  long finished;
266  } saved;
267 
268  rb_execution_context_t * volatile ec = GET_EC();
269 #define RESTORE_FINALIZER() (\
270  ec->cfp = saved.cfp, \
271  ec->cfp->sp = saved.sp, \
272  ec->errinfo = saved.errinfo)
273 
274  saved.errinfo = ec->errinfo;
275  saved.cfp = ec->cfp;
276  saved.sp = ec->cfp->sp;
277  saved.finished = 0;
278  saved.final = Qundef;
279 
280  EC_PUSH_TAG(ec);
281  enum ruby_tag_type state = EC_EXEC_TAG();
282  if (state != TAG_NONE) {
283  ++saved.finished; /* skip failed finalizer */
284 
285  VALUE failed_final = saved.final;
286  saved.final = Qundef;
287  if (!UNDEF_P(failed_final) && !NIL_P(ruby_verbose)) {
288  rb_warn("Exception in finalizer %+"PRIsVALUE, failed_final);
289  rb_ec_error_print(ec, ec->errinfo);
290  }
291  }
292 
293  for (long i = saved.finished; RESTORE_FINALIZER(), i < count; saved.finished = ++i) {
294  saved.final = callback(i, data);
295  rb_check_funcall(saved.final, idCall, 1, &objid);
296  }
297  EC_POP_TAG();
298 #undef RESTORE_FINALIZER
299 }
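
/* Control-flow note: EC_EXEC_TAG() returns a value other than TAG_NONE when a
 * finalizer raises. Execution then re-enters the `if (state != TAG_NONE)`
 * block above, the failed finalizer is counted as finished (and warned about
 * when ruby_verbose is not nil), and the loop resumes from the next index
 * with the frame, stack pointer, and errinfo rewound by RESTORE_FINALIZER(). */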
300 
301 void
302 rb_gc_set_pending_interrupt(void)
303 {
304  rb_execution_context_t *ec = GET_EC();
305  ec->interrupt_mask |= PENDING_INTERRUPT_MASK;
306 }
307 
308 void
309 rb_gc_unset_pending_interrupt(void)
310 {
311  rb_execution_context_t *ec = GET_EC();
312  ec->interrupt_mask &= ~PENDING_INTERRUPT_MASK;
313 }
314 
315 bool
316 rb_gc_multi_ractor_p(void)
317 {
318  return rb_multi_ractor_p();
319 }
320 
321 bool rb_obj_is_main_ractor(VALUE gv);
322 
323 bool
324 rb_gc_shutdown_call_finalizer_p(VALUE obj)
325 {
326  switch (BUILTIN_TYPE(obj)) {
327  case T_DATA:
328  if (!ruby_free_at_exit_p() && (!DATA_PTR(obj) || !RDATA(obj)->dfree)) return false;
329  if (rb_obj_is_thread(obj)) return false;
330  if (rb_obj_is_mutex(obj)) return false;
331  if (rb_obj_is_fiber(obj)) return false;
332  if (rb_obj_is_main_ractor(obj)) return false;
333 
334  return true;
335 
336  case T_FILE:
337  return true;
338 
339  case T_SYMBOL:
340  if (RSYMBOL(obj)->fstr &&
341  (BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_NONE ||
342  BUILTIN_TYPE(RSYMBOL(obj)->fstr) == T_ZOMBIE)) {
343  RSYMBOL(obj)->fstr = 0;
344  }
345  return true;
346 
347  case T_NONE:
348  return false;
349 
350  default:
351  return ruby_free_at_exit_p();
352  }
353 }
354 
355 uint32_t
356 rb_gc_get_shape(VALUE obj)
357 {
358  return (uint32_t)rb_shape_get_shape_id(obj);
359 }
360 
361 void
362 rb_gc_set_shape(VALUE obj, uint32_t shape_id)
363 {
364  rb_shape_set_shape_id(obj, (uint32_t)shape_id);
365 }
366 
367 uint32_t
368 rb_gc_rebuild_shape(VALUE obj, size_t heap_id)
369 {
370  rb_shape_t *orig_shape = rb_shape_get_shape(obj);
371 
372  if (rb_shape_obj_too_complex(obj)) return (uint32_t)OBJ_TOO_COMPLEX_SHAPE_ID;
373 
374  rb_shape_t *initial_shape = rb_shape_get_shape_by_id((shape_id_t)(heap_id + FIRST_T_OBJECT_SHAPE_ID));
375  rb_shape_t *new_shape = rb_shape_traverse_from_new_root(initial_shape, orig_shape);
376 
377  if (!new_shape) return 0;
378 
379  return (uint32_t)rb_shape_id(new_shape);
380 }
381 
382 void rb_vm_update_references(void *ptr);
383 
384 #define rb_setjmp(env) RUBY_SETJMP(env)
385 #define rb_jmp_buf rb_jmpbuf_t
386 #undef rb_data_object_wrap
387 
388 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
389 #define MAP_ANONYMOUS MAP_ANON
390 #endif
391 
392 #define unless_objspace(objspace) \
393  void *objspace; \
394  rb_vm_t *unless_objspace_vm = GET_VM(); \
395  if (unless_objspace_vm) objspace = unless_objspace_vm->gc.objspace; \
396  else /* return; or objspace will be warned uninitialized */
397 
398 #define RMOVED(obj) ((struct RMoved *)(obj))
399 
400 #define TYPED_UPDATE_IF_MOVED(_objspace, _type, _thing) do { \
401  if (rb_gc_impl_object_moved_p((_objspace), (VALUE)(_thing))) { \
402  *(_type *)&(_thing) = (_type)rb_gc_impl_location(_objspace, (VALUE)_thing); \
403  } \
404 } while (0)
405 
406 #define UPDATE_IF_MOVED(_objspace, _thing) TYPED_UPDATE_IF_MOVED(_objspace, VALUE, _thing)
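
/* Usage sketch (the `ext->wrapped_obj` field is hypothetical): a compaction
 * update pass fixes up each VALUE slot in place, e.g.
 *
 *     UPDATE_IF_MOVED(objspace, ext->wrapped_obj);
 *
 * which rewrites the slot only if rb_gc_impl_object_moved_p() reports that
 * its referent was moved. */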
407 
408 #if RUBY_MARK_FREE_DEBUG
409 int ruby_gc_debug_indent = 0;
410 #endif
411 
412 #ifndef RGENGC_OBJ_INFO
413 # define RGENGC_OBJ_INFO RGENGC_CHECK_MODE
414 #endif
415 
416 #ifndef CALC_EXACT_MALLOC_SIZE
417 # define CALC_EXACT_MALLOC_SIZE 0
418 #endif
419 
421 
422 static size_t malloc_offset = 0;
423 #if defined(HAVE_MALLOC_USABLE_SIZE)
424 static size_t
425 gc_compute_malloc_offset(void)
426 {
427  // Different allocators use different metadata storage strategies which result in different
428  // ideal sizes.
429  // For instance malloc(64) will waste 8B with glibc, but waste 0B with jemalloc.
430  // But malloc(56) will waste 0B with glibc, but waste 8B with jemalloc.
431  // So we try allocating 64, 56 and 48 bytes and select the first offset that doesn't
432  // waste memory.
433  // This was tested on Linux with glibc 2.35 and jemalloc 5, and for both it results in
434  // no wasted memory.
435  size_t offset = 0;
436  for (offset = 0; offset <= 16; offset += 8) {
437  size_t allocated = (64 - offset);
438  void *test_ptr = malloc(allocated);
439  size_t wasted = malloc_usable_size(test_ptr) - allocated;
440  free(test_ptr);
441 
442  if (wasted == 0) {
443  return offset;
444  }
445  }
446  return 0;
447 }
448 #else
449 static size_t
450 gc_compute_malloc_offset(void)
451 {
452  // If we don't have malloc_usable_size, we use powers of 2.
453  return 0;
454 }
455 #endif
456 
457 size_t
458 rb_malloc_grow_capa(size_t current, size_t type_size)
459 {
460  size_t current_capacity = current;
461  if (current_capacity < 4) {
462  current_capacity = 4;
463  }
464  current_capacity *= type_size;
465 
466  // We double the current capacity.
467  size_t new_capacity = (current_capacity * 2);
468 
469  // And round up to the next power of 2 if it's not already one.
470  if (rb_popcount64(new_capacity) != 1) {
471  new_capacity = (size_t)1 << (64 - nlz_int64(new_capacity));
472  }
473 
474  new_capacity -= malloc_offset;
475  new_capacity /= type_size;
476  if (current > new_capacity) {
477  rb_bug("rb_malloc_grow_capa: current_capacity=%zu, new_capacity=%zu, malloc_offset=%zu", current, new_capacity, malloc_offset);
478  }
479  RUBY_ASSERT(new_capacity > current);
480  return new_capacity;
481 }
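
/* Worked example (assuming malloc_offset == 8, as measured under glibc):
 * growing an array of 10 elements of 8 bytes each proceeds as
 *
 *     current_capacity = 10 * 8        = 80
 *     doubled                          = 160
 *     rounded up to a power of 2       = 256
 *     minus malloc_offset              = 248
 *     new element capacity = 248 / 8   = 31
 *
 * so the subsequent malloc(31 * 8) fills a 256-byte glibc chunk exactly,
 * wasting nothing. */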
482 
483 static inline struct rbimpl_size_mul_overflow_tag
484 size_add_overflow(size_t x, size_t y)
485 {
486  size_t z;
487  bool p;
488 #if 0
489 
490 #elif defined(ckd_add)
491  p = ckd_add(&z, x, y);
492 
493 #elif __has_builtin(__builtin_add_overflow)
494  p = __builtin_add_overflow(x, y, &z);
495 
496 #elif defined(DSIZE_T)
497  RB_GNUC_EXTENSION DSIZE_T dx = x;
498  RB_GNUC_EXTENSION DSIZE_T dy = y;
499  RB_GNUC_EXTENSION DSIZE_T dz = dx + dy;
500  p = dz > SIZE_MAX;
501  z = (size_t)dz;
502 
503 #else
504  z = x + y;
505  p = z < y;
506 
507 #endif
508  return (struct rbimpl_size_mul_overflow_tag) { p, z, };
509 }
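
/* Note on the plain-C fallback above: unsigned addition wraps modulo
 * 2**(CHAR_BIT * sizeof(size_t)), so the sum overflowed exactly when the
 * result is smaller than an operand. For example, SIZE_MAX + 2 wraps to 1,
 * and 1 < 2 flags the overflow. */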
510 
511 static inline struct rbimpl_size_mul_overflow_tag
512 size_mul_add_overflow(size_t x, size_t y, size_t z) /* x * y + z */
513 {
514  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
515  struct rbimpl_size_mul_overflow_tag u = size_add_overflow(t.right, z);
516  return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left, u.right };
517 }
518 
519 static inline struct rbimpl_size_mul_overflow_tag
520 size_mul_add_mul_overflow(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
521 {
522  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
523  struct rbimpl_size_mul_overflow_tag u = rbimpl_size_mul_overflow(z, w);
524  struct rbimpl_size_mul_overflow_tag v = size_add_overflow(t.right, u.right);
525  return (struct rbimpl_size_mul_overflow_tag) { t.left || u.left || v.left, v.right };
526 }
527 
528 PRINTF_ARGS(NORETURN(static void gc_raise(VALUE, const char*, ...)), 2, 3);
529 
530 static inline size_t
531 size_mul_or_raise(size_t x, size_t y, VALUE exc)
532 {
533  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(x, y);
534  if (LIKELY(!t.left)) {
535  return t.right;
536  }
537  else if (rb_during_gc()) {
538  rb_memerror(); /* or...? */
539  }
540  else {
541  gc_raise(
542  exc,
543  "integer overflow: %"PRIuSIZE
544  " * %"PRIuSIZE
545  " > %"PRIuSIZE,
546  x, y, (size_t)SIZE_MAX);
547  }
548 }
549 
550 size_t
551 rb_size_mul_or_raise(size_t x, size_t y, VALUE exc)
552 {
553  return size_mul_or_raise(x, y, exc);
554 }
555 
556 static inline size_t
557 size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
558 {
559  struct rbimpl_size_mul_overflow_tag t = size_mul_add_overflow(x, y, z);
560  if (LIKELY(!t.left)) {
561  return t.right;
562  }
563  else if (rb_during_gc()) {
564  rb_memerror(); /* or...? */
565  }
566  else {
567  gc_raise(
568  exc,
569  "integer overflow: %"PRIuSIZE
570  " * %"PRIuSIZE
571  " + %"PRIuSIZE
572  " > %"PRIuSIZE,
573  x, y, z, (size_t)SIZE_MAX);
574  }
575 }
576 
577 size_t
578 rb_size_mul_add_or_raise(size_t x, size_t y, size_t z, VALUE exc)
579 {
580  return size_mul_add_or_raise(x, y, z, exc);
581 }
582 
583 static inline size_t
584 size_mul_add_mul_or_raise(size_t x, size_t y, size_t z, size_t w, VALUE exc)
585 {
586  struct rbimpl_size_mul_overflow_tag t = size_mul_add_mul_overflow(x, y, z, w);
587  if (LIKELY(!t.left)) {
588  return t.right;
589  }
590  else if (rb_during_gc()) {
591  rb_memerror(); /* or...? */
592  }
593  else {
594  gc_raise(
595  exc,
596  "integer overflow: %"PRIdSIZE
597  " * %"PRIdSIZE
598  " + %"PRIdSIZE
599  " * %"PRIdSIZE
600  " > %"PRIdSIZE,
601  x, y, z, w, (size_t)SIZE_MAX);
602  }
603 }
604 
605 #if defined(HAVE_RB_GC_GUARDED_PTR_VAL) && HAVE_RB_GC_GUARDED_PTR_VAL
606 /* trick the compiler into thinking an external signal handler uses this */
607 volatile VALUE rb_gc_guarded_val;
608 volatile VALUE *
609 rb_gc_guarded_ptr_val(volatile VALUE *ptr, VALUE val)
610 {
611  rb_gc_guarded_val = val;
612 
613  return ptr;
614 }
615 #endif
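
/* This function is the out-of-line backend of RB_GC_GUARD() on platforms
 * where HAVE_RB_GC_GUARDED_PTR_VAL is set. Sketch of typical use in a C
 * extension (do_something() is hypothetical):
 *
 *     VALUE str = rb_str_new_cstr("temporary");
 *     const char *p = RSTRING_PTR(str);
 *     do_something(p);   // str itself is otherwise dead here...
 *     RB_GC_GUARD(str);  // ...so keep it conservatively reachable until now
 */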
616 
617 static const char *obj_type_name(VALUE obj);
618 #include "gc/default/default.c"
619 
620 #if USE_MODULAR_GC && !defined(HAVE_DLOPEN)
621 # error "Modular GC requires dlopen"
622 #elif USE_MODULAR_GC
623 #include <dlfcn.h>
624 
625 typedef struct gc_function_map {
626  // Bootup
627  void *(*objspace_alloc)(void);
628  void (*objspace_init)(void *objspace_ptr);
629  void (*objspace_free)(void *objspace_ptr);
630  void *(*ractor_cache_alloc)(void *objspace_ptr, void *ractor);
631  void (*ractor_cache_free)(void *objspace_ptr, void *cache);
632  void (*set_params)(void *objspace_ptr);
633  void (*init)(void);
634  size_t *(*heap_sizes)(void *objspace_ptr);
635  // Shutdown
636  void (*shutdown_free_objects)(void *objspace_ptr);
637  // GC
638  void (*start)(void *objspace_ptr, bool full_mark, bool immediate_mark, bool immediate_sweep, bool compact);
639  bool (*during_gc_p)(void *objspace_ptr);
640  void (*prepare_heap)(void *objspace_ptr);
641  void (*gc_enable)(void *objspace_ptr);
642  void (*gc_disable)(void *objspace_ptr, bool finish_current_gc);
643  bool (*gc_enabled_p)(void *objspace_ptr);
644  VALUE (*config_get)(void *objpace_ptr);
645  void (*config_set)(void *objspace_ptr, VALUE hash);
646  void (*stress_set)(void *objspace_ptr, VALUE flag);
647  VALUE (*stress_get)(void *objspace_ptr);
648  // Object allocation
649  VALUE (*new_obj)(void *objspace_ptr, void *cache_ptr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t alloc_size);
650  size_t (*obj_slot_size)(VALUE obj);
651  size_t (*heap_id_for_size)(void *objspace_ptr, size_t size);
652  bool (*size_allocatable_p)(size_t size);
653  // Malloc
654  void *(*malloc)(void *objspace_ptr, size_t size);
655  void *(*calloc)(void *objspace_ptr, size_t size);
656  void *(*realloc)(void *objspace_ptr, void *ptr, size_t new_size, size_t old_size);
657  void (*free)(void *objspace_ptr, void *ptr, size_t old_size);
658  void (*adjust_memory_usage)(void *objspace_ptr, ssize_t diff);
659  // Marking
660  void (*mark)(void *objspace_ptr, VALUE obj);
661  void (*mark_and_move)(void *objspace_ptr, VALUE *ptr);
662  void (*mark_and_pin)(void *objspace_ptr, VALUE obj);
663  void (*mark_maybe)(void *objspace_ptr, VALUE obj);
664  void (*mark_weak)(void *objspace_ptr, VALUE *ptr);
665  void (*remove_weak)(void *objspace_ptr, VALUE parent_obj, VALUE *ptr);
666  // Compaction
667  bool (*object_moved_p)(void *objspace_ptr, VALUE obj);
668  VALUE (*location)(void *objspace_ptr, VALUE value);
669  // Write barriers
670  void (*writebarrier)(void *objspace_ptr, VALUE a, VALUE b);
671  void (*writebarrier_unprotect)(void *objspace_ptr, VALUE obj);
672  void (*writebarrier_remember)(void *objspace_ptr, VALUE obj);
673  // Heap walking
674  void (*each_objects)(void *objspace_ptr, int (*callback)(void *, void *, size_t, void *), void *data);
675  void (*each_object)(void *objspace_ptr, void (*func)(VALUE obj, void *data), void *data);
676  // Finalizers
677  void (*make_zombie)(void *objspace_ptr, VALUE obj, void (*dfree)(void *), void *data);
678  VALUE (*define_finalizer)(void *objspace_ptr, VALUE obj, VALUE block);
679  void (*undefine_finalizer)(void *objspace_ptr, VALUE obj);
680  void (*copy_finalizer)(void *objspace_ptr, VALUE dest, VALUE obj);
681  void (*shutdown_call_finalizer)(void *objspace_ptr);
682  // Object ID
683  VALUE (*object_id)(void *objspace_ptr, VALUE obj);
684  VALUE (*object_id_to_ref)(void *objspace_ptr, VALUE object_id);
685  // Forking
686  void (*before_fork)(void *objspace_ptr);
687  void (*after_fork)(void *objspace_ptr, rb_pid_t pid);
688  // Statistics
689  void (*set_measure_total_time)(void *objspace_ptr, VALUE flag);
690  bool (*get_measure_total_time)(void *objspace_ptr);
691  unsigned long long (*get_total_time)(void *objspace_ptr);
692  size_t (*gc_count)(void *objspace_ptr);
693  VALUE (*latest_gc_info)(void *objspace_ptr, VALUE key);
694  VALUE (*stat)(void *objspace_ptr, VALUE hash_or_sym);
695  VALUE (*stat_heap)(void *objspace_ptr, VALUE heap_name, VALUE hash_or_sym);
696  // Miscellaneous
697  size_t (*obj_flags)(void *objspace_ptr, VALUE obj, ID* flags, size_t max);
698  bool (*pointer_to_heap_p)(void *objspace_ptr, const void *ptr);
699  bool (*garbage_object_p)(void *objspace_ptr, VALUE obj);
700  void (*set_event_hook)(void *objspace_ptr, const rb_event_flag_t event);
701  void (*copy_attributes)(void *objspace_ptr, VALUE dest, VALUE obj);
702  // GC Identification
703  const char *(*active_gc_name)(void);
704 
705  bool modular_gc_loaded_p;
706 } rb_gc_function_map_t;
707 
708 static rb_gc_function_map_t rb_gc_functions;
709 
710 # define RUBY_GC_LIBRARY "RUBY_GC_LIBRARY"
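
/* Example: with a modular GC build, running RUBY_GC_LIBRARY=mmtk makes the
 * code below dlopen() MODULAR_GC_DIR "librubygc.mmtk" DLEXT, e.g.
 * .../modular_gc/librubygc.mmtk.so on Linux; leaving the variable unset
 * keeps the statically linked default GC. (The name "mmtk" is illustrative.) */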
711 
712 static void
713 ruby_modular_gc_init(void)
714 {
715  // Assert that the directory path ends with a /
716  RUBY_ASSERT_ALWAYS(MODULAR_GC_DIR[sizeof(MODULAR_GC_DIR) - 2] == '/');
717 
718  char *gc_so_file = getenv(RUBY_GC_LIBRARY);
719 
720  rb_gc_function_map_t gc_functions = { 0 };
721 
722  char *gc_so_path = NULL;
723  void *handle = NULL;
724  if (gc_so_file) {
725  /* Check to make sure that gc_so_file matches /[A-Za-z0-9_-]+/ so that it does
726  * not load a shared object outside of the directory. */
727  for (size_t i = 0; i < strlen(gc_so_file); i++) {
728  char c = gc_so_file[i];
729  if (isalnum(c)) continue;
730  switch (c) {
731  case '-':
732  case '_':
733  break;
734  default:
735  fprintf(stderr, "Only alphanumeric, dash, and underscore are allowed in "RUBY_GC_LIBRARY"\n");
736  exit(1);
737  }
738  }
739 
740  size_t gc_so_path_size = strlen(MODULAR_GC_DIR "librubygc." DLEXT) + strlen(gc_so_file) + 1;
741  gc_so_path = alloca(gc_so_path_size);
742  {
743  size_t gc_so_path_idx = 0;
744 #define GC_SO_PATH_APPEND(str) do { \
745  gc_so_path_idx += strlcpy(gc_so_path + gc_so_path_idx, str, gc_so_path_size - gc_so_path_idx); \
746 } while (0)
747  GC_SO_PATH_APPEND(MODULAR_GC_DIR);
748  GC_SO_PATH_APPEND("librubygc.");
749  GC_SO_PATH_APPEND(gc_so_file);
750  GC_SO_PATH_APPEND(DLEXT);
751  GC_ASSERT(gc_so_path_idx == gc_so_path_size - 1);
752 #undef GC_SO_PATH_APPEND
753  }
754 
755  handle = dlopen(gc_so_path, RTLD_LAZY | RTLD_GLOBAL);
756  if (!handle) {
757  fprintf(stderr, "ruby_modular_gc_init: Shared library %s cannot be opened: %s\n", gc_so_path, dlerror());
758  exit(1);
759  }
760 
761  gc_functions.modular_gc_loaded_p = true;
762  }
763 
764 # define load_modular_gc_func(name) do { \
765  if (handle) { \
766  const char *func_name = "rb_gc_impl_" #name; \
767  gc_functions.name = dlsym(handle, func_name); \
768  if (!gc_functions.name) { \
769  fprintf(stderr, "ruby_modular_gc_init: %s function not exported by library %s\n", func_name, gc_so_path); \
770  exit(1); \
771  } \
772  } \
773  else { \
774  gc_functions.name = rb_gc_impl_##name; \
775  } \
776 } while (0)
777 
778  // Bootup
779  load_modular_gc_func(objspace_alloc);
780  load_modular_gc_func(objspace_init);
781  load_modular_gc_func(objspace_free);
782  load_modular_gc_func(ractor_cache_alloc);
783  load_modular_gc_func(ractor_cache_free);
784  load_modular_gc_func(set_params);
785  load_modular_gc_func(init);
786  load_modular_gc_func(heap_sizes);
787  // Shutdown
788  load_modular_gc_func(shutdown_free_objects);
789  // GC
790  load_modular_gc_func(start);
791  load_modular_gc_func(during_gc_p);
792  load_modular_gc_func(prepare_heap);
793  load_modular_gc_func(gc_enable);
794  load_modular_gc_func(gc_disable);
795  load_modular_gc_func(gc_enabled_p);
796  load_modular_gc_func(config_set);
797  load_modular_gc_func(config_get);
798  load_modular_gc_func(stress_set);
799  load_modular_gc_func(stress_get);
800  // Object allocation
801  load_modular_gc_func(new_obj);
802  load_modular_gc_func(obj_slot_size);
803  load_modular_gc_func(heap_id_for_size);
804  load_modular_gc_func(size_allocatable_p);
805  // Malloc
806  load_modular_gc_func(malloc);
807  load_modular_gc_func(calloc);
808  load_modular_gc_func(realloc);
809  load_modular_gc_func(free);
810  load_modular_gc_func(adjust_memory_usage);
811  // Marking
812  load_modular_gc_func(mark);
813  load_modular_gc_func(mark_and_move);
814  load_modular_gc_func(mark_and_pin);
815  load_modular_gc_func(mark_maybe);
816  load_modular_gc_func(mark_weak);
817  load_modular_gc_func(remove_weak);
818  // Compaction
819  load_modular_gc_func(object_moved_p);
820  load_modular_gc_func(location);
821  // Write barriers
822  load_modular_gc_func(writebarrier);
823  load_modular_gc_func(writebarrier_unprotect);
824  load_modular_gc_func(writebarrier_remember);
825  // Heap walking
826  load_modular_gc_func(each_objects);
827  load_modular_gc_func(each_object);
828  // Finalizers
829  load_modular_gc_func(make_zombie);
830  load_modular_gc_func(define_finalizer);
831  load_modular_gc_func(undefine_finalizer);
832  load_modular_gc_func(copy_finalizer);
833  load_modular_gc_func(shutdown_call_finalizer);
834  // Object ID
835  load_modular_gc_func(object_id);
836  load_modular_gc_func(object_id_to_ref);
837  // Forking
838  load_modular_gc_func(before_fork);
839  load_modular_gc_func(after_fork);
840  // Statistics
841  load_modular_gc_func(set_measure_total_time);
842  load_modular_gc_func(get_measure_total_time);
843  load_modular_gc_func(get_total_time);
844  load_modular_gc_func(gc_count);
845  load_modular_gc_func(latest_gc_info);
846  load_modular_gc_func(stat);
847  load_modular_gc_func(stat_heap);
848  // Miscellaneous
849  load_modular_gc_func(obj_flags);
850  load_modular_gc_func(pointer_to_heap_p);
851  load_modular_gc_func(garbage_object_p);
852  load_modular_gc_func(set_event_hook);
853  load_modular_gc_func(copy_attributes);
854  // GC Identification
855  load_modular_gc_func(active_gc_name);
856 
857 # undef load_modular_gc_func
858 
859  rb_gc_functions = gc_functions;
860 }
861 
862 // Bootup
863 # define rb_gc_impl_objspace_alloc rb_gc_functions.objspace_alloc
864 # define rb_gc_impl_objspace_init rb_gc_functions.objspace_init
865 # define rb_gc_impl_objspace_free rb_gc_functions.objspace_free
866 # define rb_gc_impl_ractor_cache_alloc rb_gc_functions.ractor_cache_alloc
867 # define rb_gc_impl_ractor_cache_free rb_gc_functions.ractor_cache_free
868 # define rb_gc_impl_set_params rb_gc_functions.set_params
869 # define rb_gc_impl_init rb_gc_functions.init
870 # define rb_gc_impl_heap_sizes rb_gc_functions.heap_sizes
871 // Shutdown
872 # define rb_gc_impl_shutdown_free_objects rb_gc_functions.shutdown_free_objects
873 // GC
874 # define rb_gc_impl_start rb_gc_functions.start
875 # define rb_gc_impl_during_gc_p rb_gc_functions.during_gc_p
876 # define rb_gc_impl_prepare_heap rb_gc_functions.prepare_heap
877 # define rb_gc_impl_gc_enable rb_gc_functions.gc_enable
878 # define rb_gc_impl_gc_disable rb_gc_functions.gc_disable
879 # define rb_gc_impl_gc_enabled_p rb_gc_functions.gc_enabled_p
880 # define rb_gc_impl_config_get rb_gc_functions.config_get
881 # define rb_gc_impl_config_set rb_gc_functions.config_set
882 # define rb_gc_impl_stress_set rb_gc_functions.stress_set
883 # define rb_gc_impl_stress_get rb_gc_functions.stress_get
884 // Object allocation
885 # define rb_gc_impl_new_obj rb_gc_functions.new_obj
886 # define rb_gc_impl_obj_slot_size rb_gc_functions.obj_slot_size
887 # define rb_gc_impl_heap_id_for_size rb_gc_functions.heap_id_for_size
888 # define rb_gc_impl_size_allocatable_p rb_gc_functions.size_allocatable_p
889 // Malloc
890 # define rb_gc_impl_malloc rb_gc_functions.malloc
891 # define rb_gc_impl_calloc rb_gc_functions.calloc
892 # define rb_gc_impl_realloc rb_gc_functions.realloc
893 # define rb_gc_impl_free rb_gc_functions.free
894 # define rb_gc_impl_adjust_memory_usage rb_gc_functions.adjust_memory_usage
895 // Marking
896 # define rb_gc_impl_mark rb_gc_functions.mark
897 # define rb_gc_impl_mark_and_move rb_gc_functions.mark_and_move
898 # define rb_gc_impl_mark_and_pin rb_gc_functions.mark_and_pin
899 # define rb_gc_impl_mark_maybe rb_gc_functions.mark_maybe
900 # define rb_gc_impl_mark_weak rb_gc_functions.mark_weak
901 # define rb_gc_impl_remove_weak rb_gc_functions.remove_weak
902 // Compaction
903 # define rb_gc_impl_object_moved_p rb_gc_functions.object_moved_p
904 # define rb_gc_impl_location rb_gc_functions.location
905 // Write barriers
906 # define rb_gc_impl_writebarrier rb_gc_functions.writebarrier
907 # define rb_gc_impl_writebarrier_unprotect rb_gc_functions.writebarrier_unprotect
908 # define rb_gc_impl_writebarrier_remember rb_gc_functions.writebarrier_remember
909 // Heap walking
910 # define rb_gc_impl_each_objects rb_gc_functions.each_objects
911 # define rb_gc_impl_each_object rb_gc_functions.each_object
912 // Finalizers
913 # define rb_gc_impl_make_zombie rb_gc_functions.make_zombie
914 # define rb_gc_impl_define_finalizer rb_gc_functions.define_finalizer
915 # define rb_gc_impl_undefine_finalizer rb_gc_functions.undefine_finalizer
916 # define rb_gc_impl_copy_finalizer rb_gc_functions.copy_finalizer
917 # define rb_gc_impl_shutdown_call_finalizer rb_gc_functions.shutdown_call_finalizer
918 // Object ID
919 # define rb_gc_impl_object_id rb_gc_functions.object_id
920 # define rb_gc_impl_object_id_to_ref rb_gc_functions.object_id_to_ref
921 // Forking
922 # define rb_gc_impl_before_fork rb_gc_functions.before_fork
923 # define rb_gc_impl_after_fork rb_gc_functions.after_fork
924 // Statistics
925 # define rb_gc_impl_set_measure_total_time rb_gc_functions.set_measure_total_time
926 # define rb_gc_impl_get_measure_total_time rb_gc_functions.get_measure_total_time
927 # define rb_gc_impl_get_total_time rb_gc_functions.get_total_time
928 # define rb_gc_impl_gc_count rb_gc_functions.gc_count
929 # define rb_gc_impl_latest_gc_info rb_gc_functions.latest_gc_info
930 # define rb_gc_impl_stat rb_gc_functions.stat
931 # define rb_gc_impl_stat_heap rb_gc_functions.stat_heap
932 // Miscellaneous
933 # define rb_gc_impl_obj_flags rb_gc_functions.obj_flags
934 # define rb_gc_impl_pointer_to_heap_p rb_gc_functions.pointer_to_heap_p
935 # define rb_gc_impl_garbage_object_p rb_gc_functions.garbage_object_p
936 # define rb_gc_impl_set_event_hook rb_gc_functions.set_event_hook
937 # define rb_gc_impl_copy_attributes rb_gc_functions.copy_attributes
938 // GC Identification
939 # define rb_gc_impl_active_gc_name rb_gc_functions.active_gc_name
940 #endif
941 
942 static VALUE initial_stress = Qfalse;
943 
944 void *
945 rb_objspace_alloc(void)
946 {
947 #if USE_MODULAR_GC
948  ruby_modular_gc_init();
949 #endif
950 
951  void *objspace = rb_gc_impl_objspace_alloc();
952  ruby_current_vm_ptr->gc.objspace = objspace;
953 
954  rb_gc_impl_objspace_init(objspace);
955  rb_gc_impl_stress_set(objspace, initial_stress);
956 
957  return objspace;
958 }
959 
960 void
961 rb_objspace_free(void *objspace)
962 {
963  rb_gc_impl_objspace_free(objspace);
964 }
965 
966 size_t
967 rb_gc_obj_slot_size(VALUE obj)
968 {
969  return rb_gc_impl_obj_slot_size(obj);
970 }
971 
972 static inline void
973 gc_validate_pc(void) {
974 #if RUBY_DEBUG
975  rb_execution_context_t *ec = GET_EC();
976  const rb_control_frame_t *cfp = ec->cfp;
977  if (cfp && VM_FRAME_RUBYFRAME_P(cfp) && cfp->pc) {
978  RUBY_ASSERT(cfp->pc >= ISEQ_BODY(cfp->iseq)->iseq_encoded);
979  RUBY_ASSERT(cfp->pc <= ISEQ_BODY(cfp->iseq)->iseq_encoded + ISEQ_BODY(cfp->iseq)->iseq_size);
980  }
981 #endif
982 }
983 
984 static inline VALUE
985 newobj_of(rb_ractor_t *cr, VALUE klass, VALUE flags, VALUE v1, VALUE v2, VALUE v3, bool wb_protected, size_t size)
986 {
987  VALUE obj = rb_gc_impl_new_obj(rb_gc_get_objspace(), cr->newobj_cache, klass, flags, v1, v2, v3, wb_protected, size);
988 
989  gc_validate_pc();
990 
991  if (UNLIKELY(rb_gc_event_hook_required_p(RUBY_INTERNAL_EVENT_NEWOBJ))) {
992  unsigned int lev;
993  RB_VM_LOCK_ENTER_CR_LEV(cr, &lev);
994  {
995  memset((char *)obj + RVALUE_SIZE, 0, rb_gc_obj_slot_size(obj) - RVALUE_SIZE);
996 
997  rb_gc_event_hook(obj, RUBY_INTERNAL_EVENT_NEWOBJ);
998  }
999  RB_VM_LOCK_LEAVE_CR_LEV(cr, &lev);
1000  }
1001 
1002  return obj;
1003 }
1004 
1005 VALUE
1006 rb_wb_unprotected_newobj_of(VALUE klass, VALUE flags, size_t size)
1007 {
1008  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1009  return newobj_of(GET_RACTOR(), klass, flags, 0, 0, 0, FALSE, size);
1010 }
1011 
1012 VALUE
1013 rb_wb_protected_newobj_of(rb_execution_context_t *ec, VALUE klass, VALUE flags, size_t size)
1014 {
1015  GC_ASSERT((flags & FL_WB_PROTECTED) == 0);
1016  return newobj_of(rb_ec_ractor_ptr(ec), klass, flags, 0, 0, 0, TRUE, size);
1017 }
1018 
1019 #define UNEXPECTED_NODE(func) \
1020  rb_bug(#func"(): GC does not handle T_NODE 0x%x(%p) 0x%"PRIxVALUE, \
1021  BUILTIN_TYPE(obj), (void*)(obj), RBASIC(obj)->flags)
1022 
1023 static inline void
1024 rb_data_object_check(VALUE klass)
1025 {
1026  if (klass != rb_cObject && (rb_get_alloc_func(klass) == rb_class_allocate_instance)) {
1027  rb_undef_alloc_func(klass);
1028  rb_warn("undefining the allocator of T_DATA class %"PRIsVALUE, klass);
1029  }
1030 }
1031 
1032 VALUE
1033 rb_data_object_wrap(VALUE klass, void *datap, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1034 {
1035  RUBY_ASSERT_ALWAYS(dfree != (RUBY_DATA_FUNC)1);
1036  if (klass) rb_data_object_check(klass);
1037  return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)dmark, (VALUE)dfree, (VALUE)datap, !dmark, sizeof(struct RTypedData));
1038 }
1039 
1040 VALUE
1041 rb_data_object_zalloc(VALUE klass, size_t size, RUBY_DATA_FUNC dmark, RUBY_DATA_FUNC dfree)
1042 {
1043  VALUE obj = rb_data_object_wrap(klass, 0, dmark, dfree);
1044  DATA_PTR(obj) = xcalloc(1, size);
1045  return obj;
1046 }
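
/* Extension-side sketch (struct my_thing, my_mark, and my_free are
 * hypothetical):
 *
 *     struct my_thing *t;
 *     VALUE obj = rb_data_object_zalloc(klass, sizeof(*t), my_mark, my_free);
 *     t = DATA_PTR(obj);   // payload already zeroed by the xcalloc() above
 *
 * rb_data_object_wrap() is the variant for a payload the caller has already
 * allocated. */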
1047 
1048 static VALUE
1049 typed_data_alloc(VALUE klass, VALUE typed_flag, void *datap, const rb_data_type_t *type, size_t size)
1050 {
1051  RBIMPL_NONNULL_ARG(type);
1052  if (klass) rb_data_object_check(klass);
1053  bool wb_protected = (type->flags & RUBY_FL_WB_PROTECTED) || !type->function.dmark;
1054  return newobj_of(GET_RACTOR(), klass, T_DATA, (VALUE)type, 1 | typed_flag, (VALUE)datap, wb_protected, size);
1055 }
1056 
1057 VALUE
1058 rb_data_typed_object_wrap(VALUE klass, void *datap, const rb_data_type_t *type)
1059 {
1060  if (UNLIKELY(type->flags & RUBY_TYPED_EMBEDDABLE)) {
1061  rb_raise(rb_eTypeError, "Cannot wrap an embeddable TypedData");
1062  }
1063 
1064  return typed_data_alloc(klass, 0, datap, type, sizeof(struct RTypedData));
1065 }
1066 
1067 VALUE
1068 rb_data_typed_object_zalloc(VALUE klass, size_t size, const rb_data_type_t *type)
1069 {
1070  if (type->flags & RUBY_TYPED_EMBEDDABLE) {
1071  if (!(type->flags & RUBY_TYPED_FREE_IMMEDIATELY)) {
1072  rb_raise(rb_eTypeError, "Embeddable TypedData must be freed immediately");
1073  }
1074 
1075  size_t embed_size = offsetof(struct RTypedData, data) + size;
1076  if (rb_gc_size_allocatable_p(embed_size)) {
1077  VALUE obj = typed_data_alloc(klass, TYPED_DATA_EMBEDDED, 0, type, embed_size);
1078  memset((char *)obj + offsetof(struct RTypedData, data), 0, size);
1079  return obj;
1080  }
1081  }
1082 
1083  VALUE obj = typed_data_alloc(klass, 0, NULL, type, sizeof(struct RTypedData));
1084  DATA_PTR(obj) = xcalloc(1, size);
1085  return obj;
1086 }
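
/* Sketch of a caller-defined type (names hypothetical). RUBY_TYPED_EMBEDDABLE
 * opts small payloads into the embedded fast path above, and requires
 * RUBY_TYPED_FREE_IMMEDIATELY, as enforced by the raise at the top:
 *
 *     static const rb_data_type_t my_type = {
 *         "my_thing",
 *         { my_mark, my_free, my_memsize, },
 *         0, 0,
 *         RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_EMBEDDABLE,
 *     };
 *
 *     VALUE obj = rb_data_typed_object_zalloc(klass, sizeof(struct my_thing),
 *                                             &my_type);
 */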
1087 
1088 static size_t
1089 rb_objspace_data_type_memsize(VALUE obj)
1090 {
1091  size_t size = 0;
1092  if (RTYPEDDATA_P(obj)) {
1093  const rb_data_type_t *type = RTYPEDDATA_TYPE(obj);
1094  const void *ptr = RTYPEDDATA_GET_DATA(obj);
1095 
1096  if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1097 #ifdef HAVE_MALLOC_USABLE_SIZE
1098  size += malloc_usable_size((void *)ptr);
1099 #endif
1100  }
1101 
1102  if (ptr && type->function.dsize) {
1103  size += type->function.dsize(ptr);
1104  }
1105  }
1106 
1107  return size;
1108 }
1109 
1110 const char *
1111 rb_objspace_data_type_name(VALUE obj)
1112 {
1113  if (RTYPEDDATA_P(obj)) {
1114  return RTYPEDDATA_TYPE(obj)->wrap_struct_name;
1115  }
1116  else {
1117  return 0;
1118  }
1119 }
1120 
1121 static enum rb_id_table_iterator_result
1122 cvar_table_free_i(VALUE value, void *ctx)
1123 {
1124  xfree((void *)value);
1125  return ID_TABLE_CONTINUE;
1126 }
1127 
1128 static inline void
1129 make_io_zombie(void *objspace, VALUE obj)
1130 {
1131  rb_io_t *fptr = RFILE(obj)->fptr;
1132  rb_gc_impl_make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
1133 }
1134 
1135 static bool
1136 rb_data_free(void *objspace, VALUE obj)
1137 {
1138  void *data = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
1139  if (data) {
1140  int free_immediately = false;
1141  void (*dfree)(void *);
1142 
1143  if (RTYPEDDATA_P(obj)) {
1144  free_immediately = (RTYPEDDATA(obj)->type->flags & RUBY_TYPED_FREE_IMMEDIATELY) != 0;
1145  dfree = RTYPEDDATA(obj)->type->function.dfree;
1146  }
1147  else {
1148  dfree = RDATA(obj)->dfree;
1149  }
1150 
1151  if (dfree) {
1152  if (dfree == RUBY_DEFAULT_FREE) {
1153  if (!RTYPEDDATA_P(obj) || !RTYPEDDATA_EMBEDDED_P(obj)) {
1154  xfree(data);
1155  RB_DEBUG_COUNTER_INC(obj_data_xfree);
1156  }
1157  }
1158  else if (free_immediately) {
1159  (*dfree)(data);
1160  if (RTYPEDDATA_TYPE(obj)->flags & RUBY_TYPED_EMBEDDABLE && !RTYPEDDATA_EMBEDDED_P(obj)) {
1161  xfree(data);
1162  }
1163 
1164  RB_DEBUG_COUNTER_INC(obj_data_imm_free);
1165  }
1166  else {
1167  rb_gc_impl_make_zombie(rb_gc_get_objspace(), obj, dfree, data);
1168  RB_DEBUG_COUNTER_INC(obj_data_zombie);
1169  return FALSE;
1170  }
1171  }
1172  else {
1173  RB_DEBUG_COUNTER_INC(obj_data_empty);
1174  }
1175  }
1176 
1177  return true;
1178 }
1179 
1180 void
1181 rb_gc_obj_free_vm_weak_references(VALUE obj)
1182 {
1183  if (FL_TEST(obj, FL_EXIVAR)) {
1184  rb_free_generic_ivar(obj);
1185  FL_UNSET(obj, FL_EXIVAR);
1186  }
1187 
1188  switch (BUILTIN_TYPE(obj)) {
1189  case T_STRING:
1190  if (FL_TEST(obj, RSTRING_FSTR)) {
1191  st_data_t fstr = (st_data_t)obj;
1192  st_delete(rb_vm_fstring_table(), &fstr, NULL);
1193  RB_DEBUG_COUNTER_INC(obj_str_fstr);
1194 
1195  FL_UNSET(obj, RSTRING_FSTR);
1196  }
1197  break;
1198  case T_SYMBOL:
1199  rb_gc_free_dsymbol(obj);
1200  break;
1201  case T_IMEMO:
1202  switch (imemo_type(obj)) {
1203  case imemo_callinfo:
1204  rb_vm_ci_free((const struct rb_callinfo *)obj);
1205  break;
1206  case imemo_ment:
1207  rb_free_method_entry_vm_weak_references((const rb_method_entry_t *)obj);
1208  break;
1209  default:
1210  break;
1211  }
1212  break;
1213  default:
1214  break;
1215  }
1216 }
1217 
1218 bool
1219 rb_gc_obj_free(void *objspace, VALUE obj)
1220 {
1221  RB_DEBUG_COUNTER_INC(obj_free);
1222 
1223  switch (BUILTIN_TYPE(obj)) {
1224  case T_NIL:
1225  case T_FIXNUM:
1226  case T_TRUE:
1227  case T_FALSE:
1228  rb_bug("obj_free() called for broken object");
1229  break;
1230  default:
1231  break;
1232  }
1233 
1234  switch (BUILTIN_TYPE(obj)) {
1235  case T_OBJECT:
1236  if (rb_shape_obj_too_complex(obj)) {
1237  RB_DEBUG_COUNTER_INC(obj_obj_too_complex);
1238  st_free_table(ROBJECT_IV_HASH(obj));
1239  }
1240  else if (RBASIC(obj)->flags & ROBJECT_EMBED) {
1241  RB_DEBUG_COUNTER_INC(obj_obj_embed);
1242  }
1243  else {
1244  xfree(ROBJECT(obj)->as.heap.ivptr);
1245  RB_DEBUG_COUNTER_INC(obj_obj_ptr);
1246  }
1247  break;
1248  case T_MODULE:
1249  case T_CLASS:
1250  rb_id_table_free(RCLASS_M_TBL(obj));
1251  rb_cc_table_free(obj);
1252  if (rb_shape_obj_too_complex(obj)) {
1253  st_free_table((st_table *)RCLASS_IVPTR(obj));
1254  }
1255  else {
1256  xfree(RCLASS_IVPTR(obj));
1257  }
1258 
1259  if (RCLASS_CONST_TBL(obj)) {
1260  rb_free_const_table(RCLASS_CONST_TBL(obj));
1261  }
1262  if (RCLASS_CVC_TBL(obj)) {
1263  rb_id_table_foreach_values(RCLASS_CVC_TBL(obj), cvar_table_free_i, NULL);
1264  rb_id_table_free(RCLASS_CVC_TBL(obj));
1265  }
1266  rb_class_remove_subclass_head(obj);
1267  rb_class_remove_from_module_subclasses(obj);
1268  rb_class_remove_from_super_subclasses(obj);
1269  if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
1270  xfree(RCLASS_SUPERCLASSES(obj));
1271  }
1272 
1273  (void)RB_DEBUG_COUNTER_INC_IF(obj_module_ptr, BUILTIN_TYPE(obj) == T_MODULE);
1274  (void)RB_DEBUG_COUNTER_INC_IF(obj_class_ptr, BUILTIN_TYPE(obj) == T_CLASS);
1275  break;
1276  case T_STRING:
1277  rb_str_free(obj);
1278  break;
1279  case T_ARRAY:
1280  rb_ary_free(obj);
1281  break;
1282  case T_HASH:
1283 #if USE_DEBUG_COUNTER
1284  switch (RHASH_SIZE(obj)) {
1285  case 0:
1286  RB_DEBUG_COUNTER_INC(obj_hash_empty);
1287  break;
1288  case 1:
1289  RB_DEBUG_COUNTER_INC(obj_hash_1);
1290  break;
1291  case 2:
1292  RB_DEBUG_COUNTER_INC(obj_hash_2);
1293  break;
1294  case 3:
1295  RB_DEBUG_COUNTER_INC(obj_hash_3);
1296  break;
1297  case 4:
1298  RB_DEBUG_COUNTER_INC(obj_hash_4);
1299  break;
1300  case 5:
1301  case 6:
1302  case 7:
1303  case 8:
1304  RB_DEBUG_COUNTER_INC(obj_hash_5_8);
1305  break;
1306  default:
1307  GC_ASSERT(RHASH_SIZE(obj) > 8);
1308  RB_DEBUG_COUNTER_INC(obj_hash_g8);
1309  }
1310 
1311  if (RHASH_AR_TABLE_P(obj)) {
1312  if (RHASH_AR_TABLE(obj) == NULL) {
1313  RB_DEBUG_COUNTER_INC(obj_hash_null);
1314  }
1315  else {
1316  RB_DEBUG_COUNTER_INC(obj_hash_ar);
1317  }
1318  }
1319  else {
1320  RB_DEBUG_COUNTER_INC(obj_hash_st);
1321  }
1322 #endif
1323 
1324  rb_hash_free(obj);
1325  break;
1326  case T_REGEXP:
1327  if (RREGEXP(obj)->ptr) {
1328  onig_free(RREGEXP(obj)->ptr);
1329  RB_DEBUG_COUNTER_INC(obj_regexp_ptr);
1330  }
1331  break;
1332  case T_DATA:
1333  if (!rb_data_free(objspace, obj)) return false;
1334  break;
1335  case T_MATCH:
1336  {
1337  rb_matchext_t *rm = RMATCH_EXT(obj);
1338 #if USE_DEBUG_COUNTER
1339  if (rm->regs.num_regs >= 8) {
1340  RB_DEBUG_COUNTER_INC(obj_match_ge8);
1341  }
1342  else if (rm->regs.num_regs >= 4) {
1343  RB_DEBUG_COUNTER_INC(obj_match_ge4);
1344  }
1345  else if (rm->regs.num_regs >= 1) {
1346  RB_DEBUG_COUNTER_INC(obj_match_under4);
1347  }
1348 #endif
1349  onig_region_free(&rm->regs, 0);
1350  xfree(rm->char_offset);
1351 
1352  RB_DEBUG_COUNTER_INC(obj_match_ptr);
1353  }
1354  break;
1355  case T_FILE:
1356  if (RFILE(obj)->fptr) {
1357  make_io_zombie(objspace, obj);
1358  RB_DEBUG_COUNTER_INC(obj_file_ptr);
1359  return FALSE;
1360  }
1361  break;
1362  case T_RATIONAL:
1363  RB_DEBUG_COUNTER_INC(obj_rational);
1364  break;
1365  case T_COMPLEX:
1366  RB_DEBUG_COUNTER_INC(obj_complex);
1367  break;
1368  case T_MOVED:
1369  break;
1370  case T_ICLASS:
1371  /* Basically, T_ICLASS shares its method table with the module */
1372  if (RICLASS_OWNS_M_TBL_P(obj)) {
1373  /* Method table is not shared for origin iclasses of classes */
1374  rb_id_table_free(RCLASS_M_TBL(obj));
1375  }
1376  if (RCLASS_CALLABLE_M_TBL(obj) != NULL) {
1377  rb_id_table_free(RCLASS_CALLABLE_M_TBL(obj));
1378  }
1379  rb_class_remove_subclass_head(obj);
1380  rb_cc_table_free(obj);
1381  rb_class_remove_from_module_subclasses(obj);
1382  rb_class_remove_from_super_subclasses(obj);
1383 
1384  RB_DEBUG_COUNTER_INC(obj_iclass_ptr);
1385  break;
1386 
1387  case T_FLOAT:
1388  RB_DEBUG_COUNTER_INC(obj_float);
1389  break;
1390 
1391  case T_BIGNUM:
1392  if (!BIGNUM_EMBED_P(obj) && BIGNUM_DIGITS(obj)) {
1393  xfree(BIGNUM_DIGITS(obj));
1394  RB_DEBUG_COUNTER_INC(obj_bignum_ptr);
1395  }
1396  else {
1397  RB_DEBUG_COUNTER_INC(obj_bignum_embed);
1398  }
1399  break;
1400 
1401  case T_NODE:
1402  UNEXPECTED_NODE(obj_free);
1403  break;
1404 
1405  case T_STRUCT:
1406  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) ||
1407  RSTRUCT(obj)->as.heap.ptr == NULL) {
1408  RB_DEBUG_COUNTER_INC(obj_struct_embed);
1409  }
1410  else {
1411  xfree((void *)RSTRUCT(obj)->as.heap.ptr);
1412  RB_DEBUG_COUNTER_INC(obj_struct_ptr);
1413  }
1414  break;
1415 
1416  case T_SYMBOL:
1417  RB_DEBUG_COUNTER_INC(obj_symbol);
1418  break;
1419 
1420  case T_IMEMO:
1421  rb_imemo_free((VALUE)obj);
1422  break;
1423 
1424  default:
1425  rb_bug("gc_sweep(): unknown data type 0x%x(%p) 0x%"PRIxVALUE,
1426  BUILTIN_TYPE(obj), (void*)obj, RBASIC(obj)->flags);
1427  }
1428 
1429  if (FL_TEST(obj, FL_FINALIZE)) {
1430  rb_gc_impl_make_zombie(rb_gc_get_objspace(), obj, 0, 0);
1431  return FALSE;
1432  }
1433  else {
1434  return TRUE;
1435  }
1436 }
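
/* Return-value contract: true means the object's resources are fully
 * released and its slot may be reused immediately; false means the object
 * was turned into a zombie (deferred dfree, pending IO finalization, or a
 * user-defined finalizer via FL_FINALIZE) and must stay on the heap until
 * the zombie is processed. */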
1437 
1438 void
1439 rb_objspace_set_event_hook(const rb_event_flag_t event)
1440 {
1441  rb_gc_impl_set_event_hook(rb_gc_get_objspace(), event);
1442 }
1443 
1444 static int
1445 internal_object_p(VALUE obj)
1446 {
1447  void *ptr = asan_unpoison_object_temporary(obj);
1448 
1449  if (RBASIC(obj)->flags) {
1450  switch (BUILTIN_TYPE(obj)) {
1451  case T_NODE:
1452  UNEXPECTED_NODE(internal_object_p);
1453  break;
1454  case T_NONE:
1455  case T_MOVED:
1456  case T_IMEMO:
1457  case T_ICLASS:
1458  case T_ZOMBIE:
1459  break;
1460  case T_CLASS:
1461  if (!RBASIC(obj)->klass) break;
1462  if (RCLASS_SINGLETON_P(obj)) {
1463  return rb_singleton_class_internal_p(obj);
1464  }
1465  return 0;
1466  default:
1467  if (!RBASIC(obj)->klass) break;
1468  return 0;
1469  }
1470  }
1471  if (ptr || !RBASIC(obj)->flags) {
1472  asan_poison_object(obj);
1473  }
1474  return 1;
1475 }
1476 
1477 int
1478 rb_objspace_internal_object_p(VALUE obj)
1479 {
1480  return internal_object_p(obj);
1481 }
1482 
1484  size_t num;
1485  VALUE of;
1486 };
1487 
1488 static int
1489 os_obj_of_i(void *vstart, void *vend, size_t stride, void *data)
1490 {
1491  struct os_each_struct *oes = (struct os_each_struct *)data;
1492 
1493  VALUE v = (VALUE)vstart;
1494  for (; v != (VALUE)vend; v += stride) {
1495  if (!internal_object_p(v)) {
1496  if (!oes->of || rb_obj_is_kind_of(v, oes->of)) {
1497  if (!rb_multi_ractor_p() || rb_ractor_shareable_p(v)) {
1498  rb_yield(v);
1499  oes->num++;
1500  }
1501  }
1502  }
1503  }
1504 
1505  return 0;
1506 }
1507 
1508 static VALUE
1509 os_obj_of(VALUE of)
1510 {
1511  struct os_each_struct oes;
1512 
1513  oes.num = 0;
1514  oes.of = of;
1515  rb_objspace_each_objects(os_obj_of_i, &oes);
1516  return SIZET2NUM(oes.num);
1517 }
1518 
1519 /*
1520  * call-seq:
1521  * ObjectSpace.each_object([module]) {|obj| ... } -> integer
1522  * ObjectSpace.each_object([module]) -> an_enumerator
1523  *
1524  * Calls the block once for each living, nonimmediate object in this
1525  * Ruby process. If <i>module</i> is specified, calls the block
1526  * for only those classes or modules that match (or are a subclass of)
1527  * <i>module</i>. Returns the number of objects found. Immediate
1528  * objects (<code>Fixnum</code>s, <code>Symbol</code>s,
1529  * <code>true</code>, <code>false</code>, and <code>nil</code>) are
1530  * never returned. In the example below, #each_object returns both
1531  * the numbers we defined and several constants defined in the Math
1532  * module.
1533  *
1534  * If no block is given, an enumerator is returned instead.
1535  *
1536  * a = 102.7
1537  * b = 95 # Won't be returned
1538  * c = 12345678987654321
1539  * count = ObjectSpace.each_object(Numeric) {|x| p x }
1540  * puts "Total count: #{count}"
1541  *
1542  * <em>produces:</em>
1543  *
1544  * 12345678987654321
1545  * 102.7
1546  * 2.71828182845905
1547  * 3.14159265358979
1548  * 2.22044604925031e-16
1549  * 1.7976931348623157e+308
1550  * 2.2250738585072e-308
1551  * Total count: 7
1552  *
1553  */
1554 
1555 static VALUE
1556 os_each_obj(int argc, VALUE *argv, VALUE os)
1557 {
1558  VALUE of;
1559 
1560  of = (!rb_check_arity(argc, 0, 1) ? 0 : argv[0]);
1561  RETURN_ENUMERATOR(os, 1, &of);
1562  return os_obj_of(of);
1563 }
1564 
1565 /*
1566  * call-seq:
1567  * ObjectSpace.undefine_finalizer(obj)
1568  *
1569  * Removes all finalizers for <i>obj</i>.
1570  *
1571  */
1572 
1573 static VALUE
1574 undefine_final(VALUE os, VALUE obj)
1575 {
1576  rb_check_frozen(obj);
1577 
1578  rb_gc_impl_undefine_finalizer(rb_gc_get_objspace(), obj);
1579 
1580  return obj;
1581 }
1582 
1583 static void
1584 should_be_callable(VALUE block)
1585 {
1586  if (!rb_obj_respond_to(block, idCall, TRUE)) {
1587  rb_raise(rb_eArgError, "wrong type argument %"PRIsVALUE" (should be callable)",
1588  rb_obj_class(block));
1589  }
1590 }
1591 
1592 static void
1593 should_be_finalizable(VALUE obj)
1594 {
1595  if (!FL_ABLE(obj)) {
1596  rb_raise(rb_eArgError, "cannot define finalizer for %s",
1597  rb_obj_classname(obj));
1598  }
1599  rb_check_frozen(obj);
1600 }
1601 
1602 void
1603 rb_gc_copy_finalizer(VALUE dest, VALUE obj)
1604 {
1605  rb_gc_impl_copy_finalizer(rb_gc_get_objspace(), dest, obj);
1606 }
1607 
1608 /*
1609  * call-seq:
1610  * ObjectSpace.define_finalizer(obj, aProc=proc())
1611  *
1612  * Adds <i>aProc</i> as a finalizer, to be called after <i>obj</i>
1613  * was destroyed. The object ID of the <i>obj</i> will be passed
1614  * as an argument to <i>aProc</i>. If <i>aProc</i> is a lambda or
1615  * method, make sure it can be called with a single argument.
1616  *
1617  * The return value is an array <code>[0, aProc]</code>.
1618  *
1619  * The two recommended patterns are to either create the finalizer proc
1620  * in a non-instance method where it can safely capture the needed state,
1621  * or to use a custom callable object that stores the needed state
1622  * explicitly as instance variables.
1623  *
1624  * class Foo
1625  * def initialize(data_needed_for_finalization)
1626  * ObjectSpace.define_finalizer(self, self.class.create_finalizer(data_needed_for_finalization))
1627  * end
1628  *
1629  * def self.create_finalizer(data_needed_for_finalization)
1630  * proc {
1631  * puts "finalizing #{data_needed_for_finalization}"
1632  * }
1633  * end
1634  * end
1635  *
1636  * class Bar
1637  * class Remover
1638  * def initialize(data_needed_for_finalization)
1639  * @data_needed_for_finalization = data_needed_for_finalization
1640  * end
1641  *
1642  * def call(id)
1643  * puts "finalizing #{@data_needed_for_finalization}"
1644  * end
1645  * end
1646  *
1647  * def initialize(data_needed_for_finalization)
1648  * ObjectSpace.define_finalizer(self, Remover.new(data_needed_for_finalization))
1649  * end
1650  * end
1651  *
1652  * Note that if your finalizer references the object to be
1653  * finalized it will never be run on GC, although it will still be
1654  * run at exit. You will get a warning if you capture the object
1655  * to be finalized as the receiver of the finalizer.
1656  *
1657  * class CapturesSelf
1658  * def initialize(name)
1659  * ObjectSpace.define_finalizer(self, proc {
1660  * # this finalizer will only be run on exit
1661  * puts "finalizing #{name}"
1662  * })
1663  * end
1664  * end
1665  *
1666  * Also note that finalization can be unpredictable and is never guaranteed
1667  * to be run except on exit.
1668  */
1669 
1670 static VALUE
1671 define_final(int argc, VALUE *argv, VALUE os)
1672 {
1673  VALUE obj, block;
1674 
1675  rb_scan_args(argc, argv, "11", &obj, &block);
1676  if (argc == 1) {
1677  block = rb_block_proc();
1678  }
1679 
1680  if (rb_callable_receiver(block) == obj) {
1681  rb_warn("finalizer references object to be finalized");
1682  }
1683 
1684  return rb_define_finalizer(obj, block);
1685 }
1686 
1687 VALUE
1688 rb_define_finalizer(VALUE obj, VALUE block)
1689 {
1690  should_be_finalizable(obj);
1691  should_be_callable(block);
1692 
1693  block = rb_gc_impl_define_finalizer(rb_gc_get_objspace(), obj, block);
1694 
1695  block = rb_ary_new3(2, INT2FIX(0), block);
1696  OBJ_FREEZE(block);
1697  return block;
1698 }
1699 
1700 void
1701 rb_objspace_call_finalizer(void)
1702 {
1703  rb_gc_impl_shutdown_call_finalizer(rb_gc_get_objspace());
1704 }
1705 
1706 void
1707 rb_objspace_free_objects(void *objspace)
1708 {
1709  rb_gc_impl_shutdown_free_objects(objspace);
1710 }
1711 
1712 int
1713 rb_objspace_garbage_object_p(VALUE obj)
1714 {
1715  return rb_gc_impl_garbage_object_p(rb_gc_get_objspace(), obj);
1716 }
1717 
1718 /*
1719  * call-seq:
1720  * ObjectSpace._id2ref(object_id) -> an_object
1721  *
1722  * Converts an object id to a reference to the object. May not be
1723  * called on an object id passed as a parameter to a finalizer.
1724  *
1725  * s = "I am a string" #=> "I am a string"
1726  * r = ObjectSpace._id2ref(s.object_id) #=> "I am a string"
1727  * r == s #=> true
1728  *
1729  * On multi-ractor mode, if the object is not shareable, it raises
1730  * RangeError.
1731  */
1732 
1733 static VALUE
1734 id2ref(VALUE objid)
1735 {
1736 #if SIZEOF_LONG == SIZEOF_VOIDP
1737 #define NUM2PTR(x) NUM2ULONG(x)
1738 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1739 #define NUM2PTR(x) NUM2ULL(x)
1740 #endif
1741  objid = rb_to_int(objid);
1742  if (FIXNUM_P(objid) || rb_big_size(objid) <= SIZEOF_VOIDP) {
1743  VALUE ptr = NUM2PTR(objid);
1744  if (SPECIAL_CONST_P(ptr)) {
1745  if (ptr == Qtrue) return Qtrue;
1746  if (ptr == Qfalse) return Qfalse;
1747  if (NIL_P(ptr)) return Qnil;
1748  if (FIXNUM_P(ptr)) return ptr;
1749  if (FLONUM_P(ptr)) return ptr;
1750 
1751  if (SYMBOL_P(ptr)) {
1752  // Check that the symbol is valid
1753  if (rb_static_id_valid_p(SYM2ID(ptr))) {
1754  return ptr;
1755  }
1756  else {
1757  rb_raise(rb_eRangeError, "%p is not a symbol id value", (void *)ptr);
1758  }
1759  }
1760 
1761  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is not an id value", rb_int2str(objid, 10));
1762  }
1763  }
1764 
1765  VALUE obj = rb_gc_impl_object_id_to_ref(rb_gc_get_objspace(), objid);
1766  if (!rb_multi_ractor_p() || rb_ractor_shareable_p(obj)) {
1767  return obj;
1768  }
1769  else {
1770  rb_raise(rb_eRangeError, "%+"PRIsVALUE" is the id of an unshareable object in multi-ractor mode", rb_int2str(objid, 10));
1771  }
1772 }
1773 
1774 /* :nodoc: */
1775 static VALUE
1776 os_id2ref(VALUE os, VALUE objid)
1777 {
1778  return id2ref(objid);
1779 }
1780 
1781 static VALUE
1782 rb_find_object_id(void *objspace, VALUE obj, VALUE (*get_heap_object_id)(void *, VALUE))
1783 {
1784  if (SPECIAL_CONST_P(obj)) {
1785 #if SIZEOF_LONG == SIZEOF_VOIDP
1786  return LONG2NUM((SIGNED_VALUE)obj);
1787 #else
1788  return LL2NUM((SIGNED_VALUE)obj);
1789 #endif
1790  }
1791 
1792  return get_heap_object_id(objspace, obj);
1793 }
1794 
1795 static VALUE
1796 nonspecial_obj_id(void *_objspace, VALUE obj)
1797 {
1798 #if SIZEOF_LONG == SIZEOF_VOIDP
1799  return (VALUE)((SIGNED_VALUE)(obj)|FIXNUM_FLAG);
1800 #elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
1801  return LL2NUM((SIGNED_VALUE)(obj) / 2);
1802 #else
1803 # error not supported
1804 #endif
1805 }
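
/* Encoding note for the SIZEOF_LONG == SIZEOF_VOIDP branch: OR-ing
 * FIXNUM_FLAG into an aligned heap address produces a Fixnum whose numeric
 * value is the address halved, e.g. an object at 0x7f0000001000 yields the
 * memory id 0x3f8000000800. The LL2NUM branch computes the same division by
 * 2 explicitly. */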
1806 
1807 VALUE
1808 rb_memory_id(VALUE obj)
1809 {
1810  return rb_find_object_id(NULL, obj, nonspecial_obj_id);
1811 }
1812 
1813 /*
1814  * Document-method: __id__
1815  * Document-method: object_id
1816  *
1817  * call-seq:
1818  * obj.__id__ -> integer
1819  * obj.object_id -> integer
1820  *
1821  * Returns an integer identifier for +obj+.
1822  *
1823  * The same number will be returned on all calls to +object_id+ for a given
1824  * object, and no two active objects will share an id.
1825  *
1826  * Note that some objects of builtin classes are reused for optimization.
1827  * This is the case for immediate values and frozen string literals.
1828  *
1829  * BasicObject implements +__id__+, Kernel implements +object_id+.
1830  *
1831  * Immediate values are not passed by reference but are passed by value:
1832  * +nil+, +true+, +false+, Fixnums, Symbols, and some Floats.
1833  *
1834  * Object.new.object_id == Object.new.object_id # => false
1835  * (21 * 2).object_id == (21 * 2).object_id # => true
1836  * "hello".object_id == "hello".object_id # => false
1837  * "hi".freeze.object_id == "hi".freeze.object_id # => true
1838  */
1839 
1840 VALUE
1841 rb_obj_id(VALUE obj)
1842 {
1843  /* If obj is an immediate, the object ID is obj directly converted to a Numeric.
1844  * Otherwise, the object ID is a Numeric that is a non-zero multiple of
1845  * (RUBY_IMMEDIATE_MASK + 1) which guarantees that it does not collide with
1846  * any immediates. */
1847  return rb_find_object_id(rb_gc_get_objspace(), obj, rb_gc_impl_object_id);
1848 }
1849 
1850 static enum rb_id_table_iterator_result
1851 cc_table_memsize_i(VALUE ccs_ptr, void *data_ptr)
1852 {
1853  size_t *total_size = data_ptr;
1854  struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
1855  *total_size += sizeof(*ccs);
1856  *total_size += sizeof(ccs->entries[0]) * ccs->capa;
1857  return ID_TABLE_CONTINUE;
1858 }
1859 
1860 static size_t
1861 cc_table_memsize(struct rb_id_table *cc_table)
1862 {
1863  size_t total = rb_id_table_memsize(cc_table);
1864  rb_id_table_foreach_values(cc_table, cc_table_memsize_i, &total);
1865  return total;
1866 }
1867 
1868 size_t
1869 rb_obj_memsize_of(VALUE obj)
1870 {
1871  size_t size = 0;
1872 
1873  if (SPECIAL_CONST_P(obj)) {
1874  return 0;
1875  }
1876 
1877  if (FL_TEST(obj, FL_EXIVAR)) {
1878  size += rb_generic_ivar_memsize(obj);
1879  }
1880 
1881  switch (BUILTIN_TYPE(obj)) {
1882  case T_OBJECT:
1883  if (rb_shape_obj_too_complex(obj)) {
1884  size += rb_st_memsize(ROBJECT_IV_HASH(obj));
1885  }
1886  else if (!(RBASIC(obj)->flags & ROBJECT_EMBED)) {
1887  size += ROBJECT_IV_CAPACITY(obj) * sizeof(VALUE);
1888  }
1889  break;
1890  case T_MODULE:
1891  case T_CLASS:
1892  if (RCLASS_M_TBL(obj)) {
1893  size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1894  }
1895  // class IV sizes are allocated as powers of two
1896  size += SIZEOF_VALUE << bit_length(RCLASS_IV_COUNT(obj));
1897  if (RCLASS_CVC_TBL(obj)) {
1898  size += rb_id_table_memsize(RCLASS_CVC_TBL(obj));
1899  }
1900  if (RCLASS_EXT(obj)->const_tbl) {
1901  size += rb_id_table_memsize(RCLASS_EXT(obj)->const_tbl);
1902  }
1903  if (RCLASS_CC_TBL(obj)) {
1904  size += cc_table_memsize(RCLASS_CC_TBL(obj));
1905  }
1906  if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
1907  size += (RCLASS_SUPERCLASS_DEPTH(obj) + 1) * sizeof(VALUE);
1908  }
1909  break;
1910  case T_ICLASS:
1911  if (RICLASS_OWNS_M_TBL_P(obj)) {
1912  if (RCLASS_M_TBL(obj)) {
1913  size += rb_id_table_memsize(RCLASS_M_TBL(obj));
1914  }
1915  }
1916  if (RCLASS_CC_TBL(obj)) {
1917  size += cc_table_memsize(RCLASS_CC_TBL(obj));
1918  }
1919  break;
1920  case T_STRING:
1921  size += rb_str_memsize(obj);
1922  break;
1923  case T_ARRAY:
1924  size += rb_ary_memsize(obj);
1925  break;
1926  case T_HASH:
1927  if (RHASH_ST_TABLE_P(obj)) {
1928  VM_ASSERT(RHASH_ST_TABLE(obj) != NULL);
1929  /* st_table is in the slot */
1930  size += st_memsize(RHASH_ST_TABLE(obj)) - sizeof(st_table);
1931  }
1932  break;
1933  case T_REGEXP:
1934  if (RREGEXP_PTR(obj)) {
1935  size += onig_memsize(RREGEXP_PTR(obj));
1936  }
1937  break;
1938  case T_DATA:
1939  size += rb_objspace_data_type_memsize(obj);
1940  break;
1941  case T_MATCH:
1942  {
1943  rb_matchext_t *rm = RMATCH_EXT(obj);
1944  size += onig_region_memsize(&rm->regs);
1945  size += sizeof(struct rmatch_offset) * rm->char_offset_num_allocated;
1946  }
1947  break;
1948  case T_FILE:
1949  if (RFILE(obj)->fptr) {
1950  size += rb_io_memsize(RFILE(obj)->fptr);
1951  }
1952  break;
1953  case T_RATIONAL:
1954  case T_COMPLEX:
1955  break;
1956  case T_IMEMO:
1957  size += rb_imemo_memsize(obj);
1958  break;
1959 
1960  case T_FLOAT:
1961  case T_SYMBOL:
1962  break;
1963 
1964  case T_BIGNUM:
1965  if (!(RBASIC(obj)->flags & BIGNUM_EMBED_FLAG) && BIGNUM_DIGITS(obj)) {
1966  size += BIGNUM_LEN(obj) * sizeof(BDIGIT);
1967  }
1968  break;
1969 
1970  case T_NODE:
1971  UNEXPECTED_NODE(obj_memsize_of);
1972  break;
1973 
1974  case T_STRUCT:
1975  if ((RBASIC(obj)->flags & RSTRUCT_EMBED_LEN_MASK) == 0 &&
1976  RSTRUCT(obj)->as.heap.ptr) {
1977  size += sizeof(VALUE) * RSTRUCT_LEN(obj);
1978  }
1979  break;
1980 
1981  case T_ZOMBIE:
1982  case T_MOVED:
1983  break;
1984 
1985  default:
1986  rb_bug("objspace/memsize_of(): unknown data type 0x%x(%p)",
1987  BUILTIN_TYPE(obj), (void*)obj);
1988  }
1989 
1990  return size + rb_gc_obj_slot_size(obj);
1991 }
1992 
1993 static int
1994 set_zero(st_data_t key, st_data_t val, st_data_t arg)
1995 {
1996  VALUE k = (VALUE)key;
1997  VALUE hash = (VALUE)arg;
1998  rb_hash_aset(hash, k, INT2FIX(0));
1999  return ST_CONTINUE;
2000 }
2001 
2002 struct count_objects_data {
2003  size_t counts[T_MASK+1];
2004  size_t freed;
2005  size_t total;
2006 };
2007 
2008 static void
2009 count_objects_i(VALUE obj, void *d)
2010 {
2011  struct count_objects_data *data = (struct count_objects_data *)d;
2012 
2013  if (RBASIC(obj)->flags) {
2014  data->counts[BUILTIN_TYPE(obj)]++;
2015  }
2016  else {
2017  data->freed++;
2018  }
2019 
2020  data->total++;
2021 }
2022 
2023 /*
2024  * call-seq:
2025  * ObjectSpace.count_objects([result_hash]) -> hash
2026  *
2027  * Counts all objects grouped by type.
2028  *
2029  * It returns a hash, such as:
2030  * {
2031  * :TOTAL=>10000,
2032  * :FREE=>3011,
2033  * :T_OBJECT=>6,
2034  * :T_CLASS=>404,
2035  * # ...
2036  * }
2037  *
2038  * The contents of the returned hash are implementation specific.
2039  * They may change in the future.
2040  *
2041  * The keys starting with +:T_+ mean live objects.
2042  * For example, +:T_ARRAY+ is the number of arrays.
2043  * +:FREE+ means object slots which are not currently used.
2044  * +:TOTAL+ means the sum of the above.
2045  *
2046  * If the optional argument +result_hash+ is given,
2047  * it is overwritten and returned. This is intended to avoid the probe effect.
2048  *
2049  * h = {}
2050  * ObjectSpace.count_objects(h)
2051  * puts h
2052  * # => { :TOTAL=>10000, :T_CLASS=>158280, :T_MODULE=>20672, :T_STRING=>527249 }
2053  *
2054  * This method is only expected to work on C Ruby.
2055  *
2056  */
2057 
2058 static VALUE
2059 count_objects(int argc, VALUE *argv, VALUE os)
2060 {
2061  struct count_objects_data data = { 0 };
2062  VALUE hash = Qnil;
2063 
2064  if (rb_check_arity(argc, 0, 1) == 1) {
2065  hash = argv[0];
2066  if (!RB_TYPE_P(hash, T_HASH))
2067  rb_raise(rb_eTypeError, "non-hash given");
2068  }
2069 
2070  rb_gc_impl_each_object(rb_gc_get_objspace(), count_objects_i, &data);
2071 
2072  if (NIL_P(hash)) {
2073  hash = rb_hash_new();
2074  }
2075  else if (!RHASH_EMPTY_P(hash)) {
2076  rb_hash_stlike_foreach(hash, set_zero, hash);
2077  }
2078  rb_hash_aset(hash, ID2SYM(rb_intern("TOTAL")), SIZET2NUM(data.total));
2079  rb_hash_aset(hash, ID2SYM(rb_intern("FREE")), SIZET2NUM(data.freed));
2080 
2081  for (size_t i = 0; i <= T_MASK; i++) {
2082  VALUE type = type_sym(i);
2083  if (data.counts[i])
2084  rb_hash_aset(hash, type, SIZET2NUM(data.counts[i]));
2085  }
2086 
2087  return hash;
2088 }
2089 
2090 #define SET_STACK_END SET_MACHINE_STACK_END(&ec->machine.stack_end)
2091 
2092 #define STACK_START (ec->machine.stack_start)
2093 #define STACK_END (ec->machine.stack_end)
2094 #define STACK_LEVEL_MAX (ec->machine.stack_maxsize/sizeof(VALUE))
2095 
2096 #if STACK_GROW_DIRECTION < 0
2097 # define STACK_LENGTH (size_t)(STACK_START - STACK_END)
2098 #elif STACK_GROW_DIRECTION > 0
2099 # define STACK_LENGTH (size_t)(STACK_END - STACK_START + 1)
2100 #else
2101 # define STACK_LENGTH ((STACK_END < STACK_START) ? (size_t)(STACK_START - STACK_END) \
2102  : (size_t)(STACK_END - STACK_START + 1))
2103 #endif
2104 #if !STACK_GROW_DIRECTION
2105 int ruby_stack_grow_direction;
2106 int
2107 ruby_get_stack_grow_direction(volatile VALUE *addr)
2108 {
2109  VALUE *end;
2110  SET_MACHINE_STACK_END(&end);
2111 
2112  if (end > addr) return ruby_stack_grow_direction = 1;
2113  return ruby_stack_grow_direction = -1;
2114 }
2115 #endif
2116 
2117 size_t
2118 ruby_stack_length(VALUE **p)
2119 {
2120  rb_execution_context_t *ec = GET_EC();
2121  SET_STACK_END;
2122  if (p) *p = STACK_UPPER(STACK_END, STACK_START, STACK_END);
2123  return STACK_LENGTH;
2124 }
2125 
2126 #define PREVENT_STACK_OVERFLOW 1
2127 #ifndef PREVENT_STACK_OVERFLOW
2128 #if !(defined(POSIX_SIGNAL) && defined(SIGSEGV) && defined(HAVE_SIGALTSTACK))
2129 # define PREVENT_STACK_OVERFLOW 1
2130 #else
2131 # define PREVENT_STACK_OVERFLOW 0
2132 #endif
2133 #endif
2134 #if PREVENT_STACK_OVERFLOW && !defined(__EMSCRIPTEN__)
2135 static int
2136 stack_check(rb_execution_context_t *ec, int water_mark)
2137 {
2138  SET_STACK_END;
2139 
2140  size_t length = STACK_LENGTH;
2141  size_t maximum_length = STACK_LEVEL_MAX - water_mark;
2142 
2143  return length > maximum_length;
2144 }
2145 #else
2146 #define stack_check(ec, water_mark) FALSE
2147 #endif
2148 
2149 #define STACKFRAME_FOR_CALL_CFUNC 2048
2150 
2151 int
2152 rb_ec_stack_check(rb_execution_context_t *ec)
2153 {
2154  return stack_check(ec, STACKFRAME_FOR_CALL_CFUNC);
2155 }
2156 
2157 int
2158 ruby_stack_check(void)
2159 {
2160  return stack_check(GET_EC(), STACKFRAME_FOR_CALL_CFUNC);
2161 }
2162 
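/* A sketch of how a deeply recursive C function can use the public
 * ruby_stack_check() above to bail out before the machine stack
 * overflows (the function and error message are hypothetical):
 *
 *     static VALUE
 *     deep_walk(VALUE node)
 *     {
 *         if (ruby_stack_check()) {
 *             rb_raise(rb_eSysStackError, "stack level too deep");
 *         }
 *         // ... recurse into children ...
 *     }
 */
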
2163 /* ==================== Marking ==================== */
2164 
2165 #define RB_GC_MARK_OR_TRAVERSE(func, obj_or_ptr, obj, check_obj) do { \
2166  if (!RB_SPECIAL_CONST_P(obj)) { \
2167  rb_vm_t *vm = GET_VM(); \
2168  void *objspace = vm->gc.objspace; \
2169  if (LIKELY(vm->gc.mark_func_data == NULL)) { \
2170  GC_ASSERT(rb_gc_impl_during_gc_p(objspace)); \
2171  (func)(objspace, (obj_or_ptr)); \
2172  } \
2173  else if (check_obj ? \
2174  rb_gc_impl_pointer_to_heap_p(objspace, (const void *)obj) && \
2175  !rb_gc_impl_garbage_object_p(objspace, obj) : \
2176  true) { \
2177  GC_ASSERT(!rb_gc_impl_during_gc_p(objspace)); \
2178  struct gc_mark_func_data_struct *mark_func_data = vm->gc.mark_func_data; \
2179  vm->gc.mark_func_data = NULL; \
2180  mark_func_data->mark_func((obj), mark_func_data->data); \
2181  vm->gc.mark_func_data = mark_func_data; \
2182  } \
2183  } \
2184 } while (0)
2185 
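/* The macro above runs in one of two modes. During an actual GC, the
 * object is handed to the GC implementation. Outside of GC, installing
 * vm->gc.mark_func_data reroutes every mark call to a user callback,
 * which is how rb_objspace_reachable_objects_from() (later in this file)
 * reuses the mark functions for object-graph traversal:
 *
 *     struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
 *     struct gc_mark_func_data_struct mfd = { .mark_func = cb, .data = d };
 *     vm->gc.mark_func_data = &mfd;
 *     rb_gc_mark_children(objspace, obj);  // invokes cb for each child
 *     vm->gc.mark_func_data = prev_mfd;
 */
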
2186 static inline void
2187 gc_mark_internal(VALUE obj)
2188 {
2189  RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark, obj, obj, false);
2190 }
2191 
2192 void
2193 rb_gc_mark_movable(VALUE obj)
2194 {
2195  gc_mark_internal(obj);
2196 }
2197 
2198 void
2199 rb_gc_mark_and_move(VALUE *ptr)
2200 {
2201  RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_move, ptr, *ptr, false);
2202 }
2203 
2204 static inline void
2205 gc_mark_and_pin_internal(VALUE obj)
2206 {
2207  RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_and_pin, obj, obj, false);
2208 }
2209 
2210 void
2211 rb_gc_mark(VALUE obj)
2212 {
2213  gc_mark_and_pin_internal(obj);
2214 }
2215 
2216 static inline void
2217 gc_mark_maybe_internal(VALUE obj)
2218 {
2219  RB_GC_MARK_OR_TRAVERSE(rb_gc_impl_mark_maybe, obj, obj, true);
2220 }
2221 
2222 void
2223 rb_gc_mark_maybe(VALUE obj)
2224 {
2225  gc_mark_maybe_internal(obj);
2226 }
2227 
2228 void
2229 rb_gc_mark_weak(VALUE *ptr)
2230 {
2231  if (RB_SPECIAL_CONST_P(*ptr)) return;
2232 
2233  rb_vm_t *vm = GET_VM();
2234  void *objspace = vm->gc.objspace;
2235  if (LIKELY(vm->gc.mark_func_data == NULL)) {
2236  GC_ASSERT(rb_gc_impl_during_gc_p(objspace));
2237 
2238  rb_gc_impl_mark_weak(objspace, ptr);
2239  }
2240  else {
2241  GC_ASSERT(!rb_gc_impl_during_gc_p(objspace));
2242  }
2243 }
2244 
2245 void
2246 rb_gc_remove_weak(VALUE parent_obj, VALUE *ptr)
2247 {
2248  rb_gc_impl_remove_weak(rb_gc_get_objspace(), parent_obj, ptr);
2249 }
2250 
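/* Sketch of weak marking from a typed data mark function (the struct and
 * field are hypothetical): the slot does not keep its referent alive, and
 * rb_gc_remove_weak() must be called if the owner is freed while its weak
 * reference is still registered during a GC.
 *
 *     struct cache { VALUE entry; };
 *
 *     static void
 *     cache_mark(void *ptr)
 *     {
 *         struct cache *c = ptr;
 *         rb_gc_mark_weak(&c->entry);  // weak: GC may clear this slot
 *     }
 */
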
2251 ATTRIBUTE_NO_ADDRESS_SAFETY_ANALYSIS(static void each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data));
2252 static void
2253 each_location(register const VALUE *x, register long n, void (*cb)(VALUE, void *), void *data)
2254 {
2255  VALUE v;
2256  while (n--) {
2257  v = *x;
2258  cb(v, data);
2259  x++;
2260  }
2261 }
2262 
2263 static void
2264 each_location_ptr(const VALUE *start, const VALUE *end, void (*cb)(VALUE, void *), void *data)
2265 {
2266  if (end <= start) return;
2267  each_location(start, end - start, cb, data);
2268 }
2269 
2270 static void
2271 gc_mark_maybe_each_location(VALUE obj, void *data)
2272 {
2273  gc_mark_maybe_internal(obj);
2274 }
2275 
2276 void
2277 rb_gc_mark_locations(const VALUE *start, const VALUE *end)
2278 {
2279  each_location_ptr(start, end, gc_mark_maybe_each_location, NULL);
2280 }
2281 
2282 void
2283 rb_gc_mark_values(long n, const VALUE *values)
2284 {
2285  for (long i = 0; i < n; i++) {
2286  gc_mark_internal(values[i]);
2287  }
2288 }
2289 
2290 void
2291 rb_gc_mark_vm_stack_values(long n, const VALUE *values)
2292 {
2293  for (long i = 0; i < n; i++) {
2294  gc_mark_and_pin_internal(values[i]);
2295  }
2296 }
2297 
2298 static int
2299 mark_key(st_data_t key, st_data_t value, st_data_t data)
2300 {
2301  gc_mark_and_pin_internal((VALUE)key);
2302 
2303  return ST_CONTINUE;
2304 }
2305 
2306 void
2307 rb_mark_set(st_table *tbl)
2308 {
2309  if (!tbl) return;
2310 
2311  st_foreach(tbl, mark_key, (st_data_t)rb_gc_get_objspace());
2312 }
2313 
2314 static int
2315 mark_keyvalue(st_data_t key, st_data_t value, st_data_t data)
2316 {
2317  gc_mark_internal((VALUE)key);
2318  gc_mark_internal((VALUE)value);
2319 
2320  return ST_CONTINUE;
2321 }
2322 
2323 static int
2324 pin_key_pin_value(st_data_t key, st_data_t value, st_data_t data)
2325 {
2326  gc_mark_and_pin_internal((VALUE)key);
2327  gc_mark_and_pin_internal((VALUE)value);
2328 
2329  return ST_CONTINUE;
2330 }
2331 
2332 static int
2333 pin_key_mark_value(st_data_t key, st_data_t value, st_data_t data)
2334 {
2335  gc_mark_and_pin_internal((VALUE)key);
2336  gc_mark_internal((VALUE)value);
2337 
2338  return ST_CONTINUE;
2339 }
2340 
2341 static void
2342 mark_hash(VALUE hash)
2343 {
2344  if (rb_hash_compare_by_id_p(hash)) {
2345  rb_hash_stlike_foreach(hash, pin_key_mark_value, 0);
2346  }
2347  else {
2348  rb_hash_stlike_foreach(hash, mark_keyvalue, 0);
2349  }
2350 
2351  gc_mark_internal(RHASH(hash)->ifnone);
2352 }
2353 
2354 void
2355 rb_mark_hash(st_table *tbl)
2356 {
2357  if (!tbl) return;
2358 
2359  st_foreach(tbl, pin_key_pin_value, 0);
2360 }
2361 
2362 static enum rb_id_table_iterator_result
2363 mark_method_entry_i(VALUE me, void *objspace)
2364 {
2365  gc_mark_internal(me);
2366 
2367  return ID_TABLE_CONTINUE;
2368 }
2369 
2370 static void
2371 mark_m_tbl(void *objspace, struct rb_id_table *tbl)
2372 {
2373  if (tbl) {
2374  rb_id_table_foreach_values(tbl, mark_method_entry_i, objspace);
2375  }
2376 }
2377 
2378 #if STACK_GROW_DIRECTION < 0
2379 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_END, (end) = STACK_START)
2380 #elif STACK_GROW_DIRECTION > 0
2381 #define GET_STACK_BOUNDS(start, end, appendix) ((start) = STACK_START, (end) = STACK_END+(appendix))
2382 #else
2383 #define GET_STACK_BOUNDS(start, end, appendix) \
2384  ((STACK_END < STACK_START) ? \
2385  ((start) = STACK_END, (end) = STACK_START) : ((start) = STACK_START, (end) = STACK_END+(appendix)))
2386 #endif
2387 
2388 static void
2389 gc_mark_machine_stack_location_maybe(VALUE obj, void *data)
2390 {
2391  gc_mark_maybe_internal(obj);
2392 
2393 #ifdef RUBY_ASAN_ENABLED
2394  const rb_execution_context_t *ec = (const rb_execution_context_t *)data;
2395  void *fake_frame_start;
2396  void *fake_frame_end;
2397  bool is_fake_frame = asan_get_fake_stack_extents(
2398  ec->machine.asan_fake_stack_handle, obj,
2399  ec->machine.stack_start, ec->machine.stack_end,
2400  &fake_frame_start, &fake_frame_end
2401  );
2402  if (is_fake_frame) {
2403  each_location_ptr(fake_frame_start, fake_frame_end, gc_mark_maybe_each_location, NULL);
2404  }
2405 #endif
2406 }
2407 
2408 #if defined(__wasm__)
2409 
2410 
2411 static VALUE *rb_stack_range_tmp[2];
2412 
2413 static void
2414 rb_mark_locations(void *begin, void *end)
2415 {
2416  rb_stack_range_tmp[0] = begin;
2417  rb_stack_range_tmp[1] = end;
2418 }
2419 
2420 void
2421 rb_gc_save_machine_context(void)
2422 {
2423  // no-op
2424 }
2425 
2426 # if defined(__EMSCRIPTEN__)
2427 
2428 static void
2429 mark_current_machine_context(const rb_execution_context_t *ec)
2430 {
2431  emscripten_scan_stack(rb_mark_locations);
2432  each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2433 
2434  emscripten_scan_registers(rb_mark_locations);
2435  each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2436 }
2437 # else // use Asyncify version
2438 
2439 static void
2440 mark_current_machine_context(const rb_execution_context_t *ec)
2441 {
2442  VALUE *stack_start, *stack_end;
2443  SET_STACK_END;
2444  GET_STACK_BOUNDS(stack_start, stack_end, 1);
2445  each_location_ptr(stack_start, stack_end, gc_mark_maybe_each_location, NULL);
2446 
2447  rb_wasm_scan_locals(rb_mark_locations);
2448  each_location_ptr(rb_stack_range_tmp[0], rb_stack_range_tmp[1], gc_mark_maybe_each_location, NULL);
2449 }
2450 
2451 # endif
2452 
2453 #else // !defined(__wasm__)
2454 
2455 void
2456 rb_gc_save_machine_context(void)
2457 {
2458  rb_thread_t *thread = GET_THREAD();
2459 
2460  RB_VM_SAVE_MACHINE_CONTEXT(thread);
2461 }
2462 
2463 
2464 static void
2465 mark_current_machine_context(const rb_execution_context_t *ec)
2466 {
2467  rb_gc_mark_machine_context(ec);
2468 }
2469 #endif
2470 
2471 void
2472 rb_gc_mark_machine_context(const rb_execution_context_t *ec)
2473 {
2474  VALUE *stack_start, *stack_end;
2475 
2476  GET_STACK_BOUNDS(stack_start, stack_end, 0);
2477  RUBY_DEBUG_LOG("ec->th:%u stack_start:%p stack_end:%p", rb_ec_thread_ptr(ec)->serial, stack_start, stack_end);
2478 
2479  void *data =
2480 #ifdef RUBY_ASAN_ENABLED
2481  /* gc_mark_machine_stack_location_maybe() uses data as const */
2482  (rb_execution_context_t *)ec;
2483 #else
2484  NULL;
2485 #endif
2486 
2487  each_location_ptr(stack_start, stack_end, gc_mark_machine_stack_location_maybe, data);
2488  int num_regs = sizeof(ec->machine.regs)/(sizeof(VALUE));
2489  each_location((VALUE*)&ec->machine.regs, num_regs, gc_mark_machine_stack_location_maybe, data);
2490 }
2491 
2492 static int
2493 rb_mark_tbl_i(st_data_t key, st_data_t value, st_data_t data)
2494 {
2495  gc_mark_and_pin_internal((VALUE)value);
2496 
2497  return ST_CONTINUE;
2498 }
2499 
2500 void
2501 rb_mark_tbl(st_table *tbl)
2502 {
2503  if (!tbl || tbl->num_entries == 0) return;
2504 
2505  st_foreach(tbl, rb_mark_tbl_i, 0);
2506 }
2507 
2508 static void
2509 gc_mark_tbl_no_pin(st_table *tbl)
2510 {
2511  if (!tbl || tbl->num_entries == 0) return;
2512 
2513  st_foreach(tbl, gc_mark_tbl_no_pin_i, 0);
2514 }
2515 
2516 void
2517 rb_mark_tbl_no_pin(st_table *tbl)
2518 {
2519  gc_mark_tbl_no_pin(tbl);
2520 }
2521 
2522 static enum rb_id_table_iterator_result
2523 mark_cvc_tbl_i(VALUE cvc_entry, void *objspace)
2524 {
2525  struct rb_cvar_class_tbl_entry *entry;
2526 
2527  entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
2528 
2529  RUBY_ASSERT(entry->cref == 0 || (BUILTIN_TYPE((VALUE)entry->cref) == T_IMEMO && IMEMO_TYPE_P(entry->cref, imemo_cref)));
2530  gc_mark_internal((VALUE)entry->cref);
2531 
2532  return ID_TABLE_CONTINUE;
2533 }
2534 
2535 static void
2536 mark_cvc_tbl(void *objspace, VALUE klass)
2537 {
2538  struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
2539  if (tbl) {
2540  rb_id_table_foreach_values(tbl, mark_cvc_tbl_i, objspace);
2541  }
2542 }
2543 
2544 static bool
2545 gc_declarative_marking_p(const rb_data_type_t *type)
2546 {
2547  return (type->flags & RUBY_TYPED_DECL_MARKING) != 0;
2548 }
2549 
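/* With RUBY_TYPED_DECL_MARKING, a type replaces its hand-written dmark
 * function with a RUBY_REF_END-terminated list of offsets to VALUE
 * fields; TYPED_DATA_REFS_OFFSET_LIST() below recovers that list from
 * the dmark slot. A sketch (struct and names are hypothetical):
 *
 *     struct blob { VALUE name; long len; VALUE owner; };
 *
 *     static const size_t blob_refs[] = {
 *         offsetof(struct blob, name),
 *         offsetof(struct blob, owner),
 *         RUBY_REF_END,
 *     };
 *
 *     // rb_data_type_t: .flags includes RUBY_TYPED_DECL_MARKING and
 *     // .function.dmark holds the cast offset list, (RUBY_DATA_FUNC)blob_refs.
 */
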
2550 static enum rb_id_table_iterator_result
2551 mark_const_table_i(VALUE value, void *objspace)
2552 {
2553  const rb_const_entry_t *ce = (const rb_const_entry_t *)value;
2554 
2555  gc_mark_internal(ce->value);
2556  gc_mark_internal(ce->file);
2557 
2558  return ID_TABLE_CONTINUE;
2559 }
2560 
2561 void
2562 rb_gc_mark_roots(void *objspace, const char **categoryp)
2563 {
2564  rb_execution_context_t *ec = GET_EC();
2565  rb_vm_t *vm = rb_ec_vm_ptr(ec);
2566 
2567 #define MARK_CHECKPOINT(category) do { \
2568  if (categoryp) *categoryp = category; \
2569 } while (0)
2570 
2571  MARK_CHECKPOINT("vm");
2572  rb_vm_mark(vm);
2573  if (vm->self) gc_mark_internal(vm->self);
2574 
2575  MARK_CHECKPOINT("end_proc");
2576  rb_mark_end_proc();
2577 
2578  MARK_CHECKPOINT("global_tbl");
2579  rb_gc_mark_global_tbl();
2580 
2581 #if USE_YJIT
2582  void rb_yjit_root_mark(void); // in Rust
2583 
2584  if (rb_yjit_enabled_p) {
2585  MARK_CHECKPOINT("YJIT");
2586  rb_yjit_root_mark();
2587  }
2588 #endif
2589 
2590  MARK_CHECKPOINT("machine_context");
2591  mark_current_machine_context(ec);
2592 
2593  MARK_CHECKPOINT("finish");
2594 
2595 #undef MARK_CHECKPOINT
2596 }
2597 
2598 #define TYPED_DATA_REFS_OFFSET_LIST(d) (size_t *)(uintptr_t)RTYPEDDATA(d)->type->function.dmark
2599 
2600 void
2601 rb_gc_mark_children(void *objspace, VALUE obj)
2602 {
2603  if (FL_TEST(obj, FL_EXIVAR)) {
2604  rb_mark_generic_ivar(obj);
2605  }
2606 
2607  switch (BUILTIN_TYPE(obj)) {
2608  case T_FLOAT:
2609  case T_BIGNUM:
2610  case T_SYMBOL:
2611  /* Not immediates, but they have no references and no singleton class.
2612  *
2613  * RSYMBOL(obj)->fstr intentionally not marked. See log for 96815f1e
2614  * ("symbol.c: remove rb_gc_mark_symbols()") */
2615  return;
2616 
2617  case T_NIL:
2618  case T_FIXNUM:
2619  rb_bug("rb_gc_mark() called for broken object");
2620  break;
2621 
2622  case T_NODE:
2623  UNEXPECTED_NODE(rb_gc_mark);
2624  break;
2625 
2626  case T_IMEMO:
2627  rb_imemo_mark_and_move(obj, false);
2628  return;
2629 
2630  default:
2631  break;
2632  }
2633 
2634  gc_mark_internal(RBASIC(obj)->klass);
2635 
2636  switch (BUILTIN_TYPE(obj)) {
2637  case T_CLASS:
2638  if (FL_TEST(obj, FL_SINGLETON)) {
2639  gc_mark_internal(RCLASS_ATTACHED_OBJECT(obj));
2640  }
2641  // Fall through to the shared T_CLASS/T_MODULE handling below
2642  case T_MODULE:
2643  if (RCLASS_SUPER(obj)) {
2644  gc_mark_internal(RCLASS_SUPER(obj));
2645  }
2646 
2647  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2648  mark_cvc_tbl(objspace, obj);
2649  rb_cc_table_mark(obj);
2650  if (rb_shape_obj_too_complex(obj)) {
2651  gc_mark_tbl_no_pin((st_table *)RCLASS_IVPTR(obj));
2652  }
2653  else {
2654  for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
2655  gc_mark_internal(RCLASS_IVPTR(obj)[i]);
2656  }
2657  }
2658 
2659  if (RCLASS_CONST_TBL(obj)) {
2660  rb_id_table_foreach_values(RCLASS_CONST_TBL(obj), mark_const_table_i, objspace);
2661  }
2662 
2663  gc_mark_internal(RCLASS_EXT(obj)->classpath);
2664  break;
2665 
2666  case T_ICLASS:
2667  if (RICLASS_OWNS_M_TBL_P(obj)) {
2668  mark_m_tbl(objspace, RCLASS_M_TBL(obj));
2669  }
2670  if (RCLASS_SUPER(obj)) {
2671  gc_mark_internal(RCLASS_SUPER(obj));
2672  }
2673 
2674  if (RCLASS_INCLUDER(obj)) {
2675  gc_mark_internal(RCLASS_INCLUDER(obj));
2676  }
2677  mark_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
2678  rb_cc_table_mark(obj);
2679  break;
2680 
2681  case T_ARRAY:
2682  if (ARY_SHARED_P(obj)) {
2683  VALUE root = ARY_SHARED_ROOT(obj);
2684  gc_mark_internal(root);
2685  }
2686  else {
2687  long len = RARRAY_LEN(obj);
2688  const VALUE *ptr = RARRAY_CONST_PTR(obj);
2689  for (long i = 0; i < len; i++) {
2690  gc_mark_internal(ptr[i]);
2691  }
2692  }
2693  break;
2694 
2695  case T_HASH:
2696  mark_hash(obj);
2697  break;
2698 
2699  case T_STRING:
2700  if (STR_SHARED_P(obj)) {
2701  if (STR_EMBED_P(RSTRING(obj)->as.heap.aux.shared)) {
2702  /* Embedded shared strings cannot be moved because this string
2703  * points into the slot of the shared string. There may be code
2704  * holding the RSTRING_PTR value on the stack, which would pin this
2705  * string but not the shared string, allowing the latter to move. */
2706  gc_mark_and_pin_internal(RSTRING(obj)->as.heap.aux.shared);
2707  }
2708  else {
2709  gc_mark_internal(RSTRING(obj)->as.heap.aux.shared);
2710  }
2711  }
2712  break;
2713 
2714  case T_DATA: {
2715  void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
2716 
2717  if (ptr) {
2718  if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
2719  size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
2720 
2721  for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
2722  gc_mark_internal(*(VALUE *)((char *)ptr + offset));
2723  }
2724  }
2725  else {
2726  RUBY_DATA_FUNC mark_func = RTYPEDDATA_P(obj) ?
2727  RTYPEDDATA(obj)->type->function.dmark :
2728  RDATA(obj)->dmark;
2729  if (mark_func) (*mark_func)(ptr);
2730  }
2731  }
2732 
2733  break;
2734  }
2735 
2736  case T_OBJECT: {
2737  rb_shape_t *shape = rb_shape_get_shape_by_id(ROBJECT_SHAPE_ID(obj));
2738 
2739  if (rb_shape_obj_too_complex(obj)) {
2740  gc_mark_tbl_no_pin(ROBJECT_IV_HASH(obj));
2741  }
2742  else {
2743  const VALUE * const ptr = ROBJECT_IVPTR(obj);
2744 
2745  uint32_t len = ROBJECT_IV_COUNT(obj);
2746  for (uint32_t i = 0; i < len; i++) {
2747  gc_mark_internal(ptr[i]);
2748  }
2749  }
2750 
2751  if (shape) {
2752  VALUE klass = RBASIC_CLASS(obj);
2753 
2754  // Increment max_iv_count if applicable, used to determine size pool allocation
2755  attr_index_t num_of_ivs = shape->next_iv_index;
2756  if (RCLASS_EXT(klass)->max_iv_count < num_of_ivs) {
2757  RCLASS_EXT(klass)->max_iv_count = num_of_ivs;
2758  }
2759  }
2760 
2761  break;
2762  }
2763 
2764  case T_FILE:
2765  if (RFILE(obj)->fptr) {
2766  gc_mark_internal(RFILE(obj)->fptr->self);
2767  gc_mark_internal(RFILE(obj)->fptr->pathv);
2768  gc_mark_internal(RFILE(obj)->fptr->tied_io_for_writing);
2769  gc_mark_internal(RFILE(obj)->fptr->writeconv_asciicompat);
2770  gc_mark_internal(RFILE(obj)->fptr->writeconv_pre_ecopts);
2771  gc_mark_internal(RFILE(obj)->fptr->encs.ecopts);
2772  gc_mark_internal(RFILE(obj)->fptr->write_lock);
2773  gc_mark_internal(RFILE(obj)->fptr->timeout);
2774  }
2775  break;
2776 
2777  case T_REGEXP:
2778  gc_mark_internal(RREGEXP(obj)->src);
2779  break;
2780 
2781  case T_MATCH:
2782  gc_mark_internal(RMATCH(obj)->regexp);
2783  if (RMATCH(obj)->str) {
2784  gc_mark_internal(RMATCH(obj)->str);
2785  }
2786  break;
2787 
2788  case T_RATIONAL:
2789  gc_mark_internal(RRATIONAL(obj)->num);
2790  gc_mark_internal(RRATIONAL(obj)->den);
2791  break;
2792 
2793  case T_COMPLEX:
2794  gc_mark_internal(RCOMPLEX(obj)->real);
2795  gc_mark_internal(RCOMPLEX(obj)->imag);
2796  break;
2797 
2798  case T_STRUCT: {
2799  const long len = RSTRUCT_LEN(obj);
2800  const VALUE * const ptr = RSTRUCT_CONST_PTR(obj);
2801 
2802  for (long i = 0; i < len; i++) {
2803  gc_mark_internal(ptr[i]);
2804  }
2805 
2806  break;
2807  }
2808 
2809  default:
2810  if (BUILTIN_TYPE(obj) == T_MOVED) rb_bug("rb_gc_mark(): %p is T_MOVED", (void *)obj);
2811  if (BUILTIN_TYPE(obj) == T_NONE) rb_bug("rb_gc_mark(): %p is T_NONE", (void *)obj);
2812  if (BUILTIN_TYPE(obj) == T_ZOMBIE) rb_bug("rb_gc_mark(): %p is T_ZOMBIE", (void *)obj);
2813  rb_bug("rb_gc_mark(): unknown data type 0x%x(%p) %s",
2814  BUILTIN_TYPE(obj), (void *)obj,
2815  rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj) ? "corrupted object" : "non object");
2816  }
2817 }
2818 
2819 size_t
2820 rb_gc_obj_optimal_size(VALUE obj)
2821 {
2822  switch (BUILTIN_TYPE(obj)) {
2823  case T_ARRAY:
2824  return rb_ary_size_as_embedded(obj);
2825 
2826  case T_OBJECT:
2827  if (rb_shape_obj_too_complex(obj)) {
2828  return sizeof(struct RObject);
2829  }
2830  else {
2831  return rb_obj_embedded_size(ROBJECT_IV_CAPACITY(obj));
2832  }
2833 
2834  case T_STRING:
2835  return rb_str_size_as_embedded(obj);
2836 
2837  case T_HASH:
2838  return sizeof(struct RHash) + (RHASH_ST_TABLE_P(obj) ? sizeof(st_table) : sizeof(ar_table));
2839 
2840  default:
2841  return 0;
2842  }
2843 }
2844 
2845 void
2846 rb_gc_writebarrier(VALUE a, VALUE b)
2847 {
2848  rb_gc_impl_writebarrier(rb_gc_get_objspace(), a, b);
2849 }
2850 
2851 void
2852 rb_gc_writebarrier_unprotect(VALUE obj)
2853 {
2854  rb_gc_impl_writebarrier_unprotect(rb_gc_get_objspace(), obj);
2855 }
2856 
2857 /*
2858  * remember `obj' if needed.
2859  */
2860 void
2861 rb_gc_writebarrier_remember(VALUE obj)
2862 {
2863  rb_gc_impl_writebarrier_remember(rb_gc_get_objspace(), obj);
2864 }
2865 
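/* C code rarely calls rb_gc_writebarrier() directly; stores of one heap
 * object into another normally go through RB_OBJ_WRITE(), which performs
 * the assignment and then fires the barrier so the generational GC sees
 * the new edge (the lvalue below is illustrative):
 *
 *     RB_OBJ_WRITE(parent, &parent_struct->child_field, child);
 */
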
2866 void
2867 rb_gc_copy_attributes(VALUE dest, VALUE obj)
2868 {
2869  rb_gc_impl_copy_attributes(rb_gc_get_objspace(), dest, obj);
2870 }
2871 
2872 int
2873 rb_gc_modular_gc_loaded_p(void)
2874 {
2875 #if USE_MODULAR_GC
2876  return rb_gc_functions.modular_gc_loaded_p;
2877 #else
2878  return false;
2879 #endif
2880 }
2881 
2882 const char *
2883 rb_gc_active_gc_name(void)
2884 {
2885  const char *gc_name = rb_gc_impl_active_gc_name();
2886 
2887  const size_t len = strlen(gc_name);
2888  if (len > RB_GC_MAX_NAME_LEN) {
2889  rb_bug("GC should have a name no more than %d chars long. Currently: %zu (%s)",
2890  RB_GC_MAX_NAME_LEN, len, gc_name);
2891  }
2892 
2893  return gc_name;
2894 }
2895 
2896 // TODO: rearchitect this function to work for a generic GC
2897 size_t
2898 rb_obj_gc_flags(VALUE obj, ID* flags, size_t max)
2899 {
2900  return rb_gc_impl_obj_flags(rb_gc_get_objspace(), obj, flags, max);
2901 }
2902 
2903 /* GC */
2904 
2905 void *
2906 rb_gc_ractor_cache_alloc(rb_ractor_t *ractor)
2907 {
2908  return rb_gc_impl_ractor_cache_alloc(rb_gc_get_objspace(), ractor);
2909 }
2910 
2911 void
2912 rb_gc_ractor_cache_free(void *cache)
2913 {
2914  rb_gc_impl_ractor_cache_free(rb_gc_get_objspace(), cache);
2915 }
2916 
2917 void
2918 rb_gc_register_mark_object(VALUE obj)
2919 {
2920  if (!rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj))
2921  return;
2922 
2923  rb_vm_register_global_object(obj);
2924 }
2925 
2926 void
2927 rb_gc_register_address(VALUE *addr)
2928 {
2929  rb_vm_t *vm = GET_VM();
2930 
2931  VALUE obj = *addr;
2932 
2933  struct global_object_list *tmp = ALLOC(struct global_object_list);
2934  tmp->next = vm->global_object_list;
2935  tmp->varptr = addr;
2936  vm->global_object_list = tmp;
2937 
2938  /*
2939  * Because some C extensions have assignment-then-register bugs,
2940  * we defensively guard `obj` here so that it does not get swept.
2941  */
2942  RB_GC_GUARD(obj);
2943  if (0 && !SPECIAL_CONST_P(obj)) {
2944  rb_warn("Object is assigned to registering address already: %"PRIsVALUE,
2945  rb_obj_class(obj));
2946  rb_print_backtrace(stderr);
2947  }
2948 }
2949 
2950 void
2951 rb_gc_unregister_address(VALUE *addr)
2952 {
2953  rb_vm_t *vm = GET_VM();
2954  struct global_object_list *tmp = vm->global_object_list;
2955 
2956  if (tmp->varptr == addr) {
2957  vm->global_object_list = tmp->next;
2958  xfree(tmp);
2959  return;
2960  }
2961  while (tmp->next) {
2962  if (tmp->next->varptr == addr) {
2963  struct global_object_list *t = tmp->next;
2964 
2965  tmp->next = tmp->next->next;
2966  xfree(t);
2967  break;
2968  }
2969  tmp = tmp->next;
2970  }
2971 }
2972 
2973 void
2974 rb_global_variable(VALUE *ptr)
2975 {
2976  rb_gc_register_address(ptr);
2977 }
2978 
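/* Canonical use of rb_global_variable() from an extension's Init
 * function: register the address of a static VALUE so the GC treats it
 * as a root (the names are illustrative):
 *
 *     static VALUE cache;
 *
 *     void
 *     Init_myext(void)
 *     {
 *         cache = rb_ary_new();
 *         rb_global_variable(&cache);
 *     }
 */
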
2979 static VALUE
2980 gc_start_internal(rb_execution_context_t *ec, VALUE self, VALUE full_mark, VALUE immediate_mark, VALUE immediate_sweep, VALUE compact)
2981 {
2982  rb_gc_impl_start(rb_gc_get_objspace(), RTEST(full_mark), RTEST(immediate_mark), RTEST(immediate_sweep), RTEST(compact));
2983 
2984  return Qnil;
2985 }
2986 
2987 /*
2988  * rb_objspace_each_objects() is a special C API to walk through the
2989  * Ruby object space. This C API is too difficult to use safely.
2990  * To be frank, you should not use it unless you have read the
2991  * source code of this function and understand what it does.
2992  *
2993  * 'callback' will be called several times (once per heap page,
2994  * in the current implementation) with:
2995  * vstart: a pointer to the first living object of the heap page.
2996  * vend: a pointer to just past the valid heap page area.
2997  * stride: the distance to the next VALUE.
2998  *
2999  * If callback() returns non-zero, the iteration will be stopped.
3000  *
3001  * Here is sample callback code that iterates over live objects:
3002  *
3003  * static int
3004  * sample_callback(void *vstart, void *vend, int stride, void *data)
3005  * {
3006  * VALUE v = (VALUE)vstart;
3007  * for (; v != (VALUE)vend; v += stride) {
3008  * if (!rb_objspace_internal_object_p(v)) { // liveness check
3009  * // do something with live object 'v'
3010  * }
3011  * }
3012  * return 0; // continue to iteration
3013  * }
3014  *
3015  * Note: 'vstart' is not the top of the heap page. It points to the
3016  * first living object in order to grasp at least one object and
3017  * avoid GC issues. This means that you cannot walk through every
3018  * Ruby object page, including pages of freed objects.
3019  *
3020  * Note: In this implementation, 'stride' is the same as sizeof(RVALUE).
3021  * However, future implementations may pass different values via
3022  * 'stride' for various reasons, so you must use 'stride' instead of
3023  * a constant value in the iteration.
3024  */
3025 void
3026 rb_objspace_each_objects(int (*callback)(void *, void *, size_t, void *), void *data)
3027 {
3028  rb_gc_impl_each_objects(rb_gc_get_objspace(), callback, data);
3029 }
3030 
3031 static void
3032 gc_ref_update_array(void *objspace, VALUE v)
3033 {
3034  if (ARY_SHARED_P(v)) {
3035  VALUE old_root = RARRAY(v)->as.heap.aux.shared_root;
3036 
3037  UPDATE_IF_MOVED(objspace, RARRAY(v)->as.heap.aux.shared_root);
3038 
3039  VALUE new_root = RARRAY(v)->as.heap.aux.shared_root;
3040  // If the root is embedded and its location has changed
3041  if (ARY_EMBED_P(new_root) && new_root != old_root) {
3042  size_t offset = (size_t)(RARRAY(v)->as.heap.ptr - RARRAY(old_root)->as.ary);
3043  GC_ASSERT(RARRAY(v)->as.heap.ptr >= RARRAY(old_root)->as.ary);
3044  RARRAY(v)->as.heap.ptr = RARRAY(new_root)->as.ary + offset;
3045  }
3046  }
3047  else {
3048  long len = RARRAY_LEN(v);
3049 
3050  if (len > 0) {
3051  VALUE *ptr = (VALUE *)RARRAY_CONST_PTR(v);
3052  for (long i = 0; i < len; i++) {
3053  UPDATE_IF_MOVED(objspace, ptr[i]);
3054  }
3055  }
3056 
3057  if (rb_gc_obj_slot_size(v) >= rb_ary_size_as_embedded(v)) {
3058  if (rb_ary_embeddable_p(v)) {
3059  rb_ary_make_embedded(v);
3060  }
3061  }
3062  }
3063 }
3064 
3065 static void
3066 gc_ref_update_object(void *objspace, VALUE v)
3067 {
3068  VALUE *ptr = ROBJECT_IVPTR(v);
3069 
3070  if (rb_shape_obj_too_complex(v)) {
3071  gc_ref_update_table_values_only(ROBJECT_IV_HASH(v));
3072  return;
3073  }
3074 
3075  size_t slot_size = rb_gc_obj_slot_size(v);
3076  size_t embed_size = rb_obj_embedded_size(ROBJECT_IV_CAPACITY(v));
3077  if (slot_size >= embed_size && !RB_FL_TEST_RAW(v, ROBJECT_EMBED)) {
3078  // Object can be re-embedded
3079  memcpy(ROBJECT(v)->as.ary, ptr, sizeof(VALUE) * ROBJECT_IV_COUNT(v));
3080  RB_FL_SET_RAW(v, ROBJECT_EMBED);
3081  xfree(ptr);
3082  ptr = ROBJECT(v)->as.ary;
3083  }
3084 
3085  for (uint32_t i = 0; i < ROBJECT_IV_COUNT(v); i++) {
3086  UPDATE_IF_MOVED(objspace, ptr[i]);
3087  }
3088 }
3089 
3090 void
3091 rb_gc_ref_update_table_values_only(st_table *tbl)
3092 {
3093  gc_ref_update_table_values_only(tbl);
3094 }
3095 
3096 /* Update MOVED references in a VALUE=>VALUE st_table */
3097 void
3098 rb_gc_update_tbl_refs(st_table *ptr)
3099 {
3100  gc_update_table_refs(ptr);
3101 }
3102 
3103 static void
3104 gc_ref_update_hash(void *objspace, VALUE v)
3105 {
3106  rb_hash_stlike_foreach_with_replace(v, hash_foreach_replace, hash_replace_ref, (st_data_t)objspace);
3107 }
3108 
3109 static void
3110 gc_update_values(void *objspace, long n, VALUE *values)
3111 {
3112  for (long i = 0; i < n; i++) {
3113  UPDATE_IF_MOVED(objspace, values[i]);
3114  }
3115 }
3116 
3117 void
3118 rb_gc_update_values(long n, VALUE *values)
3119 {
3120  gc_update_values(rb_gc_get_objspace(), n, values);
3121 }
3122 
3123 static enum rb_id_table_iterator_result
3124 check_id_table_move(VALUE value, void *data)
3125 {
3126  void *objspace = (void *)data;
3127 
3128  if (rb_gc_impl_object_moved_p(objspace, (VALUE)value)) {
3129  return ID_TABLE_REPLACE;
3130  }
3131 
3132  return ID_TABLE_CONTINUE;
3133 }
3134 
3135 VALUE
3136 rb_gc_location(VALUE value)
3137 {
3138  return rb_gc_impl_location(rb_gc_get_objspace(), value);
3139 }
3140 
3141 void
3142 rb_gc_prepare_heap_process_object(VALUE obj)
3143 {
3144  switch (BUILTIN_TYPE(obj)) {
3145  case T_STRING:
3146  // Precompute the string coderange. This both saves time for when it will
3147  // eventually be needed, and avoids mutating heap pages after a potential fork.
3148  rb_enc_str_coderange(obj);
3149  break;
3150  default:
3151  break;
3152  }
3153 }
3154 
3155 void
3156 rb_gc_prepare_heap(void)
3157 {
3158  rb_gc_impl_prepare_heap(rb_gc_get_objspace());
3159 }
3160 
3161 size_t
3162 rb_gc_heap_id_for_size(size_t size)
3163 {
3164  return rb_gc_impl_heap_id_for_size(rb_gc_get_objspace(), size);
3165 }
3166 
3167 bool
3168 rb_gc_size_allocatable_p(size_t size)
3169 {
3170  return rb_gc_impl_size_allocatable_p(size);
3171 }
3172 
3173 static enum rb_id_table_iterator_result
3174 update_id_table(VALUE *value, void *data, int existing)
3175 {
3176  void *objspace = (void *)data;
3177 
3178  if (rb_gc_impl_object_moved_p(objspace, (VALUE)*value)) {
3179  *value = rb_gc_impl_location(objspace, (VALUE)*value);
3180  }
3181 
3182  return ID_TABLE_CONTINUE;
3183 }
3184 
3185 static void
3186 update_m_tbl(void *objspace, struct rb_id_table *tbl)
3187 {
3188  if (tbl) {
3189  rb_id_table_foreach_values_with_replace(tbl, check_id_table_move, update_id_table, objspace);
3190  }
3191 }
3192 
3193 static enum rb_id_table_iterator_result
3194 update_cc_tbl_i(VALUE ccs_ptr, void *objspace)
3195 {
3196  struct rb_class_cc_entries *ccs = (struct rb_class_cc_entries *)ccs_ptr;
3197  VM_ASSERT(vm_ccs_p(ccs));
3198 
3199  if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->cme)) {
3200  ccs->cme = (const rb_callable_method_entry_t *)rb_gc_impl_location(objspace, (VALUE)ccs->cme);
3201  }
3202 
3203  for (int i=0; i<ccs->len; i++) {
3204  if (rb_gc_impl_object_moved_p(objspace, (VALUE)ccs->entries[i].cc)) {
3205  ccs->entries[i].cc = (struct rb_callcache *)rb_gc_location((VALUE)ccs->entries[i].cc);
3206  }
3207  }
3208 
3209  // do not replace
3210  return ID_TABLE_CONTINUE;
3211 }
3212 
3213 static void
3214 update_cc_tbl(void *objspace, VALUE klass)
3215 {
3216  struct rb_id_table *tbl = RCLASS_CC_TBL(klass);
3217  if (tbl) {
3218  rb_id_table_foreach_values(tbl, update_cc_tbl_i, objspace);
3219  }
3220 }
3221 
3222 static enum rb_id_table_iterator_result
3223 update_cvc_tbl_i(VALUE cvc_entry, void *objspace)
3224 {
3225  struct rb_cvar_class_tbl_entry *entry;
3226 
3227  entry = (struct rb_cvar_class_tbl_entry *)cvc_entry;
3228 
3229  if (entry->cref) {
3230  TYPED_UPDATE_IF_MOVED(objspace, rb_cref_t *, entry->cref);
3231  }
3232 
3233  entry->class_value = rb_gc_impl_location(objspace, entry->class_value);
3234 
3235  return ID_TABLE_CONTINUE;
3236 }
3237 
3238 static void
3239 update_cvc_tbl(void *objspace, VALUE klass)
3240 {
3241  struct rb_id_table *tbl = RCLASS_CVC_TBL(klass);
3242  if (tbl) {
3243  rb_id_table_foreach_values(tbl, update_cvc_tbl_i, objspace);
3244  }
3245 }
3246 
3247 static enum rb_id_table_iterator_result
3248 update_const_table(VALUE value, void *objspace)
3249 {
3250  rb_const_entry_t *ce = (rb_const_entry_t *)value;
3251 
3252  if (rb_gc_impl_object_moved_p(objspace, ce->value)) {
3253  ce->value = rb_gc_impl_location(objspace, ce->value);
3254  }
3255 
3256  if (rb_gc_impl_object_moved_p(objspace, ce->file)) {
3257  ce->file = rb_gc_impl_location(objspace, ce->file);
3258  }
3259 
3260  return ID_TABLE_CONTINUE;
3261 }
3262 
3263 static void
3264 update_const_tbl(void *objspace, struct rb_id_table *tbl)
3265 {
3266  if (!tbl) return;
3267  rb_id_table_foreach_values(tbl, update_const_table, objspace);
3268 }
3269 
3270 static void
3271 update_subclass_entries(void *objspace, rb_subclass_entry_t *entry)
3272 {
3273  while (entry) {
3274  UPDATE_IF_MOVED(objspace, entry->klass);
3275  entry = entry->next;
3276  }
3277 }
3278 
3279 static void
3280 update_class_ext(void *objspace, rb_classext_t *ext)
3281 {
3282  UPDATE_IF_MOVED(objspace, ext->origin_);
3283  UPDATE_IF_MOVED(objspace, ext->includer);
3284  UPDATE_IF_MOVED(objspace, ext->refined_class);
3285  update_subclass_entries(objspace, ext->subclasses);
3286 }
3287 
3288 static void
3289 update_superclasses(void *objspace, VALUE obj)
3290 {
3291  if (FL_TEST_RAW(obj, RCLASS_SUPERCLASSES_INCLUDE_SELF)) {
3292  for (size_t i = 0; i < RCLASS_SUPERCLASS_DEPTH(obj) + 1; i++) {
3293  UPDATE_IF_MOVED(objspace, RCLASS_SUPERCLASSES(obj)[i]);
3294  }
3295  }
3296 }
3297 
3298 extern rb_symbols_t ruby_global_symbols;
3299 #define global_symbols ruby_global_symbols
3300 
3301 #if USE_MODULAR_GC
3302 struct global_vm_table_foreach_data {
3303  vm_table_foreach_callback_func callback;
3304  vm_table_update_callback_func update_callback;
3305  void *data;
3306 };
3307 
3308 static int
3309 vm_weak_table_foreach_key(st_data_t key, st_data_t value, st_data_t data, int error)
3310 {
3311  struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3312 
3313  return iter_data->callback((VALUE)key, iter_data->data);
3314 }
3315 
3316 static int
3317 vm_weak_table_foreach_update_key(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3318 {
3319  struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3320 
3321  return iter_data->update_callback((VALUE *)key, iter_data->data);
3322 }
3323 
3324 static int
3325 vm_weak_table_str_sym_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3326 {
3327  struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3328 
3329  if (STATIC_SYM_P(value)) {
3330  return ST_CONTINUE;
3331  }
3332  else {
3333  return iter_data->callback((VALUE)value, iter_data->data);
3334  }
3335 }
3336 
3337 static int
3338 vm_weak_table_foreach_update_value(st_data_t *key, st_data_t *value, st_data_t data, int existing)
3339 {
3340  struct global_vm_table_foreach_data *iter_data = (struct global_vm_table_foreach_data *)data;
3341 
3342  return iter_data->update_callback((VALUE *)value, iter_data->data);
3343 }
3344 
3345 static int
3346 vm_weak_table_gen_ivar_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3347 {
3348  int retval = vm_weak_table_foreach_key(key, value, data, error);
3349  if (retval == ST_DELETE) {
3350  FL_UNSET((VALUE)key, FL_EXIVAR);
3351  }
3352  return retval;
3353 }
3354 
3355 static int
3356 vm_weak_table_frozen_strings_foreach(st_data_t key, st_data_t value, st_data_t data, int error)
3357 {
3358  GC_ASSERT(RB_TYPE_P((VALUE)key, T_STRING));
3359 
3360  int retval = vm_weak_table_foreach_key(key, value, data, error);
3361  if (retval == ST_DELETE) {
3362  FL_UNSET((VALUE)key, RSTRING_FSTR);
3363  }
3364  return retval;
3365 }
3366 
3367 struct st_table *rb_generic_ivtbl_get(void);
3368 
3369 void
3370 rb_gc_vm_weak_table_foreach(vm_table_foreach_callback_func callback,
3371  vm_table_update_callback_func update_callback,
3372  void *data,
3373  enum rb_gc_vm_weak_tables table)
3374 {
3375  rb_vm_t *vm = GET_VM();
3376 
3377  struct global_vm_table_foreach_data foreach_data = {
3378  .callback = callback,
3379  .update_callback = update_callback,
3380  .data = data
3381  };
3382 
3383  switch (table) {
3384  case RB_GC_VM_CI_TABLE: {
3385  st_foreach_with_replace(
3386  vm->ci_table,
3387  vm_weak_table_foreach_key,
3388  vm_weak_table_foreach_update_key,
3389  (st_data_t)&foreach_data
3390  );
3391  break;
3392  }
3393  case RB_GC_VM_OVERLOADED_CME_TABLE: {
3394  st_foreach_with_replace(
3395  vm->overloaded_cme_table,
3396  vm_weak_table_foreach_key,
3397  vm_weak_table_foreach_update_key,
3398  (st_data_t)&foreach_data
3399  );
3400  break;
3401  }
3402  case RB_GC_VM_GLOBAL_SYMBOLS_TABLE: {
3403  st_foreach_with_replace(
3404  global_symbols.str_sym,
3405  vm_weak_table_str_sym_foreach,
3406  vm_weak_table_foreach_update_value,
3407  (st_data_t)&foreach_data
3408  );
3409  break;
3410  }
3411  case RB_GC_VM_GENERIC_IV_TABLE: {
3412  st_table *generic_iv_tbl = rb_generic_ivtbl_get();
3413  st_foreach_with_replace(
3414  generic_iv_tbl,
3415  vm_weak_table_gen_ivar_foreach,
3416  vm_weak_table_foreach_update_key,
3417  (st_data_t)&foreach_data
3418  );
3419  break;
3420  }
3421  case RB_GC_VM_FROZEN_STRINGS_TABLE: {
3422  st_table *frozen_strings = GET_VM()->frozen_strings;
3423  st_foreach_with_replace(
3424  frozen_strings,
3425  vm_weak_table_frozen_strings_foreach,
3426  vm_weak_table_foreach_update_key,
3427  (st_data_t)&foreach_data
3428  );
3429  break;
3430  }
3431  default:
3432  rb_bug("rb_gc_vm_weak_table_foreach: unknown table %d", table);
3433  }
3434 }
3435 #endif
3436 
3437 void
3438 rb_gc_update_vm_references(void *objspace)
3439 {
3440  rb_execution_context_t *ec = GET_EC();
3441  rb_vm_t *vm = rb_ec_vm_ptr(ec);
3442 
3443  rb_vm_update_references(vm);
3444  rb_gc_update_global_tbl();
3445  global_symbols.ids = rb_gc_impl_location(objspace, global_symbols.ids);
3446  global_symbols.dsymbol_fstr_hash = rb_gc_impl_location(objspace, global_symbols.dsymbol_fstr_hash);
3447  gc_update_table_refs(global_symbols.str_sym);
3448 
3449 #if USE_YJIT
3450  void rb_yjit_root_update_references(void); // in Rust
3451 
3452  if (rb_yjit_enabled_p) {
3453  rb_yjit_root_update_references();
3454  }
3455 #endif
3456 }
3457 
3458 void
3459 rb_gc_update_object_references(void *objspace, VALUE obj)
3460 {
3461  if (FL_TEST(obj, FL_EXIVAR)) {
3462  rb_ref_update_generic_ivar(obj);
3463  }
3464 
3465  switch (BUILTIN_TYPE(obj)) {
3466  case T_CLASS:
3467  if (FL_TEST(obj, FL_SINGLETON)) {
3468  UPDATE_IF_MOVED(objspace, RCLASS_ATTACHED_OBJECT(obj));
3469  }
3470  // Fall through to the shared T_CLASS/T_MODULE handling below
3471  case T_MODULE:
3472  if (RCLASS_SUPER((VALUE)obj)) {
3473  UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3474  }
3475  update_m_tbl(objspace, RCLASS_M_TBL(obj));
3476  update_cc_tbl(objspace, obj);
3477  update_cvc_tbl(objspace, obj);
3478  update_superclasses(objspace, obj);
3479 
3480  if (rb_shape_obj_too_complex(obj)) {
3481  gc_ref_update_table_values_only(RCLASS_IV_HASH(obj));
3482  }
3483  else {
3484  for (attr_index_t i = 0; i < RCLASS_IV_COUNT(obj); i++) {
3485  UPDATE_IF_MOVED(objspace, RCLASS_IVPTR(obj)[i]);
3486  }
3487  }
3488 
3489  update_class_ext(objspace, RCLASS_EXT(obj));
3490  update_const_tbl(objspace, RCLASS_CONST_TBL(obj));
3491 
3492  UPDATE_IF_MOVED(objspace, RCLASS_EXT(obj)->classpath);
3493  break;
3494 
3495  case T_ICLASS:
3496  if (RICLASS_OWNS_M_TBL_P(obj)) {
3497  update_m_tbl(objspace, RCLASS_M_TBL(obj));
3498  }
3499  if (RCLASS_SUPER((VALUE)obj)) {
3500  UPDATE_IF_MOVED(objspace, RCLASS(obj)->super);
3501  }
3502  update_class_ext(objspace, RCLASS_EXT(obj));
3503  update_m_tbl(objspace, RCLASS_CALLABLE_M_TBL(obj));
3504  update_cc_tbl(objspace, obj);
3505  break;
3506 
3507  case T_IMEMO:
3508  rb_imemo_mark_and_move(obj, true);
3509  return;
3510 
3511  case T_NIL:
3512  case T_FIXNUM:
3513  case T_NODE:
3514  case T_MOVED:
3515  case T_NONE:
3516  /* These can't move */
3517  return;
3518 
3519  case T_ARRAY:
3520  gc_ref_update_array(objspace, obj);
3521  break;
3522 
3523  case T_HASH:
3524  gc_ref_update_hash(objspace, obj);
3525  UPDATE_IF_MOVED(objspace, RHASH(obj)->ifnone);
3526  break;
3527 
3528  case T_STRING:
3529  {
3530  if (STR_SHARED_P(obj)) {
3531  UPDATE_IF_MOVED(objspace, RSTRING(obj)->as.heap.aux.shared);
3532  }
3533 
3534  /* If, after the move, the string is not embedded and can fit in the
3535  * slot it has been placed in, then re-embed it. */
3536  if (rb_gc_obj_slot_size(obj) >= rb_str_size_as_embedded(obj)) {
3537  if (!STR_EMBED_P(obj) && rb_str_reembeddable_p(obj)) {
3538  rb_str_make_embedded(obj);
3539  }
3540  }
3541 
3542  break;
3543  }
3544  case T_DATA:
3545  /* Call the compaction callback, if it exists */
3546  {
3547  void *const ptr = RTYPEDDATA_P(obj) ? RTYPEDDATA_GET_DATA(obj) : DATA_PTR(obj);
3548  if (ptr) {
3549  if (RTYPEDDATA_P(obj) && gc_declarative_marking_p(RTYPEDDATA(obj)->type)) {
3550  size_t *offset_list = TYPED_DATA_REFS_OFFSET_LIST(obj);
3551 
3552  for (size_t offset = *offset_list; offset != RUBY_REF_END; offset = *offset_list++) {
3553  VALUE *ref = (VALUE *)((char *)ptr + offset);
3554  if (SPECIAL_CONST_P(*ref)) continue;
3555  *ref = rb_gc_impl_location(objspace, *ref);
3556  }
3557  }
3558  else if (RTYPEDDATA_P(obj)) {
3559  RUBY_DATA_FUNC compact_func = RTYPEDDATA(obj)->type->function.dcompact;
3560  if (compact_func) (*compact_func)(ptr);
3561  }
3562  }
3563  }
3564  break;
3565 
3566  case T_OBJECT:
3567  gc_ref_update_object(objspace, obj);
3568  break;
3569 
3570  case T_FILE:
3571  if (RFILE(obj)->fptr) {
3572  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->self);
3573  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->pathv);
3574  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->tied_io_for_writing);
3575  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_asciicompat);
3576  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->writeconv_pre_ecopts);
3577  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->encs.ecopts);
3578  UPDATE_IF_MOVED(objspace, RFILE(obj)->fptr->write_lock);
3579  }
3580  break;
3581  case T_REGEXP:
3582  UPDATE_IF_MOVED(objspace, RREGEXP(obj)->src);
3583  break;
3584 
3585  case T_SYMBOL:
3586  UPDATE_IF_MOVED(objspace, RSYMBOL(obj)->fstr);
3587  break;
3588 
3589  case T_FLOAT:
3590  case T_BIGNUM:
3591  break;
3592 
3593  case T_MATCH:
3594  UPDATE_IF_MOVED(objspace, RMATCH(obj)->regexp);
3595 
3596  if (RMATCH(obj)->str) {
3597  UPDATE_IF_MOVED(objspace, RMATCH(obj)->str);
3598  }
3599  break;
3600 
3601  case T_RATIONAL:
3602  UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->num);
3603  UPDATE_IF_MOVED(objspace, RRATIONAL(obj)->den);
3604  break;
3605 
3606  case T_COMPLEX:
3607  UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->real);
3608  UPDATE_IF_MOVED(objspace, RCOMPLEX(obj)->imag);
3609 
3610  break;
3611 
3612  case T_STRUCT:
3613  {
3614  long i, len = RSTRUCT_LEN(obj);
3615  VALUE *ptr = (VALUE *)RSTRUCT_CONST_PTR(obj);
3616 
3617  for (i = 0; i < len; i++) {
3618  UPDATE_IF_MOVED(objspace, ptr[i]);
3619  }
3620  }
3621  break;
3622  default:
3623  rb_bug("unreachable");
3624  break;
3625  }
3626 
3627  UPDATE_IF_MOVED(objspace, RBASIC(obj)->klass);
3628 }
3629 
3630 VALUE
3631 rb_gc_start(void)
3632 {
3633  rb_gc();
3634  return Qnil;
3635 }
3636 
3637 void
3638 rb_gc(void)
3639 {
3640  unless_objspace(objspace) { return; }
3641 
3642  rb_gc_impl_start(objspace, true, true, true, false);
3643 }
3644 
3645 int
3646 rb_during_gc(void)
3647 {
3648  unless_objspace(objspace) { return FALSE; }
3649 
3650  return rb_gc_impl_during_gc_p(objspace);
3651 }
3652 
3653 size_t
3654 rb_gc_count(void)
3655 {
3656  return rb_gc_impl_gc_count(rb_gc_get_objspace());
3657 }
3658 
3659 static VALUE
3660 gc_count(rb_execution_context_t *ec, VALUE self)
3661 {
3662  return SIZET2NUM(rb_gc_count());
3663 }
3664 
3665 VALUE
3666 rb_gc_latest_gc_info(VALUE key)
3667 {
3668  if (!SYMBOL_P(key) && !RB_TYPE_P(key, T_HASH)) {
3669  rb_raise(rb_eTypeError, "non-hash or symbol given");
3670  }
3671 
3672  VALUE val = rb_gc_impl_latest_gc_info(rb_gc_get_objspace(), key);
3673 
3674  if (val == Qundef) {
3675  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
3676  }
3677 
3678  return val;
3679 }
3680 
3681 static VALUE
3682 gc_stat(rb_execution_context_t *ec, VALUE self, VALUE arg) // arg is (nil || hash || symbol)
3683 {
3684  if (NIL_P(arg)) {
3685  arg = rb_hash_new();
3686  }
3687  else if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3688  rb_raise(rb_eTypeError, "non-hash or symbol given");
3689  }
3690 
3691  VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3692 
3693  if (ret == Qundef) {
3694  GC_ASSERT(SYMBOL_P(arg));
3695 
3696  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3697  }
3698 
3699  return ret;
3700 }
3701 
3702 size_t
3703 rb_gc_stat(VALUE arg)
3704 {
3705  if (!RB_TYPE_P(arg, T_HASH) && !SYMBOL_P(arg)) {
3706  rb_raise(rb_eTypeError, "non-hash or symbol given");
3707  }
3708 
3709  VALUE ret = rb_gc_impl_stat(rb_gc_get_objspace(), arg);
3710 
3711  if (ret == Qundef) {
3712  GC_ASSERT(SYMBOL_P(arg));
3713 
3714  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3715  }
3716 
3717  if (SYMBOL_P(arg)) {
3718  return NUM2SIZET(ret);
3719  }
3720  else {
3721  return 0;
3722  }
3723 }
3724 
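/* Per the branches above, rb_gc_stat() returns a single statistic when
 * given a Symbol and returns 0 after filling in a given Hash. For
 * example, fetching the number of GC runs (GC.stat key :count):
 *
 *     size_t gc_runs = rb_gc_stat(ID2SYM(rb_intern("count")));
 */
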
3725 static VALUE
3726 gc_stat_heap(rb_execution_context_t *ec, VALUE self, VALUE heap_name, VALUE arg)
3727 {
3728  if (NIL_P(arg)) {
3729  arg = rb_hash_new();
3730  }
3731 
3732  if (NIL_P(heap_name)) {
3733  if (!RB_TYPE_P(arg, T_HASH)) {
3734  rb_raise(rb_eTypeError, "non-hash given");
3735  }
3736  }
3737  else if (FIXNUM_P(heap_name)) {
3738  if (!SYMBOL_P(arg) && !RB_TYPE_P(arg, T_HASH)) {
3739  rb_raise(rb_eTypeError, "non-hash or symbol given");
3740  }
3741  }
3742  else {
3743  rb_raise(rb_eTypeError, "heap_name must be nil or an Integer");
3744  }
3745 
3746  VALUE ret = rb_gc_impl_stat_heap(rb_gc_get_objspace(), heap_name, arg);
3747 
3748  if (ret == Qundef) {
3749  GC_ASSERT(SYMBOL_P(arg));
3750 
3751  rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(arg));
3752  }
3753 
3754  return ret;
3755 }
3756 
3757 static VALUE
3758 gc_config_get(rb_execution_context_t *ec, VALUE self)
3759 {
3760  VALUE cfg_hash = rb_gc_impl_config_get(rb_gc_get_objspace());
3761  rb_hash_aset(cfg_hash, sym("implementation"), rb_fstring_cstr(rb_gc_impl_active_gc_name()));
3762 
3763  return cfg_hash;
3764 }
3765 
3766 static VALUE
3767 gc_config_set(rb_execution_context_t *ec, VALUE self, VALUE hash)
3768 {
3769  void *objspace = rb_gc_get_objspace();
3770 
3771  rb_gc_impl_config_set(objspace, hash);
3772 
3773  return rb_gc_impl_config_get(objspace);
3774 }
3775 
3776 static VALUE
3777 gc_stress_get(rb_execution_context_t *ec, VALUE self)
3778 {
3779  return rb_gc_impl_stress_get(rb_gc_get_objspace());
3780 }
3781 
3782 static VALUE
3783 gc_stress_set_m(rb_execution_context_t *ec, VALUE self, VALUE flag)
3784 {
3785  rb_gc_impl_stress_set(rb_gc_get_objspace(), flag);
3786 
3787  return flag;
3788 }
3789 
3790 void
3791 rb_gc_initial_stress_set(VALUE flag)
3792 {
3793  initial_stress = flag;
3794 }
3795 
3796 size_t *
3797 rb_gc_heap_sizes(void)
3798 {
3799  return rb_gc_impl_heap_sizes(rb_gc_get_objspace());
3800 }
3801 
3802 VALUE
3803 rb_gc_enable(void)
3804 {
3805  return rb_objspace_gc_enable(rb_gc_get_objspace());
3806 }
3807 
3808 VALUE
3809 rb_objspace_gc_enable(void *objspace)
3810 {
3811  bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3812  rb_gc_impl_gc_enable(objspace);
3813  return RBOOL(disabled);
3814 }
3815 
3816 static VALUE
3817 gc_enable(rb_execution_context_t *ec, VALUE _)
3818 {
3819  return rb_gc_enable();
3820 }
3821 
3822 static VALUE
3823 gc_disable_no_rest(void *objspace)
3824 {
3825  bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3826  rb_gc_impl_gc_disable(objspace, false);
3827  return RBOOL(disabled);
3828 }
3829 
3830 VALUE
3831 rb_gc_disable_no_rest(void)
3832 {
3833  return gc_disable_no_rest(rb_gc_get_objspace());
3834 }
3835 
3836 VALUE
3837 rb_gc_disable(void)
3838 {
3839  return rb_objspace_gc_disable(rb_gc_get_objspace());
3840 }
3841 
3842 VALUE
3843 rb_objspace_gc_disable(void *objspace)
3844 {
3845  bool disabled = !rb_gc_impl_gc_enabled_p(objspace);
3846  rb_gc_impl_gc_disable(objspace, true);
3847  return RBOOL(disabled);
3848 }
3849 
3850 static VALUE
3851 gc_disable(rb_execution_context_t *ec, VALUE _)
3852 {
3853  return rb_gc_disable();
3854 }
3855 
3856 // TODO: think about moving ruby_gc_set_params into Init_heap or Init_gc
3857 void
3858 ruby_gc_set_params(void)
3859 {
3860  rb_gc_impl_set_params(rb_gc_get_objspace());
3861 }
3862 
3863 void
3864 rb_objspace_reachable_objects_from(VALUE obj, void (func)(VALUE, void *), void *data)
3865 {
3866  RB_VM_LOCK_ENTER();
3867  {
3868  if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from() is not supported during GC");
3869 
3870  if (!RB_SPECIAL_CONST_P(obj)) {
3871  rb_vm_t *vm = GET_VM();
3872  struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
3873  struct gc_mark_func_data_struct mfd = {
3874  .mark_func = func,
3875  .data = data,
3876  };
3877 
3878  vm->gc.mark_func_data = &mfd;
3879  rb_gc_mark_children(rb_gc_get_objspace(), obj);
3880  vm->gc.mark_func_data = prev_mfd;
3881  }
3882  }
3883  RB_VM_LOCK_LEAVE();
3884 }
3885 
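/* Example use of rb_objspace_reachable_objects_from() above, printing
 * every object directly reachable from obj (the callback name is
 * illustrative):
 *
 *     static void
 *     reach_i(VALUE child, void *data)
 *     {
 *         rb_p(child);  // inspect each directly reachable object
 *     }
 *
 *     rb_objspace_reachable_objects_from(obj, reach_i, NULL);
 */
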
3886 struct root_objects_data {
3887  const char *category;
3888  void (*func)(const char *category, VALUE, void *);
3889  void *data;
3890 };
3891 
3892 static void
3893 root_objects_from(VALUE obj, void *ptr)
3894 {
3895  const struct root_objects_data *data = (struct root_objects_data *)ptr;
3896  (*data->func)(data->category, obj, data->data);
3897 }
3898 
3899 void
3900 rb_objspace_reachable_objects_from_root(void (func)(const char *category, VALUE, void *), void *passing_data)
3901 {
3902  if (rb_gc_impl_during_gc_p(rb_gc_get_objspace())) rb_bug("rb_objspace_reachable_objects_from_root() is not supported during GC");
3903 
3904  rb_vm_t *vm = GET_VM();
3905 
3906  struct root_objects_data data = {
3907  .func = func,
3908  .data = passing_data,
3909  };
3910 
3911  struct gc_mark_func_data_struct *prev_mfd = vm->gc.mark_func_data;
3912  struct gc_mark_func_data_struct mfd = {
3913  .mark_func = root_objects_from,
3914  .data = &data,
3915  };
3916 
3917  vm->gc.mark_func_data = &mfd;
3918  rb_gc_save_machine_context();
3919  rb_gc_mark_roots(vm->gc.objspace, &data.category);
3920  vm->gc.mark_func_data = prev_mfd;
3921 }
3922 
3923 /*
3924  ------------------------------ DEBUG ------------------------------
3925 */
3926 
3927 static const char *
3928 type_name(int type, VALUE obj)
3929 {
3930  switch (type) {
3931 #define TYPE_NAME(t) case (t): return #t;
3932  TYPE_NAME(T_NONE);
3933  TYPE_NAME(T_OBJECT);
3934  TYPE_NAME(T_CLASS);
3935  TYPE_NAME(T_MODULE);
3936  TYPE_NAME(T_FLOAT);
3937  TYPE_NAME(T_STRING);
3938  TYPE_NAME(T_REGEXP);
3939  TYPE_NAME(T_ARRAY);
3940  TYPE_NAME(T_HASH);
3941  TYPE_NAME(T_STRUCT);
3942  TYPE_NAME(T_BIGNUM);
3943  TYPE_NAME(T_FILE);
3944  TYPE_NAME(T_MATCH);
3945  TYPE_NAME(T_COMPLEX);
3946  TYPE_NAME(T_RATIONAL);
3947  TYPE_NAME(T_NIL);
3948  TYPE_NAME(T_TRUE);
3949  TYPE_NAME(T_FALSE);
3950  TYPE_NAME(T_SYMBOL);
3951  TYPE_NAME(T_FIXNUM);
3952  TYPE_NAME(T_UNDEF);
3953  TYPE_NAME(T_IMEMO);
3954  TYPE_NAME(T_ICLASS);
3955  TYPE_NAME(T_MOVED);
3956  TYPE_NAME(T_ZOMBIE);
3957  case T_DATA:
3958  if (obj && rb_objspace_data_type_name(obj)) {
3959  return rb_objspace_data_type_name(obj);
3960  }
3961  return "T_DATA";
3962 #undef TYPE_NAME
3963  }
3964  return "unknown";
3965 }
3966 
3967 static const char *
3968 obj_type_name(VALUE obj)
3969 {
3970  return type_name(TYPE(obj), obj);
3971 }
3972 
3973 const char *
3974 rb_method_type_name(rb_method_type_t type)
3975 {
3976  switch (type) {
3977  case VM_METHOD_TYPE_ISEQ: return "iseq";
3978  case VM_METHOD_TYPE_ATTRSET: return "attrset";
3979  case VM_METHOD_TYPE_IVAR: return "ivar";
3980  case VM_METHOD_TYPE_BMETHOD: return "bmethod";
3981  case VM_METHOD_TYPE_ALIAS: return "alias";
3982  case VM_METHOD_TYPE_REFINED: return "refined";
3983  case VM_METHOD_TYPE_CFUNC: return "cfunc";
3984  case VM_METHOD_TYPE_ZSUPER: return "zsuper";
3985  case VM_METHOD_TYPE_MISSING: return "missing";
3986  case VM_METHOD_TYPE_OPTIMIZED: return "optimized";
3987  case VM_METHOD_TYPE_UNDEF: return "undef";
3988  case VM_METHOD_TYPE_NOTIMPLEMENTED: return "notimplemented";
3989  }
3990  rb_bug("rb_method_type_name: unreachable (type: %d)", type);
3991 }
3992 
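/* Appends " label@path:line" for the given iseq to buff. The T_MOVED check
 * guards against reading a path object that compaction has already moved. */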
3993 static void
3994 rb_raw_iseq_info(char *const buff, const size_t buff_size, const rb_iseq_t *iseq)
3995 {
3996  if (buff_size > 0 && ISEQ_BODY(iseq) && ISEQ_BODY(iseq)->location.label && !RB_TYPE_P(ISEQ_BODY(iseq)->location.pathobj, T_MOVED)) {
3997  VALUE path = rb_iseq_path(iseq);
3998  int n = ISEQ_BODY(iseq)->location.first_lineno;
3999  snprintf(buff, buff_size, " %s@%s:%d",
4000  RSTRING_PTR(ISEQ_BODY(iseq)->location.label),
4001  RSTRING_PTR(path), n);
4002  }
4003 }
4004 
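/* Clamps RSTRING_LEN() into the non-negative int range expected by the
 * "%.*s" printf directive, without ever raising. */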
4005 static int
4006 str_len_no_raise(VALUE str)
4007 {
4008  long len = RSTRING_LEN(str);
4009  if (len < 0) return 0;
4010  if (len > INT_MAX) return INT_MAX;
4011  return (int)len;
4012 }
4013 
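/* Appending helpers for rb_raw_obj_info and friends: each write advances pos
 * and jumps to the local end: label once buff is full, so oversized output is
 * truncated instead of overflowing the buffer. */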
4014 #define BUFF_ARGS buff + pos, buff_size - pos
4015 #define APPEND_F(...) if ((pos += snprintf(BUFF_ARGS, "" __VA_ARGS__)) >= buff_size) goto end
4016 #define APPEND_S(s) do { \
4017  if ((pos + (int)rb_strlen_lit(s)) >= buff_size) { \
4018  goto end; \
4019  } \
4020  else { \
4021  memcpy(buff + pos, (s), rb_strlen_lit(s) + 1); \
4022  } \
4023  } while (0)
4024 #define C(c, s) ((c) != 0 ? (s) : " ")
4025 
4026 static size_t
4027 rb_raw_obj_info_common(char *const buff, const size_t buff_size, const VALUE obj)
4028 {
4029  size_t pos = 0;
4030 
4031  if (SPECIAL_CONST_P(obj)) {
4032  APPEND_F("%s", obj_type_name(obj));
4033 
4034  if (FIXNUM_P(obj)) {
4035  APPEND_F(" %ld", FIX2LONG(obj));
4036  }
4037  else if (SYMBOL_P(obj)) {
4038  APPEND_F(" %s", rb_id2name(SYM2ID(obj)));
4039  }
4040  }
4041  else {
4042  // const int age = RVALUE_AGE_GET(obj);
4043 
4044  if (rb_gc_impl_pointer_to_heap_p(rb_gc_get_objspace(), (void *)obj)) {
4045  // TODO: fixme
4046  // APPEND_F("%p [%d%s%s%s%s%s%s] %s ",
4047  // (void *)obj, age,
4048  // C(RVALUE_UNCOLLECTIBLE_BITMAP(obj), "L"),
4049  // C(RVALUE_MARK_BITMAP(obj), "M"),
4050  // C(RVALUE_PIN_BITMAP(obj), "P"),
4051  // C(RVALUE_MARKING_BITMAP(obj), "R"),
4052  // C(RVALUE_WB_UNPROTECTED_BITMAP(obj), "U"),
4053  // C(rb_objspace_garbage_object_p(obj), "G"),
4054  // obj_type_name(obj));
4055  }
4056  else {
4057  /* fake */
4058  // APPEND_F("%p [%dXXXX] %s",
4059  // (void *)obj, age,
4060  // obj_type_name(obj));
4061  }
4062 
4063  if (internal_object_p(obj)) {
4064  /* ignore */
4065  }
4066  else if (RBASIC(obj)->klass == 0) {
4067  APPEND_S("(temporary internal)");
4068  }
4069  else if (RTEST(RBASIC(obj)->klass)) {
4070  VALUE class_path = rb_class_path_cached(RBASIC(obj)->klass);
4071  if (!NIL_P(class_path)) {
4072  APPEND_F("(%s)", RSTRING_PTR(class_path));
4073  }
4074  }
4075  }
4076  end:
4077 
4078  return pos;
4079 }
4080 
4081 const char *rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj);
4082 
4083 static size_t
4084 rb_raw_obj_info_builtin_type(char *const buff, const size_t buff_size, const VALUE obj, size_t pos)
4085 {
4086  if (LIKELY(pos < buff_size) && !SPECIAL_CONST_P(obj)) {
4087  const enum ruby_value_type type = BUILTIN_TYPE(obj);
4088 
4089  switch (type) {
4090  case T_NODE:
4091  UNEXPECTED_NODE(rb_raw_obj_info);
4092  break;
4093  case T_ARRAY:
4094  if (ARY_SHARED_P(obj)) {
4095  APPEND_S("shared -> ");
4096  rb_raw_obj_info(BUFF_ARGS, ARY_SHARED_ROOT(obj));
4097  }
4098  else if (ARY_EMBED_P(obj)) {
4099  APPEND_F("[%s%s] len: %ld (embed)",
4100  C(ARY_EMBED_P(obj), "E"),
4101  C(ARY_SHARED_P(obj), "S"),
4102  RARRAY_LEN(obj));
4103  }
4104  else {
4105  APPEND_F("[%s%s] len: %ld, capa:%ld ptr:%p",
4106  C(ARY_EMBED_P(obj), "E"),
4107  C(ARY_SHARED_P(obj), "S"),
4108  RARRAY_LEN(obj),
4109  ARY_EMBED_P(obj) ? -1L : RARRAY(obj)->as.heap.aux.capa,
4110  (void *)RARRAY_CONST_PTR(obj));
4111  }
4112  break;
4113  case T_STRING: {
4114  if (STR_SHARED_P(obj)) {
4115  APPEND_F(" [shared] len: %ld", RSTRING_LEN(obj));
4116  }
4117  else {
4118  if (STR_EMBED_P(obj)) APPEND_S(" [embed]");
4119 
4120  APPEND_F(" len: %ld, capa: %" PRIdSIZE, RSTRING_LEN(obj), rb_str_capacity(obj));
4121  }
4122  APPEND_F(" \"%.*s\"", str_len_no_raise(obj), RSTRING_PTR(obj));
4123  break;
4124  }
4125  case T_SYMBOL: {
4126  VALUE fstr = RSYMBOL(obj)->fstr;
4127  ID id = RSYMBOL(obj)->id;
4128  if (RB_TYPE_P(fstr, T_STRING)) {
4129  APPEND_F(":%s id:%d", RSTRING_PTR(fstr), (unsigned int)id);
4130  }
4131  else {
4132  APPEND_F("(%p) id:%d", (void *)fstr, (unsigned int)id);
4133  }
4134  break;
4135  }
4136  case T_MOVED: {
4137  APPEND_F("-> %p", (void*)rb_gc_impl_location(rb_gc_get_objspace(), obj));
4138  break;
4139  }
4140  case T_HASH: {
4141  APPEND_F("[%c] %"PRIdSIZE,
4142  RHASH_AR_TABLE_P(obj) ? 'A' : 'S',
4143  RHASH_SIZE(obj));
4144  break;
4145  }
4146  case T_CLASS:
4147  case T_MODULE:
4148  {
4149  VALUE class_path = rb_class_path_cached(obj);
4150  if (!NIL_P(class_path)) {
4151  APPEND_F("%s", RSTRING_PTR(class_path));
4152  }
4153  else {
4154  APPEND_S("(anon)");
4155  }
4156  break;
4157  }
4158  case T_ICLASS:
4159  {
4160  VALUE class_path = rb_class_path_cached(RBASIC_CLASS(obj));
4161  if (!NIL_P(class_path)) {
4162  APPEND_F("src:%s", RSTRING_PTR(class_path));
4163  }
4164  break;
4165  }
4166  case T_OBJECT:
4167  {
4168  if (rb_shape_obj_too_complex(obj)) {
4169  size_t hash_len = rb_st_table_size(ROBJECT_IV_HASH(obj));
4170  APPEND_F("(too_complex) len:%zu", hash_len);
4171  }
4172  else {
4173  uint32_t len = ROBJECT_IV_CAPACITY(obj);
4174 
4175  if (RBASIC(obj)->flags & ROBJECT_EMBED) {
4176  APPEND_F("(embed) len:%d", len);
4177  }
4178  else {
4179  VALUE *ptr = ROBJECT_IVPTR(obj);
4180  APPEND_F("len:%d ptr:%p", len, (void *)ptr);
4181  }
4182  }
4183  }
4184  break;
4185  case T_DATA: {
4186  const struct rb_block *block;
4187  const rb_iseq_t *iseq;
4188  if (rb_obj_is_proc(obj) &&
4189  (block = vm_proc_block(obj)) != NULL &&
4190  (vm_block_type(block) == block_type_iseq) &&
4191  (iseq = vm_block_iseq(block)) != NULL) {
4192  rb_raw_iseq_info(BUFF_ARGS, iseq);
4193  }
4194  else if (rb_ractor_p(obj)) {
4195  rb_ractor_t *r = (void *)DATA_PTR(obj);
4196  if (r) {
4197  APPEND_F("r:%d", r->pub.id);
4198  }
4199  }
4200  else {
4201  const char * const type_name = rb_objspace_data_type_name(obj);
4202  if (type_name) {
4203  APPEND_F("%s", type_name);
4204  }
4205  }
4206  break;
4207  }
4208  case T_IMEMO: {
4209  APPEND_F("<%s> ", rb_imemo_name(imemo_type(obj)));
4210 
4211  switch (imemo_type(obj)) {
4212  case imemo_ment:
4213  {
4214  const rb_method_entry_t *me = (const rb_method_entry_t *)obj;
4215 
4216  APPEND_F(":%s (%s%s%s%s) type:%s aliased:%d owner:%p defined_class:%p",
4217  rb_id2name(me->called_id),
4218  METHOD_ENTRY_VISI(me) == METHOD_VISI_PUBLIC ? "pub" :
4219  METHOD_ENTRY_VISI(me) == METHOD_VISI_PRIVATE ? "pri" : "pro",
4220  METHOD_ENTRY_COMPLEMENTED(me) ? ",cmp" : "",
4221  METHOD_ENTRY_CACHED(me) ? ",cc" : "",
4222  METHOD_ENTRY_INVALIDATED(me) ? ",inv" : "",
4223  me->def ? rb_method_type_name(me->def->type) : "NULL",
4224  me->def ? me->def->aliased : -1,
4225  (void *)me->owner, // obj_info(me->owner),
4226  (void *)me->defined_class); //obj_info(me->defined_class)));
4227 
4228  if (me->def) {
4229  switch (me->def->type) {
4230  case VM_METHOD_TYPE_ISEQ:
4231  APPEND_S(" (iseq:");
4232  rb_raw_obj_info(BUFF_ARGS, (VALUE)me->def->body.iseq.iseqptr);
4233  APPEND_S(")");
4234  break;
4235  default:
4236  break;
4237  }
4238  }
4239 
4240  break;
4241  }
4242  case imemo_iseq: {
4243  const rb_iseq_t *iseq = (const rb_iseq_t *)obj;
4244  rb_raw_iseq_info(BUFF_ARGS, iseq);
4245  break;
4246  }
4247  case imemo_callinfo:
4248  {
4249  const struct rb_callinfo *ci = (const struct rb_callinfo *)obj;
4250  APPEND_F("(mid:%s, flag:%x argc:%d, kwarg:%s)",
4251  rb_id2name(vm_ci_mid(ci)),
4252  vm_ci_flag(ci),
4253  vm_ci_argc(ci),
4254  vm_ci_kwarg(ci) ? "available" : "NULL");
4255  break;
4256  }
4257  case imemo_callcache:
4258  {
4259  const struct rb_callcache *cc = (const struct rb_callcache *)obj;
4260  VALUE class_path = cc->klass ? rb_class_path_cached(cc->klass) : Qnil;
4261  const rb_callable_method_entry_t *cme = vm_cc_cme(cc);
4262 
4263  APPEND_F("(klass:%s cme:%s%s (%p) call:%p",
4264  NIL_P(class_path) ? (cc->klass ? "??" : "<NULL>") : RSTRING_PTR(class_path),
4265  cme ? rb_id2name(cme->called_id) : "<NULL>",
4266  cme ? (METHOD_ENTRY_INVALIDATED(cme) ? " [inv]" : "") : "",
4267  (void *)cme,
4268  (void *)(uintptr_t)vm_cc_call(cc));
4269  break;
4270  }
4271  default:
4272  break;
4273  }
4274  }
4275  default:
4276  break;
4277  }
4278  }
4279  end:
4280 
4281  return pos;
4282 }
4283 
4284 #undef C
4285 
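/* Runs the attached block with obj temporarily unpoisoned for AddressSanitizer.
 * The for-header abuses a one-iteration loop: it unpoisons obj on entry, uses
 * the address of a local as a run-once flag, and re-poisons on exit via
 * asan_poison_object_restore(), whose NULL return terminates the loop. */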
4286 #define asan_unpoisoning_object(obj) \
4287  for (void *poisoned = asan_unpoison_object_temporary(obj), \
4288  *unpoisoning = &poisoned; /* flag to loop just once */ \
4289  unpoisoning; \
4290  unpoisoning = asan_poison_object_restore(obj, poisoned))
4291 
4292 const char *
4293 rb_raw_obj_info(char *const buff, const size_t buff_size, VALUE obj)
4294 {
4295  asan_unpoisoning_object(obj) {
4296  size_t pos = rb_raw_obj_info_common(buff, buff_size, obj);
4297  pos = rb_raw_obj_info_builtin_type(buff, buff_size, obj, pos);
4298  if (pos >= buff_size) {} // truncated
4299  }
4300 
4301  return buff;
4302 }
4303 
4304 #undef APPEND_S
4305 #undef APPEND_F
4306 #undef BUFF_ARGS
4307 
4308 #if RGENGC_OBJ_INFO
4309 #define OBJ_INFO_BUFFERS_NUM 10
4310 #define OBJ_INFO_BUFFERS_SIZE 0x100
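/* obj_info() rotates through a small ring of static buffers, so the results
 * of the last few calls stay valid simultaneously (useful when several
 * obj_info() strings are interpolated into a single message). */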
4311 static rb_atomic_t obj_info_buffers_index = 0;
4312 static char obj_info_buffers[OBJ_INFO_BUFFERS_NUM][OBJ_INFO_BUFFERS_SIZE];
4313 
4314 /* Atomically increments *var, wrapping it back to 0 once maxval is
4315  * reached. Returns the old value of *var, reduced into (0...maxval). */
4316 static rb_atomic_t
4317 atomic_inc_wraparound(rb_atomic_t *var, const rb_atomic_t maxval)
4318 {
4319  rb_atomic_t oldval = RUBY_ATOMIC_FETCH_ADD(*var, 1);
4320  if (RB_UNLIKELY(oldval >= maxval - 1)) { // wraparound *var
4321  const rb_atomic_t newval = oldval + 1;
4322  RUBY_ATOMIC_CAS(*var, newval, newval % maxval);
4323  oldval %= maxval;
4324  }
4325  return oldval;
4326 }
4327 
4328 static const char *
4329 obj_info(VALUE obj)
4330 {
4331  rb_atomic_t index = atomic_inc_wraparound(&obj_info_buffers_index, OBJ_INFO_BUFFERS_NUM);
4332  char *const buff = obj_info_buffers[index];
4333  return rb_raw_obj_info(buff, OBJ_INFO_BUFFERS_SIZE, obj);
4334 }
4335 #else
4336 static const char *
4337 obj_info(VALUE obj)
4338 {
4339  return obj_type_name(obj);
4340 }
4341 #endif
4342 
4343 /*
4344  ------------------------ Extended allocator ------------------------
4345 */
4346 
4347 struct gc_raise_tag {
4348  VALUE exc;
4349  const char *fmt;
4350  va_list *ap;
4351 };
4352 
4353 static void *
4354 gc_vraise(void *ptr)
4355 {
4356  struct gc_raise_tag *argv = ptr;
4357  rb_vraise(argv->exc, argv->fmt, *argv->ap);
4358  UNREACHABLE_RETURN(NULL);
4359 }
4360 
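/* Raises exc regardless of GVL state: directly when the GVL is already held,
 * via rb_thread_call_with_gvl() on a native Ruby-managed thread, and otherwise
 * by printing the message to stderr before aborting. */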
4361 static void
4362 gc_raise(VALUE exc, const char *fmt, ...)
4363 {
4364  va_list ap;
4365  va_start(ap, fmt);
4366  struct gc_raise_tag argv = {
4367  exc, fmt, &ap,
4368  };
4369 
4370  if (ruby_thread_has_gvl_p()) {
4371  gc_vraise(&argv);
4372  UNREACHABLE;
4373  }
4374  else if (ruby_native_thread_p()) {
4375  rb_thread_call_with_gvl(gc_vraise, &argv);
4376  UNREACHABLE;
4377  }
4378  else {
4379  /* Not in a ruby thread */
4380  fprintf(stderr, "%s", "[FATAL] ");
4381  vfprintf(stderr, fmt, ap);
4382  }
4383 
4384  va_end(ap);
4385  abort();
4386 }
4387 
4388 NORETURN(static void negative_size_allocation_error(const char *));
4389 static void
4390 negative_size_allocation_error(const char *msg)
4391 {
4392  gc_raise(rb_eNoMemError, "%s", msg);
4393 }
4394 
4395 static void *
4396 ruby_memerror_body(void *dummy)
4397 {
4398  rb_memerror();
4399  return 0;
4400 }
4401 
4402 NORETURN(static void ruby_memerror(void));
4403 RBIMPL_ATTR_MAYBE_UNUSED()
4404 static void
4405 ruby_memerror(void)
4406 {
4407  if (ruby_thread_has_gvl_p()) {
4408  rb_memerror();
4409  }
4410  else {
4411  if (ruby_native_thread_p()) {
4412  rb_thread_call_with_gvl(ruby_memerror_body, 0);
4413  }
4414  else {
4415  /* no ruby thread */
4416  fprintf(stderr, "[FATAL] failed to allocate memory\n");
4417  }
4418  }
4419 
4420  /* There have been discussions about whether we should exit here; */
4421  /* we may revisit this decision later. */
4422  exit(EXIT_FAILURE);
4423 }
4424 
4425 void
4426 rb_memerror(void)
4427 {
4428  /* the `GET_VM()->special_exceptions` below assumes that
4429  * the VM is reachable from the current thread; the assertion
4430  * below makes sure of that. */
4431  RUBY_ASSERT_ALWAYS(ruby_thread_has_gvl_p());
4432 
4433  rb_execution_context_t *ec = GET_EC();
4434  VALUE exc = GET_VM()->special_exceptions[ruby_error_nomemory];
4435 
4436  if (!exc ||
4437  rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
4438  fprintf(stderr, "[FATAL] failed to allocate memory\n");
4439  exit(EXIT_FAILURE);
4440  }
4441  if (rb_ec_raised_p(ec, RAISED_NOMEMORY)) {
4442  rb_ec_raised_clear(ec);
4443  }
4444  else {
4445  rb_ec_raised_set(ec, RAISED_NOMEMORY);
4446  exc = ruby_vm_special_exception_copy(exc);
4447  }
4448  ec->errinfo = exc;
4449  EC_JUMP_TAG(ec, TAG_RAISE);
4450 }
4451 
4452 void
4453 rb_malloc_info_show_results(void)
4454 {
4455 }
4456 
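/* Funnels every xmalloc-family result through a single NULL check; on
 * allocation failure this ends in ruby_memerror(), which does not return. */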
4457 static void *
4458 handle_malloc_failure(void *ptr)
4459 {
4460  if (LIKELY(ptr)) {
4461  return ptr;
4462  }
4463  else {
4464  ruby_memerror();
4465  UNREACHABLE_RETURN(ptr);
4466  }
4467 }
4468 
4469 static void *ruby_xmalloc_body(size_t size);
4470 
4471 void *
4472 ruby_xmalloc(size_t size)
4473 {
4474  return handle_malloc_failure(ruby_xmalloc_body(size));
4475 }
4476 
4477 static void *
4478 ruby_xmalloc_body(size_t size)
4479 {
4480  if ((ssize_t)size < 0) {
4481  negative_size_allocation_error("too large allocation size");
4482  }
4483 
4484  return rb_gc_impl_malloc(rb_gc_get_objspace(), size);
4485 }
4486 
4487 void
4488 ruby_malloc_size_overflow(size_t count, size_t elsize)
4489 {
4491  "malloc: possible integer overflow (%"PRIuSIZE"*%"PRIuSIZE")",
4492  count, elsize);
4493 }
4494 
4495 static void *ruby_xmalloc2_body(size_t n, size_t size);
4496 
4497 void *
4498 ruby_xmalloc2(size_t n, size_t size)
4499 {
4500  return handle_malloc_failure(ruby_xmalloc2_body(n, size));
4501 }
4502 
4503 static void *
4504 ruby_xmalloc2_body(size_t n, size_t size)
4505 {
4506  return rb_gc_impl_malloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4507 }
4508 
4509 static void *ruby_xcalloc_body(size_t n, size_t size);
4510 
4511 void *
4512 ruby_xcalloc(size_t n, size_t size)
4513 {
4514  return handle_malloc_failure(ruby_xcalloc_body(n, size));
4515 }
4516 
4517 static void *
4518 ruby_xcalloc_body(size_t n, size_t size)
4519 {
4520  return rb_gc_impl_calloc(rb_gc_get_objspace(), xmalloc2_size(n, size));
4521 }
4522 
4523 static void *ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size);
4524 
4525 #ifdef ruby_sized_xrealloc
4526 #undef ruby_sized_xrealloc
4527 #endif
4528 void *
4529 ruby_sized_xrealloc(void *ptr, size_t new_size, size_t old_size)
4530 {
4531  return handle_malloc_failure(ruby_sized_xrealloc_body(ptr, new_size, old_size));
4532 }
4533 
4534 static void *
4535 ruby_sized_xrealloc_body(void *ptr, size_t new_size, size_t old_size)
4536 {
4537  if ((ssize_t)new_size < 0) {
4538  negative_size_allocation_error("too large allocation size");
4539  }
4540 
4541  return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, new_size, old_size);
4542 }
4543 
4544 void *
4545 ruby_xrealloc(void *ptr, size_t new_size)
4546 {
4547  return ruby_sized_xrealloc(ptr, new_size, 0);
4548 }
4549 
4550 static void *ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n);
4551 
4552 #ifdef ruby_sized_xrealloc2
4553 #undef ruby_sized_xrealloc2
4554 #endif
4555 void *
4556 ruby_sized_xrealloc2(void *ptr, size_t n, size_t size, size_t old_n)
4557 {
4558  return handle_malloc_failure(ruby_sized_xrealloc2_body(ptr, n, size, old_n));
4559 }
4560 
4561 static void *
4562 ruby_sized_xrealloc2_body(void *ptr, size_t n, size_t size, size_t old_n)
4563 {
4564  size_t len = xmalloc2_size(n, size);
4565  return rb_gc_impl_realloc(rb_gc_get_objspace(), ptr, len, old_n * size);
4566 }
4567 
4568 void *
4569 ruby_xrealloc2(void *ptr, size_t n, size_t size)
4570 {
4571  return ruby_sized_xrealloc2(ptr, n, size, 0);
4572 }
4573 
4574 #ifdef ruby_sized_xfree
4575 #undef ruby_sized_xfree
4576 #endif
4577 void
4578 ruby_sized_xfree(void *x, size_t size)
4579 {
4580  if (LIKELY(x)) {
4581  /* It's possible for a C extension's pthread destructor function set by pthread_key_create
4582  * to be called after ruby_vm_destruct and attempt to free memory. Fall back to ruby_mimfree in
4583  * that case. */
4584  if (LIKELY(GET_VM())) {
4585  rb_gc_impl_free(rb_gc_get_objspace(), x, size);
4586  }
4587  else {
4588  ruby_mimfree(x);
4589  }
4590  }
4591 }
4592 
4593 void
4594 ruby_xfree(void *x)
4595 {
4596  ruby_sized_xfree(x, 0);
4597 }
4598 
4599 void *
4600 rb_xmalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4601 {
4602  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4603  return ruby_xmalloc(w);
4604 }
4605 
4606 void *
4607 rb_xcalloc_mul_add(size_t x, size_t y, size_t z) /* x * y + z */
4608 {
4609  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4610  return ruby_xcalloc(w, 1);
4611 }
4612 
4613 void *
4614 rb_xrealloc_mul_add(const void *p, size_t x, size_t y, size_t z) /* x * y + z */
4615 {
4616  size_t w = size_mul_add_or_raise(x, y, z, rb_eArgError);
4617  return ruby_xrealloc((void *)p, w);
4618 }
4619 
4620 void *
4621 rb_xmalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4622 {
4623  size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4624  return ruby_xmalloc(u);
4625 }
4626 
4627 void *
4628 rb_xcalloc_mul_add_mul(size_t x, size_t y, size_t z, size_t w) /* x * y + z * w */
4629 {
4630  size_t u = size_mul_add_mul_or_raise(x, y, z, w, rb_eArgError);
4631  return ruby_xcalloc(u, 1);
4632 }
4633 
4634 /* Mimics ruby_xmalloc, but does not require rb_objspace.
4635  * Returns a pointer suitable for passing to ruby_xfree.
4636  */
4637 void *
4638 ruby_mimmalloc(size_t size)
4639 {
4640  void *mem;
4641 #if CALC_EXACT_MALLOC_SIZE
4642  size += sizeof(struct malloc_obj_info);
4643 #endif
4644  mem = malloc(size);
4645 #if CALC_EXACT_MALLOC_SIZE
4646  if (!mem) {
4647  return NULL;
4648  }
4649  else
4650  /* set 0 for consistency of allocated_size/allocations */
4651  {
4652  struct malloc_obj_info *info = mem;
4653  info->size = 0;
4654  mem = info + 1;
4655  }
4656 #endif
4657  return mem;
4658 }
4659 
4660 void *
4661 ruby_mimcalloc(size_t num, size_t size)
4662 {
4663  void *mem;
4664 #if CALC_EXACT_MALLOC_SIZE
4665  struct rbimpl_size_mul_overflow_tag t = rbimpl_size_mul_overflow(num, size);
4666  if (UNLIKELY(t.left)) {
4667  return NULL;
4668  }
4669  size = t.right + sizeof(struct malloc_obj_info);
4670  mem = calloc1(size);
4671  if (!mem) {
4672  return NULL;
4673  }
4674  else
4675  /* set 0 for consistency of allocated_size/allocations */
4676  {
4677  struct malloc_obj_info *info = mem;
4678  info->size = 0;
4679  mem = info + 1;
4680  }
4681 #else
4682  mem = calloc(num, size);
4683 #endif
4684  return mem;
4685 }
4686 
4687 void
4688 ruby_mimfree(void *ptr)
4689 {
4690 #if CALC_EXACT_MALLOC_SIZE
4691  struct malloc_obj_info *info = (struct malloc_obj_info *)ptr - 1;
4692  ptr = info;
4693 #endif
4694  free(ptr);
4695 }
4696 
4697 void
4698 rb_gc_adjust_memory_usage(ssize_t diff)
4699 {
4700  unless_objspace(objspace) { return; }
4701 
4702  rb_gc_impl_adjust_memory_usage(objspace, diff);
4703 }
4704 
4705 const char *
4706 rb_obj_info(VALUE obj)
4707 {
4708  return obj_info(obj);
4709 }
4710 
4711 void
4712 rb_obj_info_dump(VALUE obj)
4713 {
4714  char buff[0x100];
4715  fprintf(stderr, "rb_obj_info_dump: %s\n", rb_raw_obj_info(buff, 0x100, obj));
4716 }
4717 
4718 void
4719 rb_obj_info_dump_loc(VALUE obj, const char *file, int line, const char *func)
4720 {
4721  char buff[0x100];
4722  fprintf(stderr, "<OBJ_INFO:%s@%s:%d> %s\n", func, file, line, rb_raw_obj_info(buff, 0x100, obj));
4723 }
4724 
4725 void
4726 rb_gc_before_fork(void)
4727 {
4728  rb_gc_impl_before_fork(rb_gc_get_objspace());
4729 }
4730 
4731 void
4732 rb_gc_after_fork(rb_pid_t pid)
4733 {
4734  rb_gc_impl_after_fork(rb_gc_get_objspace(), pid);
4735 }
4736 
4737 /*
4738  * Document-module: ObjectSpace
4739  *
4740  * The ObjectSpace module contains a number of routines
4741  * that interact with the garbage collection facility and allow you to
4742  * traverse all living objects with an iterator.
4743  *
4744  * ObjectSpace also provides support for object finalizers: procs that are
4745  * called after a specific object has been destroyed by garbage collection. See
4746  * the documentation for +ObjectSpace.define_finalizer+ for important
4747  * information on how to use this method correctly.
4748  *
4749  * a = "A"
4750  * b = "B"
4751  *
4752  * ObjectSpace.define_finalizer(a, proc {|id| puts "Finalizer one on #{id}" })
4753  * ObjectSpace.define_finalizer(b, proc {|id| puts "Finalizer two on #{id}" })
4754  *
4755  * a = nil
4756  * b = nil
4757  *
4758  * _produces:_
4759  *
4760  * Finalizer two on 537763470
4761  * Finalizer one on 537763480
4762  */
4763 
4764 /* Document-class: GC::Profiler
4765  *
4766  * The GC profiler provides access to information on GC runs including time,
4767  * length and object space size.
4768  *
4769  * Example:
4770  *
4771  * GC::Profiler.enable
4772  *
4773  * require 'rdoc/rdoc'
4774  *
4775  * GC::Profiler.report
4776  *
4777  * GC::Profiler.disable
4778  *
4779  * See also GC.count, GC.malloc_allocated_size and GC.malloc_allocations
4780  */
4781 
4782 #include "gc.rbinc"
4783 
4784 void
4785 Init_GC(void)
4786 {
4787 #undef rb_intern
4788  malloc_offset = gc_compute_malloc_offset();
4789 
4790  rb_mGC = rb_define_module("GC");
4791 
4792  VALUE rb_mObjSpace = rb_define_module("ObjectSpace");
4793 
4794  rb_define_module_function(rb_mObjSpace, "each_object", os_each_obj, -1);
4795 
4796  rb_define_module_function(rb_mObjSpace, "define_finalizer", define_final, -1);
4797  rb_define_module_function(rb_mObjSpace, "undefine_finalizer", undefine_final, 1);
4798 
4799  rb_define_module_function(rb_mObjSpace, "_id2ref", os_id2ref, 1);
4800 
4801  rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError, "failed to allocate memory");
4802 
4804  rb_define_method(rb_mKernel, "object_id", rb_obj_id, 0);
4805 
4806  rb_define_module_function(rb_mObjSpace, "count_objects", count_objects, -1);
4807 
4808  rb_gc_impl_init();
4809 }
4810 
4811 // Set a name for the anonymous virtual memory area. `addr` is the starting
4812 // address of the area and `size` is its length in bytes. `name` is a
4813 // NUL-terminated human-readable string.
4814 //
4815 // This function is usually called after calling `mmap()`. The human-readable
4816 // annotation helps developers identify the call site of `mmap()` that created
4817 // the memory mapping.
4818 //
4819 // This function currently only works on Linux 5.17 or higher. After calling
4820 // this function, we can see annotations in the form of "[anon:...]" in
4821 // `/proc/self/maps`, where `...` is the content of `name`. This function has
4822 // no effect when called on other platforms.
4823 void
4824 ruby_annotate_mmap(const void *addr, unsigned long size, const char *name)
4825 {
4826 #if defined(HAVE_SYS_PRCTL_H) && defined(PR_SET_VMA) && defined(PR_SET_VMA_ANON_NAME)
4827  // The name length cannot exceed 80 (including the '\0').
4828  RUBY_ASSERT(strlen(name) < 80);
4829  prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)addr, size, name);
4830  // We ignore errors in prctl. prctl may set errno to EINVAL for several
4831  // reasons.
4832  // 1. The attr (PR_SET_VMA_ANON_NAME) is not a valid attribute.
4833  // 2. addr is an invalid address.
4834  // 3. The string pointed by name is too long.
4835  // The first error indicates PR_SET_VMA_ANON_NAME is not available, which
4836  // can happen when the compiled binary runs on an old kernel. In theory, all
4837  // other errors should be treated as failures, but since EINVAL cannot
4838  // distinguish the first error from the others, and this function is mainly
4839  // used for debugging, we silently ignore the error.
4840  errno = 0;
4841 #endif
4842 }
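/* A minimal usage sketch (hypothetical call site; `len` and the label are
 * illustrative, not names used elsewhere in this file):
 *
 *     void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     if (mem != MAP_FAILED) {
 *         ruby_annotate_mmap(mem, len, "Ruby:GC:example");
 *     }
 *
 * On Linux 5.17+, the mapping then shows up as "[anon:Ruby:GC:example]" in
 * /proc/self/maps; on other platforms the call is a no-op. */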