Ruby 3.4.0dev (2024-12-06 revision 892c46283a5ea4179500d951c9d4866c0051f27b)
vm_core.h (892c46283a5ea4179500d951c9d4866c0051f27b)
1 #ifndef RUBY_VM_CORE_H
2 #define RUBY_VM_CORE_H
3 /**********************************************************************
4 
5  vm_core.h -
6 
7  $Author$
8  created at: 04/01/01 19:41:38 JST
9 
10  Copyright (C) 2004-2007 Koichi Sasada
11 
12 **********************************************************************/
13 
14 /*
15  * Enable check mode.
16  * 1: enable local assertions.
17  */
18 #ifndef VM_CHECK_MODE
19 
20 // respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
21 #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22 
23 #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24 #endif
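/*
 * Build-time sketch (the exact invocation is illustrative, assuming the
 * usual autoconf flow): VM_CHECK_MODE is normally injected via CPPFLAGS,
 *
 *     ./configure cppflags="-DVM_CHECK_MODE=1"
 *
 * after which VM_ASSERT() and VM_UNREACHABLE() below compile to real checks
 * instead of no-ops.
 */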
25 
26 
27 /**
28  * VM Debug Level
29  *
30  * debug level:
31  * 0: no debug output
32  * 1: show instruction name
33  * 2: show stack frame when control stack frame is changed
34  * 3: show stack status
35  * 4: show register
36  * 5:
37  * 10: gc check
38  */
39 #ifndef VMDEBUG
40 #define VMDEBUG 0
41 #endif
42 
43 #if 0
44 #undef VMDEBUG
45 #define VMDEBUG 3
46 #endif
47 
48 #include "ruby/internal/config.h"
49 
50 #include <stddef.h>
51 #include <signal.h>
52 #include <stdarg.h>
53 
54 #include "ruby_assert.h"
55 
56 #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57 
58 #if VM_CHECK_MODE > 0
59 #define VM_ASSERT(expr, ...) \
60  RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62 #define RUBY_ASSERT_CRITICAL_SECTION
63 #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64 #else
65 #define VM_ASSERT(/*expr, */...) ((void)0)
66 #define VM_UNREACHABLE(func) UNREACHABLE
67 #define RUBY_DEBUG_THREAD_SCHEDULE()
68 #endif
69 
70 #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71 
72 #if defined(RUBY_ASSERT_CRITICAL_SECTION)
73 /*
74 # Critical Section Assertions
75 
76 These assertions ensure that context switching does not occur between two points in the code. In theory, such
77 code should already be protected by a mutex, and these assertions verify that the mutex is actually held.
78 
79 The typical case where they help is when a mutex is held further up the call stack and the code in question does
80 not hold the mutex directly. The critical section assertions then ensure that the mutex is
81 held by someone else.
82 
83 These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which happens only when
84 VM_CHECK_MODE is greater than 0.
85 
86 ## Example Usage
87 
88 ```c
89 RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90 // ... some code which does not invoke rb_vm_check_ints() ...
91 RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92 ```
93 
94 If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95 `RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96 */
97 extern int ruby_assert_critical_section_entered;
98 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100 #else
101 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103 #endif
104 
105 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106 # include "wasm/setjmp.h"
107 #else
108 # include <setjmp.h>
109 #endif
110 
111 #if defined(__linux__) || defined(__FreeBSD__)
112 # define RB_THREAD_T_HAS_NATIVE_ID
113 #endif
114 
115 #include "ruby/internal/stdbool.h"
116 #include "ccan/list/list.h"
117 #include "id.h"
118 #include "internal.h"
119 #include "internal/array.h"
120 #include "internal/basic_operators.h"
121 #include "internal/sanitizers.h"
122 #include "internal/serial.h"
123 #include "internal/vm.h"
124 #include "method.h"
125 #include "node.h"
126 #include "ruby/ruby.h"
127 #include "ruby/st.h"
128 #include "ruby_atomic.h"
129 #include "vm_opts.h"
130 
131 #include "ruby/thread_native.h"
132 /*
133  * implementation selector of get_insn_info algorithm
134  * 0: linear search
135  * 1: binary search
136  * 2: succinct bitvector
137  */
138 #ifndef VM_INSN_INFO_TABLE_IMPL
139 # define VM_INSN_INFO_TABLE_IMPL 2
140 #endif
141 
142 #if defined(NSIG_MAX) /* POSIX issue 8 */
143 # undef NSIG
144 # define NSIG NSIG_MAX
145 #elif defined(_SIG_MAXSIG) /* FreeBSD */
146 # undef NSIG
147 # define NSIG _SIG_MAXSIG
148 #elif defined(_SIGMAX) /* QNX */
149 # define NSIG (_SIGMAX + 1)
150 #elif defined(NSIG) /* 99% of everything else */
151 # /* take it */
152 #else /* Last resort */
153 # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
154 #endif
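/*
 * Worked example for the last-resort branch: with a 128-byte sigset_t and
 * CHAR_BIT == 8, NSIG becomes 128 * 8 + 1 == 1025.
 */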
155 
156 #define RUBY_NSIG NSIG
157 
158 #if defined(SIGCLD)
159 # define RUBY_SIGCHLD (SIGCLD)
160 #elif defined(SIGCHLD)
161 # define RUBY_SIGCHLD (SIGCHLD)
162 #endif
163 
164 #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
165 # define USE_SIGALTSTACK
166 void *rb_allocate_sigaltstack(void);
167 void *rb_register_sigaltstack(void *);
168 # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
169 # define RB_ALTSTACK_FREE(var) free(var)
170 # define RB_ALTSTACK(var) var
171 #else /* noop */
172 # define RB_ALTSTACK_INIT(var, altstack)
173 # define RB_ALTSTACK_FREE(var)
174 # define RB_ALTSTACK(var) (0)
175 #endif
176 
177 #include THREAD_IMPL_H
178 #define RUBY_VM_THREAD_MODEL 2
179 
180 /*****************/
181 /* configuration */
182 /*****************/
183 
184 /* gcc ver. check */
185 #if defined(__GNUC__) && __GNUC__ >= 2
186 
187 #if OPT_TOKEN_THREADED_CODE
188 #if OPT_DIRECT_THREADED_CODE
189 #undef OPT_DIRECT_THREADED_CODE
190 #endif
191 #endif
192 
193 #else /* defined(__GNUC__) && __GNUC__ >= 2 */
194 
195 /* disable threaded code options */
196 #if OPT_DIRECT_THREADED_CODE
197 #undef OPT_DIRECT_THREADED_CODE
198 #endif
199 #if OPT_TOKEN_THREADED_CODE
200 #undef OPT_TOKEN_THREADED_CODE
201 #endif
202 #endif
203 
204 /* call threaded code */
205 #if OPT_CALL_THREADED_CODE
206 #if OPT_DIRECT_THREADED_CODE
207 #undef OPT_DIRECT_THREADED_CODE
208 #endif /* OPT_DIRECT_THREADED_CODE */
209 #endif /* OPT_CALL_THREADED_CODE */
210 
211 void rb_vm_encoded_insn_data_table_init(void);
212 typedef unsigned long rb_num_t;
213 typedef signed long rb_snum_t;
214 
215 enum ruby_tag_type {
216  RUBY_TAG_NONE = 0x0,
217  RUBY_TAG_RETURN = 0x1,
218  RUBY_TAG_BREAK = 0x2,
219  RUBY_TAG_NEXT = 0x3,
220  RUBY_TAG_RETRY = 0x4,
221  RUBY_TAG_REDO = 0x5,
222  RUBY_TAG_RAISE = 0x6,
223  RUBY_TAG_THROW = 0x7,
224  RUBY_TAG_FATAL = 0x8,
225  RUBY_TAG_MASK = 0xf
226 };
227 
228 #define TAG_NONE RUBY_TAG_NONE
229 #define TAG_RETURN RUBY_TAG_RETURN
230 #define TAG_BREAK RUBY_TAG_BREAK
231 #define TAG_NEXT RUBY_TAG_NEXT
232 #define TAG_RETRY RUBY_TAG_RETRY
233 #define TAG_REDO RUBY_TAG_REDO
234 #define TAG_RAISE RUBY_TAG_RAISE
235 #define TAG_THROW RUBY_TAG_THROW
236 #define TAG_FATAL RUBY_TAG_FATAL
237 #define TAG_MASK RUBY_TAG_MASK
238 
239 enum ruby_vm_throw_flags {
240  VM_THROW_NO_ESCAPE_FLAG = 0x8000,
241  VM_THROW_STATE_MASK = 0xff
242 };
243 
244 /* forward declarations */
245 struct rb_thread_struct;
246 struct rb_control_frame_struct;
247 
248 /* iseq data type */
249 typedef struct rb_compile_option_struct rb_compile_option_t;
250 
251 union ic_serial_entry {
252  rb_serial_t raw;
253  VALUE data[2];
254 };
255 
256 #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
257 
258 // imemo_constcache
259 struct iseq_inline_constant_cache_entry {
260  VALUE flags;
261 
262  VALUE value; // v0
263  VALUE _unused1; // v1
264  VALUE _unused2; // v2
265  const rb_cref_t *ic_cref; // v3
266 };
267 STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268  (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269  sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270 
271 struct iseq_inline_constant_cache {
272  struct iseq_inline_constant_cache_entry *entry;
273 
274  /**
275  * A null-terminated list of ids, used to represent a constant's path
276  * idNULL is used to represent the :: prefix, and 0 is used to denote the
277  * end of the list.
278  *
279  * For example
280  * FOO {rb_intern("FOO"), 0}
281  * FOO::BAR {rb_intern("FOO"), rb_intern("BAR"), 0}
282  * ::FOO {idNULL, rb_intern("FOO"), 0}
283  * ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
284  */
285  const ID *segments;
286 };
287 
288 struct iseq_inline_iv_cache_entry {
289  uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
290  ID iv_set_name;
291 };
292 
293 struct iseq_inline_cvar_cache_entry {
294  struct rb_cvar_class_tbl_entry *entry;
295 };
296 
297 union iseq_inline_storage_entry {
298  struct {
299  struct rb_thread_struct *running_thread;
300  VALUE value;
301  } once;
302  struct iseq_inline_constant_cache ic_cache;
303  struct iseq_inline_iv_cache_entry iv_cache;
304 };
305 
306 struct rb_calling_info {
307  const struct rb_call_data *cd;
308  const struct rb_callcache *cc;
309  VALUE block_handler;
310  VALUE recv;
311  int argc;
312  bool kw_splat;
313  VALUE heap_argv;
314 };
315 
316 #ifndef VM_ARGC_STACK_MAX
317 #define VM_ARGC_STACK_MAX 128
318 #endif
319 
320 # define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
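/*
 * Illustration (an assumption from the names above, not a guarantee): when a
 * splatted call would push more than VM_ARGC_STACK_MAX arguments, they are
 * kept in the heap_argv array rather than on the VM stack, which is why
 * CALLING_ARGC() takes the count from RARRAY_LENINT(heap_argv) when
 * heap_argv is set, and from calling->argc otherwise.
 */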
321 
322 struct rb_execution_context_struct;
323 
324 #if 1
325 #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
326 #else
327 #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
328 #endif
329 #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
330 
331 typedef struct rb_iseq_location_struct {
332  VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
333  VALUE base_label; /* String */
334  VALUE label; /* String */
335  int first_lineno;
336  int node_id;
337  rb_code_location_t code_location;
338 } rb_iseq_location_t;
339 
340 #define PATHOBJ_PATH 0
341 #define PATHOBJ_REALPATH 1
342 
343 static inline VALUE
344 pathobj_path(VALUE pathobj)
345 {
346  if (RB_TYPE_P(pathobj, T_STRING)) {
347  return pathobj;
348  }
349  else {
350  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
351  return RARRAY_AREF(pathobj, PATHOBJ_PATH);
352  }
353 }
354 
355 static inline VALUE
356 pathobj_realpath(VALUE pathobj)
357 {
358  if (RB_TYPE_P(pathobj, T_STRING)) {
359  return pathobj;
360  }
361  else {
362  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
363  return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
364  }
365 }
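/*
 * Example (the paths are illustrative): when realpath differs from the load
 * path, pathobj is the frozen Array ["lib/foo.rb", "/real/lib/foo.rb"], so
 * pathobj_path() returns the former and pathobj_realpath() the latter; when
 * they coincide, a single frozen String is stored and both accessors return
 * it unchanged.
 */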
366 
367 /* Forward declarations */
368 struct rb_rjit_unit;
369 
370 typedef uintptr_t iseq_bits_t;
371 
372 #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
373 
374 /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
375 #define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
376 
377 /* instruction sequence type */
378 enum rb_iseq_type {
379  ISEQ_TYPE_TOP,
380  ISEQ_TYPE_METHOD,
381  ISEQ_TYPE_BLOCK,
382  ISEQ_TYPE_CLASS,
383  ISEQ_TYPE_RESCUE,
384  ISEQ_TYPE_ENSURE,
385  ISEQ_TYPE_EVAL,
386  ISEQ_TYPE_MAIN,
387  ISEQ_TYPE_PLAIN
388 };
389 
390 // Attributes specified by Primitive.attr!
391 enum rb_builtin_attr {
392  // The iseq does not call methods.
393  BUILTIN_ATTR_LEAF = 0x01,
394  // This iseq contains only a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
395  BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
396  // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
397  BUILTIN_ATTR_INLINE_BLOCK = 0x04,
398  // The iseq acts like a C method in backtraces.
399  BUILTIN_ATTR_C_TRACE = 0x08,
400 };
401 
402 typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
403 
404 struct rb_iseq_constant_body {
405  enum rb_iseq_type type;
406 
407  unsigned int iseq_size;
408  VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
409 
409 
410  /**
411  * parameter information
412  *
413  * def m(a1, a2, ..., aM, # mandatory
414  * b1=(...), b2=(...), ..., bN=(...), # optional
415  * *c, # rest
416  * d1, d2, ..., dO, # post
417  * e1:(...), e2:(...), ..., eK:(...), # keyword
418  * **f, # keyword_rest
419  * &g) # block
420  * =>
421  *
422  * lead_num = M
423  * opt_num = N
424  * rest_start = M+N
425  * post_start = M+N+(*1)
426  * post_num = O
427  * keyword_num = K
428  * block_start = M+N+(*1)+O+K
429  * keyword_bits = M+N+(*1)+O+K+(&1)
430  * param_size = M+N+(*1)+O+K+(&1)+(**1) // parameter size.
431  */
432 
433  struct {
434  struct {
435  unsigned int has_lead : 1;
436  unsigned int has_opt : 1;
437  unsigned int has_rest : 1;
438  unsigned int has_post : 1;
439  unsigned int has_kw : 1;
440  unsigned int has_kwrest : 1;
441  unsigned int has_block : 1;
442 
443  unsigned int ambiguous_param0 : 1; /* {|a|} */
444  unsigned int accepts_no_kwarg : 1;
445  unsigned int ruby2_keywords: 1;
446  unsigned int anon_rest: 1;
447  unsigned int anon_kwrest: 1;
448  unsigned int use_block: 1;
449  unsigned int forwardable: 1;
450  } flags;
451 
452  unsigned int size;
453 
454  int lead_num;
455  int opt_num;
456  int rest_start;
457  int post_start;
458  int post_num;
459  int block_start;
460 
461  const VALUE *opt_table; /* (opt_num + 1) entries. */
462  /* opt_num and opt_table:
463  *
464  * def foo o1=e1, o2=e2, ..., oN=eN
465  * #=>
466  * # prologue code
467  * A1: e1
468  * A2: e2
469  * ...
470  * AN: eN
471  * AL: body
472  * opt_num = N
473  * opt_table = [A1, A2, ..., AN, AL]
474  */
475 
476  const struct rb_iseq_param_keyword {
477  int num;
478  int required_num;
479  int bits_start;
480  int rest_start;
481  const ID *table;
482  VALUE *default_values;
483  } *keyword;
484  } param;
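 /*
  * Illustration (hypothetical method): for `def foo(a, k1:, k2: 1)` the
  * keyword struct would hold num == 2 and required_num == 1, table would
  * list k1 and k2 with required keywords first, and default_values would
  * carry the compiled default for k2.
  */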
485 
486  rb_iseq_location_t location;
487 
488  /* insn info, must be freed */
489  struct iseq_insn_info {
490  const struct iseq_insn_info_entry *body;
491  unsigned int *positions;
492  unsigned int size;
493 #if VM_INSN_INFO_TABLE_IMPL == 2
494  struct succ_index_table *succ_index_table;
495 #endif
496  } insns_info;
497 
498  const ID *local_table; /* must free */
499 
500  /* catch table */
501  struct iseq_catch_table *catch_table;
502 
503  /* for child iseq */
504  const struct rb_iseq_struct *parent_iseq;
505  struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
506 
507  union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
508  struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
509 
510  struct {
511  rb_snum_t flip_count;
512  VALUE script_lines;
513  VALUE coverage;
514  VALUE pc2branchindex;
515  VALUE *original_iseq;
516  } variable;
517 
518  unsigned int local_table_size;
519  unsigned int ic_size; // Number of IC caches
520  unsigned int ise_size; // Number of ISE caches
521  unsigned int ivc_size; // Number of IVC caches
522  unsigned int icvarc_size; // Number of ICVARC caches
523  unsigned int ci_size;
524  unsigned int stack_max; /* for stack overflow check */
525 
526  unsigned int builtin_attrs; // Union of rb_builtin_attr
527 
528  bool prism; // ISEQ was generated from prism compiler
529 
530  union {
531  iseq_bits_t * list; /* Find references for GC */
532  iseq_bits_t single;
533  } mark_bits;
534 
535  struct rb_id_table *outer_variables;
536 
537  const rb_iseq_t *mandatory_only_iseq;
538 
539 #if USE_RJIT || USE_YJIT
540  // Function pointer for JIT code on jit_exec()
541  rb_jit_func_t jit_entry;
542  // Number of calls on jit_exec()
543  long unsigned jit_entry_calls;
544 #endif
545 
546 #if USE_YJIT
547  // Function pointer for JIT code on jit_exec_exception()
548  rb_jit_func_t jit_exception;
549  // Number of calls on jit_exec_exception()
550  long unsigned jit_exception_calls;
551 #endif
552 
553 #if USE_RJIT
554  // RJIT stores some data on each iseq.
555  VALUE rjit_blocks;
556 #endif
557 
558 #if USE_YJIT
559  // YJIT stores some data on each iseq.
560  void *yjit_payload;
561  // Used to estimate how frequently this ISEQ gets called
562  uint64_t yjit_calls_at_interv;
563 #endif
564 };
565 
566 /* T_IMEMO/iseq */
567 /* typedef rb_iseq_t is in method.h */
568 struct rb_iseq_struct {
569  VALUE flags; /* 1 */
570  VALUE wrapper; /* 2 */
571 
572  struct rb_iseq_constant_body *body; /* 3 */
573 
574  union { /* 4, 5 words */
575  struct iseq_compile_data *compile_data; /* used at compile time */
576 
577  struct {
578  VALUE obj;
579  int index;
580  } loader;
581 
582  struct {
583  struct rb_hook_list_struct *local_hooks;
584  rb_event_flag_t global_trace_events;
585  } exec;
586  } aux;
587 };
588 
589 #define ISEQ_BODY(iseq) ((iseq)->body)
590 
591 #if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
592 #define USE_LAZY_LOAD 0
593 #endif
594 
595 #if !USE_LAZY_LOAD
596 static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
597 #endif
598 const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
599 
600 static inline const rb_iseq_t *
601 rb_iseq_check(const rb_iseq_t *iseq)
602 {
603  if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
604  rb_iseq_complete((rb_iseq_t *)iseq);
605  }
606  return iseq;
607 }
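/*
 * Usage sketch: callers fetch an iseq through rb_iseq_check() before touching
 * ISEQ_BODY(), so that a lazily loaded iseq (USE_LAZY_LOAD) is completed on
 * first use; def_iseq_ptr() below does exactly this:
 *
 *     return rb_iseq_check(def->body.iseq.iseqptr);
 */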
608 
609 static inline bool
610 rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
611 {
612  return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
613 }
614 
615 static inline const rb_iseq_t *
616 def_iseq_ptr(rb_method_definition_t *def)
617 {
618 // TODO: revisit. To check for the bug, enable this assertion.
619 #if VM_CHECK_MODE > 0
620  if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
621 #endif
622  return rb_iseq_check(def->body.iseq.iseqptr);
623 }
624 
625 enum ruby_special_exceptions {
626  ruby_error_reenter,
627  ruby_error_nomemory,
628  ruby_error_sysstack,
629  ruby_error_stackfatal,
630  ruby_error_stream_closed,
631  ruby_special_error_count
632 };
633 
634 #define GetVMPtr(obj, ptr) \
635  GetCoreDataFromValue((obj), rb_vm_t, (ptr))
636 
637 struct rb_vm_struct;
638 typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
639 
640 typedef struct rb_at_exit_list {
641  rb_vm_at_exit_func *func;
642  struct rb_at_exit_list *next;
643 } rb_at_exit_list;
644 
645 void *rb_objspace_alloc(void);
646 void rb_objspace_free(void *objspace);
647 void rb_objspace_call_finalizer(void);
648 
649 typedef struct rb_hook_list_struct {
650  struct rb_event_hook_struct *hooks;
651  rb_event_flag_t events;
652  unsigned int running;
653  bool need_clean;
654  bool is_local;
655 } rb_hook_list_t;
656 
657 
658 // see builtin.h for definition
659 typedef const struct rb_builtin_function *RB_BUILTIN;
660 
661 struct global_object_list {
662  VALUE *varptr;
663  struct global_object_list *next;
664 };
665 
666 typedef struct rb_vm_struct {
667  VALUE self;
668 
669  struct {
670  struct ccan_list_head set;
671  unsigned int cnt;
672  unsigned int blocking_cnt;
673 
674  struct rb_ractor_struct *main_ractor;
675  struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
676 
677  struct {
678  // monitor
679  rb_nativethread_lock_t lock;
680  struct rb_ractor_struct *lock_owner;
681  unsigned int lock_rec;
682 
683  // join at exit
684  rb_nativethread_cond_t terminate_cond;
685  bool terminate_waiting;
686 
687 #ifndef RUBY_THREAD_PTHREAD_H
688  bool barrier_waiting;
689  unsigned int barrier_cnt;
690  rb_nativethread_cond_t barrier_cond;
691 #endif
692  } sync;
693 
694  // ractor scheduling
695  struct {
696  rb_nativethread_lock_t lock;
697  struct rb_ractor_struct *lock_owner;
698  bool locked;
699 
700  rb_nativethread_cond_t cond; // GRQ
701  unsigned int snt_cnt; // count of shared NTs
702  unsigned int dnt_cnt; // count of dedicated NTs
703 
704  unsigned int running_cnt;
705 
706  unsigned int max_cpu;
707  struct ccan_list_head grq; // Global Ready Queue
708  unsigned int grq_cnt;
709 
710  // running threads
711  struct ccan_list_head running_threads;
712 
713  // threads which switch context by timeslice
714  struct ccan_list_head timeslice_threads;
715 
716  struct ccan_list_head zombie_threads;
717 
718  // true if the timeslice timer is not enabled
719  bool timeslice_wait_inf;
720 
721  // barrier
722  rb_nativethread_cond_t barrier_complete_cond;
723  rb_nativethread_cond_t barrier_release_cond;
724  bool barrier_waiting;
725  unsigned int barrier_waiting_cnt;
726  unsigned int barrier_serial;
727  } sched;
728  } ractor;
729 
730 #ifdef USE_SIGALTSTACK
731  void *main_altstack;
732 #endif
733 
734  rb_serial_t fork_gen;
735  struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
736 
737  /* set in single-threaded processes only: */
738  volatile int ubf_async_safe;
739 
740  unsigned int running: 1;
741  unsigned int thread_abort_on_exception: 1;
742  unsigned int thread_report_on_exception: 1;
743  unsigned int thread_ignore_deadlock: 1;
744 
745  /* object management */
746  VALUE mark_object_ary;
747  struct global_object_list *global_object_list;
748  const VALUE special_exceptions[ruby_special_error_count];
749 
750  /* load */
751  VALUE top_self;
752  VALUE load_path;
753  VALUE load_path_snapshot;
754  VALUE load_path_check_cache;
755  VALUE expanded_load_path;
756  VALUE loaded_features;
757  VALUE loaded_features_snapshot;
758  VALUE loaded_features_realpaths;
759  VALUE loaded_features_realpath_map;
760  struct st_table *loaded_features_index;
761  struct st_table *loading_table;
762  // For running the init function of statically linked
763  // extensions when they are loaded
764  struct st_table *static_ext_inits;
765 
766  /* signal */
767  struct {
768  VALUE cmd[RUBY_NSIG];
769  } trap_list;
770 
771  /* postponed_job (async-signal-safe, and thread-safe) */
772  struct rb_postponed_job_queue *postponed_job_queue;
773 
774  int src_encoding_index;
775 
776  /* workqueue (thread-safe, NOT async-signal-safe) */
777  struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
778  rb_nativethread_lock_t workqueue_lock;
779 
780  VALUE orig_progname, progname;
781  VALUE coverages, me2counter;
782  int coverage_mode;
783 
784  struct {
785  struct rb_objspace *objspace;
786  struct gc_mark_func_data_struct {
787  void *data;
788  void (*mark_func)(VALUE v, void *data);
789  } *mark_func_data;
790  } gc;
791 
792  rb_at_exit_list *at_exit;
793 
794  st_table *frozen_strings;
795 
796  const struct rb_builtin_function *builtin_function_table;
797 
798  st_table *ci_table;
799  struct rb_id_table *negative_cme_table;
800  st_table *overloaded_cme_table; // cme -> overloaded_cme
801  st_table *unused_block_warning_table;
802 
803  // This id table contains a mapping from ID to ICs. It does this with ID
804  // keys and nested st_tables as values. The nested tables have ICs as keys
805  // and Qtrue as values. It is used when inline constant caches need to be
806  // invalidated or ISEQs are being freed.
807  struct rb_id_table *constant_cache;
808  ID inserting_constant_cache_id;
809 
810 #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
811 #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
812 #endif
813  const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
814 
815 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
816  uint32_t clock;
817 #endif
818 
819  /* params */
820  struct { /* size in byte */
821  size_t thread_vm_stack_size;
822  size_t thread_machine_stack_size;
823  size_t fiber_vm_stack_size;
824  size_t fiber_machine_stack_size;
825  } default_params;
826 
827 } rb_vm_t;
828 
829 /* default values */
830 
831 #define RUBY_VM_SIZE_ALIGN 4096
832 
833 #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
834 #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
835 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
836 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
837 
838 #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
839 #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
840 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
841 #if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
842 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
843 #else
844 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
845 #endif
846 
847 #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
848 /* It seems sanitizers consume A LOT of machine stacks */
849 #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
850 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
851 #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
852 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
853 #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
854 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
855 #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
856 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
857 #endif
858 
859 #ifndef VM_DEBUG_BP_CHECK
860 #define VM_DEBUG_BP_CHECK 0
861 #endif
862 
863 #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
864 #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
865 #endif
866 
867 struct rb_captured_block {
868  VALUE self;
869  const VALUE *ep;
870  union {
871  const rb_iseq_t *iseq;
872  const struct vm_ifunc *ifunc;
873  VALUE val;
874  } code;
875 };
876 
877 enum rb_block_handler_type {
878  block_handler_type_iseq,
879  block_handler_type_ifunc,
880  block_handler_type_symbol,
881  block_handler_type_proc
882 };
883 
884 enum rb_block_type {
885  block_type_iseq,
886  block_type_ifunc,
887  block_type_symbol,
888  block_type_proc
889 };
890 
891 struct rb_block {
892  union {
893  struct rb_captured_block captured;
894  VALUE symbol;
895  VALUE proc;
896  } as;
897  enum rb_block_type type;
898 };
899 
900 typedef struct rb_control_frame_struct {
901  const VALUE *pc; // cfp[0]
902  VALUE *sp; // cfp[1]
903  const rb_iseq_t *iseq; // cfp[2]
904  VALUE self; // cfp[3] / block[0]
905  const VALUE *ep; // cfp[4] / block[1]
906  const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
907  void *jit_return; // cfp[6] -- return address for JIT code
908 #if VM_DEBUG_BP_CHECK
909  VALUE *bp_check; // cfp[7]
910 #endif
911 } rb_control_frame_t;
912 
913 extern const rb_data_type_t ruby_threadptr_data_type;
914 
915 static inline struct rb_thread_struct *
916 rb_thread_ptr(VALUE thval)
917 {
918  return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
919 }
920 
921 enum rb_thread_status {
922  THREAD_RUNNABLE,
923  THREAD_STOPPED,
924  THREAD_STOPPED_FOREVER,
925  THREAD_KILLED
926 };
927 
928 #ifdef RUBY_JMP_BUF
929 typedef RUBY_JMP_BUF rb_jmpbuf_t;
930 #else
931 typedef void *rb_jmpbuf_t[5];
932 #endif
933 
934 /*
935  `rb_vm_tag_jmpbuf_t` type represents a buffer used to
936  long jump to a C frame associated with `rb_vm_tag`.
937 
938  The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
939  following functions:
940  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
941  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
942 
943  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
944  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
945 */
946 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
947 /*
948  The WebAssembly target with Asyncify-based SJLJ needs to
949  capture the execution context by unwinding/rewinding
950  call frames into a jump buffer. The buffer tends to be
951  considerably large, unlike other architectures'
952  register-based buffers.
953  Therefore, we allocate the buffer on the heap in such
954  environments.
955 */
956 typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
957 
958 #define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
959 
960 static inline void
961 rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
962 {
963  *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
964 }
965 
966 static inline void
967 rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
968 {
969  ruby_xfree(*jmpbuf);
970 }
971 #else
972 typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
973 
974 #define RB_VM_TAG_JMPBUF_GET(buf) (buf)
975 
976 static inline void
977 rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
978 {
979  // no-op
980 }
981 
982 static inline void
983 rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
984 {
985  // no-op
986 }
987 #endif
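/*
 A pairing sketch (simplified from what EC_PUSH_TAG()/EC_POP_TAG() in
 eval_intern.h do):

 ```c
 struct rb_vm_tag tag;
 rb_vm_tag_jmpbuf_init(&tag.buf);
 if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
     // ... code that may long jump back here ...
 }
 rb_vm_tag_jmpbuf_deinit(&tag.buf);
 ```
*/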
988 
989 /*
990  the members which are written in EC_PUSH_TAG() should be placed at
991  the beginning and the end, so that the entire region is accessible.
992 */
993 struct rb_vm_tag {
994  VALUE tag;
995  VALUE retval;
996  rb_vm_tag_jmpbuf_t buf;
997  struct rb_vm_tag *prev;
998  enum ruby_tag_type state;
999  unsigned int lock_rec;
1000 };
1001 
1002 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1003 STATIC_ASSERT(rb_vm_tag_buf_end,
1004  offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1005  sizeof(struct rb_vm_tag));
1006 
1007 struct rb_unblock_callback {
1008  rb_unblock_function_t *func;
1009  void *arg;
1010 };
1011 
1012 struct rb_mutex_struct;
1013 
1014 typedef struct rb_fiber_struct rb_fiber_t;
1015 
1016 struct rb_waiting_list {
1017  struct rb_waiting_list *next;
1018  struct rb_thread_struct *thread;
1019  struct rb_fiber_struct *fiber;
1020 };
1021 
1022 struct rb_execution_context_struct {
1023  /* execution information */
1024  VALUE *vm_stack; /* must free, must mark */
1025  size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1026  rb_control_frame_t *cfp;
1027 
1028  struct rb_vm_tag *tag;
1029 
1030  /* interrupt flags */
1031  rb_atomic_t interrupt_flag;
1032  rb_atomic_t interrupt_mask; /* size should match flag */
1033 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1034  uint32_t checked_clock;
1035 #endif
1036 
1037  rb_fiber_t *fiber_ptr;
1038  struct rb_thread_struct *thread_ptr;
1039 
1040  /* storage (ec (fiber) local) */
1041  struct rb_id_table *local_storage;
1042  VALUE local_storage_recursive_hash;
1043  VALUE local_storage_recursive_hash_for_trace;
1044 
1045  /* Inheritable fiber storage. */
1046  VALUE storage;
1047 
1048  /* eval env */
1049  const VALUE *root_lep;
1050  VALUE root_svar;
1051 
1052  /* trace information */
1053  struct rb_trace_arg_struct *trace_arg;
1054 
1055  /* temporary places */
1056  VALUE errinfo;
1057  VALUE passed_block_handler; /* for rb_iterate */
1058 
1059  uint8_t raised_flag; /* only 3 bits needed */
1060 
1061  /* n.b. only 7 bits needed, really: */
1062  BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1063 
1064  VALUE private_const_reference;
1065 
1066  /* for GC */
1067  struct {
1068  VALUE *stack_start;
1069  VALUE *stack_end;
1070  size_t stack_maxsize;
1071  RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
1072 
1073 #ifdef RUBY_ASAN_ENABLED
1074  void *asan_fake_stack_handle;
1075 #endif
1076  } machine;
1077 };
1078 
1079 #ifndef rb_execution_context_t
1080 typedef struct rb_execution_context_struct rb_execution_context_t;
1081 #define rb_execution_context_t rb_execution_context_t
1082 #endif
1083 
1084 // for builtin.h
1085 #define VM_CORE_H_EC_DEFINED 1
1086 
1087 // Set the vm_stack pointer in the execution context.
1088 void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1089 
1090 // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1091 // @param ec the execution context to update.
1092 // @param stack a pointer to the stack to use.
1093 // @param size the size of the stack, as in `VALUE stack[size]`.
1094 void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1095 
1096 // Clear (set to `NULL`) the vm_stack pointer.
1097 // @param ec the execution context to update.
1098 void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1099 
1100 struct rb_ext_config {
1101  bool ractor_safe;
1102 };
1103 
1104 typedef struct rb_ractor_struct rb_ractor_t;
1105 
1106 struct rb_native_thread;
1107 
1108 typedef struct rb_thread_struct {
1109  struct ccan_list_node lt_node; // managed by a ractor
1110  VALUE self;
1111  rb_ractor_t *ractor;
1112  rb_vm_t *vm;
1113  struct rb_native_thread *nt;
1114  rb_execution_context_t *ec;
1115 
1116  struct rb_thread_sched_item sched;
1117  bool mn_schedulable;
1118  rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1119 
1120  VALUE last_status; /* $? */
1121 
1122  /* for cfunc */
1123  struct rb_calling_info *calling;
1124 
1125  /* for load(true) */
1126  VALUE top_self;
1127  VALUE top_wrapper;
1128 
1129  /* thread control */
1130 
1131  BITFIELD(enum rb_thread_status, status, 2);
1132  /* bit flags */
1133  unsigned int has_dedicated_nt : 1;
1134  unsigned int to_kill : 1;
1135  unsigned int abort_on_exception: 1;
1136  unsigned int report_on_exception: 1;
1137  unsigned int pending_interrupt_queue_checked: 1;
1138  int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1139  uint32_t running_time_us; /* 12500..800000 */
1140 
1141  void *blocking_region_buffer;
1142 
1143  VALUE thgroup;
1144  VALUE value;
1145 
1146  /* temporary place of retval on OPT_CALL_THREADED_CODE */
1147 #if OPT_CALL_THREADED_CODE
1148  VALUE retval;
1149 #endif
1150 
1151  /* async errinfo queue */
1152  VALUE pending_interrupt_queue;
1153  VALUE pending_interrupt_mask_stack;
1154 
1155  /* interrupt management */
1156  rb_nativethread_lock_t interrupt_lock;
1157  struct rb_unblock_callback unblock;
1158  VALUE locking_mutex;
1159  struct rb_mutex_struct *keeping_mutexes;
1160  struct ccan_list_head interrupt_exec_tasks;
1161 
1162  struct rb_waiting_list *join_list;
1163 
1164  union {
1165  struct {
1166  VALUE proc;
1167  VALUE args;
1168  int kw_splat;
1169  } proc;
1170  struct {
1171  VALUE (*func)(void *);
1172  void *arg;
1173  } func;
1174  } invoke_arg;
1175 
1176  enum thread_invoke_type {
1177  thread_invoke_type_none = 0,
1178  thread_invoke_type_proc,
1179  thread_invoke_type_ractor_proc,
1180  thread_invoke_type_func
1181  } invoke_type;
1182 
1183  /* statistics data for profiler */
1184  VALUE stat_insn_usage;
1185 
1186  /* fiber */
1187  rb_fiber_t *root_fiber;
1188 
1189  VALUE scheduler;
1190  unsigned int blocking;
1191 
1192  /* misc */
1193  VALUE name;
1194  void **specific_storage;
1195 
1196  struct rb_ext_config ext_config;
1197 } rb_thread_t;
1198 
1199 static inline unsigned int
1200 rb_th_serial(const rb_thread_t *th)
1201 {
1202  return th ? (unsigned int)th->serial : 0;
1203 }
1204 
1205 typedef enum {
1206  VM_DEFINECLASS_TYPE_CLASS = 0x00,
1207  VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1208  VM_DEFINECLASS_TYPE_MODULE = 0x02,
1209  /* 0x03..0x06 is reserved */
1210  VM_DEFINECLASS_TYPE_MASK = 0x07
1211 } rb_vm_defineclass_type_t;
1212 
1213 #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1214 #define VM_DEFINECLASS_FLAG_SCOPED 0x08
1215 #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1216 #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1217 #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1218  ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1219 
1220 /* iseq.c */
1221 RUBY_SYMBOL_EXPORT_BEGIN
1222 
1223 /* node -> iseq */
1224 rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1225 rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1226 rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1227 rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1228 rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1229  enum rb_iseq_type, const rb_compile_option_t*,
1230  VALUE script_lines);
1231 
1232 struct iseq_link_anchor;
1233 struct rb_iseq_new_with_callback_callback_func {
1234  VALUE flags;
1235  VALUE reserved;
1236  void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1237  const void *data;
1238 };
1239 static inline struct rb_iseq_new_with_callback_callback_func *
1240 rb_iseq_new_with_callback_new_callback(
1241  void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1242 {
1243  struct rb_iseq_new_with_callback_callback_func *memo =
1244  IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1245  memo->func = func;
1246  memo->data = ptr;
1247 
1248  return memo;
1249 }
1250 rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1251  VALUE name, VALUE path, VALUE realpath, int first_lineno,
1252  const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1253 
1254 VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1255 int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1256 
1257 VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1258 
1259 RUBY_EXTERN VALUE rb_cISeq;
1260 RUBY_EXTERN VALUE rb_cRubyVM;
1261 RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1262 RUBY_EXTERN VALUE rb_block_param_proxy;
1263 RUBY_SYMBOL_EXPORT_END
1264 
1265 #define GetProcPtr(obj, ptr) \
1266  GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1267 
1268 typedef struct {
1269  const struct rb_block block;
1270  unsigned int is_from_method: 1; /* bool */
1271  unsigned int is_lambda: 1; /* bool */
1272  unsigned int is_isolated: 1; /* bool */
1273 } rb_proc_t;
1274 
1275 RUBY_SYMBOL_EXPORT_BEGIN
1276 VALUE rb_proc_isolate(VALUE self);
1277 VALUE rb_proc_isolate_bang(VALUE self);
1278 VALUE rb_proc_ractor_make_shareable(VALUE self);
1279 RUBY_SYMBOL_EXPORT_END
1280 
1281 typedef struct {
1282  VALUE flags; /* imemo header */
1283  rb_iseq_t *iseq;
1284  const VALUE *ep;
1285  const VALUE *env;
1286  unsigned int env_size;
1287 } rb_env_t;
1288 
1289 extern const rb_data_type_t ruby_binding_data_type;
1290 
1291 #define GetBindingPtr(obj, ptr) \
1292  GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1293 
1294 typedef struct {
1295  const struct rb_block block;
1296  const VALUE pathobj;
1297  int first_lineno;
1298 } rb_binding_t;
1299 
1300 /* used by compile time and send insn */
1301 
1302 enum vm_check_match_type {
1303  VM_CHECKMATCH_TYPE_WHEN = 1,
1304  VM_CHECKMATCH_TYPE_CASE = 2,
1305  VM_CHECKMATCH_TYPE_RESCUE = 3
1306 };
1307 
1308 #define VM_CHECKMATCH_TYPE_MASK 0x03
1309 #define VM_CHECKMATCH_ARRAY 0x04
1310 
1311 enum vm_opt_newarray_send_type {
1312  VM_OPT_NEWARRAY_SEND_MAX = 1,
1313  VM_OPT_NEWARRAY_SEND_MIN = 2,
1314  VM_OPT_NEWARRAY_SEND_HASH = 3,
1315  VM_OPT_NEWARRAY_SEND_PACK = 4,
1316  VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1317  VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
1318 };
1319 
1320 enum vm_special_object_type {
1321  VM_SPECIAL_OBJECT_VMCORE = 1,
1322  VM_SPECIAL_OBJECT_CBASE,
1323  VM_SPECIAL_OBJECT_CONST_BASE
1324 };
1325 
1326 enum vm_svar_index {
1327  VM_SVAR_LASTLINE = 0, /* $_ */
1328  VM_SVAR_BACKREF = 1, /* $~ */
1329 
1330  VM_SVAR_EXTRA_START = 2,
1331  VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1332 };
1333 
1334 /* inline cache */
1335 typedef struct iseq_inline_constant_cache *IC;
1336 typedef struct iseq_inline_iv_cache_entry *IVC;
1337 typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1338 typedef union iseq_inline_storage_entry *ISE;
1339 typedef const struct rb_callinfo *CALL_INFO;
1340 typedef const struct rb_callcache *CALL_CACHE;
1341 typedef struct rb_call_data *CALL_DATA;
1342 
1343 typedef VALUE CDHASH;
1344 
1345 #ifndef FUNC_FASTCALL
1346 #define FUNC_FASTCALL(x) x
1347 #endif
1348 
1349 typedef rb_control_frame_t *
1350  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1351 
1352 #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1353 #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1354 
1355 #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1356 #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1357 #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1358 
1359 enum vm_frame_env_flags {
1360  /* Frame/Environment flag bits:
1361  * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
1362  *
1363  * X : tag for GC marking (it looks like a Fixnum)
1364  * EEE : 4 bits Env flags
1365  * FF..: 7 bits Frame flags
1366  * MM..: 15 bits frame magic (to check frame corruption)
1367  */
1368 
1369  /* frame types */
1370  VM_FRAME_MAGIC_METHOD = 0x11110001,
1371  VM_FRAME_MAGIC_BLOCK = 0x22220001,
1372  VM_FRAME_MAGIC_CLASS = 0x33330001,
1373  VM_FRAME_MAGIC_TOP = 0x44440001,
1374  VM_FRAME_MAGIC_CFUNC = 0x55550001,
1375  VM_FRAME_MAGIC_IFUNC = 0x66660001,
1376  VM_FRAME_MAGIC_EVAL = 0x77770001,
1377  VM_FRAME_MAGIC_RESCUE = 0x78880001,
1378  VM_FRAME_MAGIC_DUMMY = 0x79990001,
1379 
1380  VM_FRAME_MAGIC_MASK = 0x7fff0001,
1381 
1382  /* frame flag */
1383  VM_FRAME_FLAG_FINISH = 0x0020,
1384  VM_FRAME_FLAG_BMETHOD = 0x0040,
1385  VM_FRAME_FLAG_CFRAME = 0x0080,
1386  VM_FRAME_FLAG_LAMBDA = 0x0100,
1387  VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1388  VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1389  VM_FRAME_FLAG_PASSED = 0x0800,
1390 
1391  /* env flag */
1392  VM_ENV_FLAG_LOCAL = 0x0002,
1393  VM_ENV_FLAG_ESCAPED = 0x0004,
1394  VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1395  VM_ENV_FLAG_ISOLATED = 0x0010,
1396 };
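/*
 * Worked example: the ep[0] flags word of a finished method frame with a
 * local env is
 *   VM_FRAME_MAGIC_METHOD | VM_FRAME_FLAG_FINISH | VM_ENV_FLAG_LOCAL
 *   == 0x11110001 | 0x0020 | 0x0002 == 0x11110023,
 * and the low tag bit keeps the word looking like a Fixnum so the GC
 * ignores it.
 */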
1397 
1398 #define VM_ENV_DATA_SIZE ( 3)
1399 
1400 #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1401 #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1402 #define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1403 #define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1404 
1405 #define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
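/*
 * Layout sketch for an env with n locals (ep points at the FLAGS slot; the
 * ENV slot is meaningful for escaped envs):
 *
 *   ep[-(2+n) .. -3]  local variables (last one at VM_ENV_INDEX_LAST_LVAR)
 *   ep[-2]            me/cref  (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]            specval  (VM_ENV_DATA_INDEX_SPECVAL)
 *   ep[ 0]            flags    (VM_ENV_DATA_INDEX_FLAGS)
 *   ep[ 1]            env      (VM_ENV_DATA_INDEX_ENV)
 */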
1406 
1407 static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1408 
1409 static inline void
1410 VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1411 {
1412  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1413  VM_ASSERT(FIXNUM_P(flags));
1414  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1415 }
1416 
1417 static inline void
1418 VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1419 {
1420  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1421  VM_ASSERT(FIXNUM_P(flags));
1422  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1423 }
1424 
1425 static inline unsigned long
1426 VM_ENV_FLAGS(const VALUE *ep, long flag)
1427 {
1428  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1429  VM_ASSERT(FIXNUM_P(flags));
1430  return flags & flag;
1431 }
1432 
1433 static inline unsigned long
1434 VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1435 {
1436  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1437 }
1438 
1439 static inline int
1440 VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1441 {
1442  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1443 }
1444 
1445 static inline int
1446 VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1447 {
1448  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1449 }
1450 
1451 static inline int
1452 VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1453 {
1454  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1455 }
1456 
1457 static inline int
1458 VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1459 {
1460  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1461 }
1462 
1463 static inline int
1464 rb_obj_is_iseq(VALUE iseq)
1465 {
1466  return imemo_type_p(iseq, imemo_iseq);
1467 }
1468 
1469 #if VM_CHECK_MODE > 0
1470 #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1471 #endif
1472 
1473 static inline int
1474 VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1475 {
1476  int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1477  VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1478  (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1479  return cframe_p;
1480 }
1481 
1482 static inline int
1483 VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1484 {
1485  return !VM_FRAME_CFRAME_P(cfp);
1486 }
1487 
1488 #define RUBYVM_CFUNC_FRAME_P(cfp) \
1489  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1490 
1491 #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1492 #define VM_BLOCK_HANDLER_NONE 0
1493 
1494 static inline int
1495 VM_ENV_LOCAL_P(const VALUE *ep)
1496 {
1497  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1498 }
1499 
1500 static inline const VALUE *
1501 VM_ENV_PREV_EP(const VALUE *ep)
1502 {
1503  VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1504  return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1505 }
1506 
1507 static inline VALUE
1508 VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1509 {
1510  VM_ASSERT(VM_ENV_LOCAL_P(ep));
1511  return ep[VM_ENV_DATA_INDEX_SPECVAL];
1512 }
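/*
 Sketch: walking up to the local (method-level) env, which is what
 rb_vm_ep_local_ep() declared later in this header does:

 ```c
 while (!VM_ENV_LOCAL_P(ep)) {
     ep = VM_ENV_PREV_EP(ep); // follow the guarded prev-EP in specval
 }
 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep); // valid only on a local env
 ```
*/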
1513 
1514 #if VM_CHECK_MODE > 0
1515 int rb_vm_ep_in_heap_p(const VALUE *ep);
1516 #endif
1517 
1518 static inline int
1519 VM_ENV_ESCAPED_P(const VALUE *ep)
1520 {
1521  VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1522  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1523 }
1524 
1525 RBIMPL_ATTR_NONNULL((1))
1526 static inline VALUE
1527 VM_ENV_ENVVAL(const VALUE *ep)
1528 {
1529  VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1530  VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1531  VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1532  return envval;
1533 }
1534 
1535 RBIMPL_ATTR_NONNULL((1))
1536 static inline const rb_env_t *
1537 VM_ENV_ENVVAL_PTR(const VALUE *ep)
1538 {
1539  return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1540 }
1541 
1542 static inline const rb_env_t *
1543 vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1544 {
1545  rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1546  env->ep = env_ep;
1547  env->env = env_body;
1548  env->env_size = env_size;
1549  env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1550  return env;
1551 }
1552 
1553 static inline void
1554 VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1555 {
1556  *((VALUE *)ptr) = v;
1557 }
1558 
1559 static inline void
1560 VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1561 {
1562  VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1563  VM_FORCE_WRITE(ptr, special_const_value);
1564 }
1565 
1566 static inline void
1567 VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1568 {
1569  VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1570  VM_FORCE_WRITE(&ep[index], v);
1571 }
1572 
1573 const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1574 const VALUE *rb_vm_proc_local_ep(VALUE proc);
1575 void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1576 void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1577 
1578 VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1579 
1580 #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1581 #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1582 
1583 #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1584  ((void *)(ecfp) > (void *)(cfp))
1585 
1586 static inline const rb_control_frame_t *
1587 RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1588 {
1589  return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1590 }
1591 
1592 static inline int
1593 RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1594 {
1595  return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1596 }
1597 
1598 static inline int
1599 VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1600 {
1601  if ((block_handler & 0x03) == 0x01) {
1602 #if VM_CHECK_MODE > 0
1603  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1604  VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1605 #endif
1606  return 1;
1607  }
1608  else {
1609  return 0;
1610  }
1611 }
1612 
1613 static inline VALUE
1614 VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1615 {
1616  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1617  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1618  return block_handler;
1619 }
1620 
1621 static inline const struct rb_captured_block *
1622 VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1623 {
1624  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1625  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1626  return captured;
1627 }
1628 
1629 static inline int
1630 VM_BH_IFUNC_P(VALUE block_handler)
1631 {
1632  if ((block_handler & 0x03) == 0x03) {
1633 #if VM_CHECK_MODE > 0
1634  struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1635  VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1636 #endif
1637  return 1;
1638  }
1639  else {
1640  return 0;
1641  }
1642 }
1643 
1644 static inline VALUE
1645 VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1646 {
1647  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1648  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1649  return block_handler;
1650 }
1651 
1652 static inline const struct rb_captured_block *
1653 VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1654 {
1655  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1656  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1657  return captured;
1658 }
1659 
1660 static inline const struct rb_captured_block *
1661 VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1662 {
1663  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1664  VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1665  return captured;
1666 }
1667 
1668 static inline enum rb_block_handler_type
1669 vm_block_handler_type(VALUE block_handler)
1670 {
1671  if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1672  return block_handler_type_iseq;
1673  }
1674  else if (VM_BH_IFUNC_P(block_handler)) {
1675  return block_handler_type_ifunc;
1676  }
1677  else if (SYMBOL_P(block_handler)) {
1678  return block_handler_type_symbol;
1679  }
1680  else {
1681  VM_ASSERT(rb_obj_is_proc(block_handler));
1682  return block_handler_type_proc;
1683  }
1684 }
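/*
 * Encoding summary, as checked by the predicates above:
 *
 *   (bh & 0x03) == 0x01   -> tagged struct rb_captured_block * (iseq block)
 *   (bh & 0x03) == 0x03   -> tagged struct rb_captured_block * (ifunc block)
 *   SYMBOL_P(bh)          -> Symbol, e.g. a block passed as &:to_s
 *   otherwise             -> Proc object
 *   VM_BLOCK_HANDLER_NONE (== 0) means no block was given.
 */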
1685 
1686 static inline void
1687 vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1688 {
1689  VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1690  (vm_block_handler_type(block_handler), 1));
1691 }
1692 
1693 static inline enum rb_block_type
1694 vm_block_type(const struct rb_block *block)
1695 {
1696 #if VM_CHECK_MODE > 0
1697  switch (block->type) {
1698  case block_type_iseq:
1699  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1700  break;
1701  case block_type_ifunc:
1702  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1703  break;
1704  case block_type_symbol:
1705  VM_ASSERT(SYMBOL_P(block->as.symbol));
1706  break;
1707  case block_type_proc:
1708  VM_ASSERT(rb_obj_is_proc(block->as.proc));
1709  break;
1710  }
1711 #endif
1712  return block->type;
1713 }
1714 
1715 static inline void
1716 vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1717 {
1718  struct rb_block *mb = (struct rb_block *)block;
1719  mb->type = type;
1720 }
1721 
1722 static inline const struct rb_block *
1723 vm_proc_block(VALUE procval)
1724 {
1725  VM_ASSERT(rb_obj_is_proc(procval));
1726  return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1727 }
1728 
1729 static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1730 static inline const VALUE *vm_block_ep(const struct rb_block *block);
1731 
1732 static inline const rb_iseq_t *
1733 vm_proc_iseq(VALUE procval)
1734 {
1735  return vm_block_iseq(vm_proc_block(procval));
1736 }
1737 
1738 static inline const VALUE *
1739 vm_proc_ep(VALUE procval)
1740 {
1741  return vm_block_ep(vm_proc_block(procval));
1742 }
1743 
1744 static inline const rb_iseq_t *
1745 vm_block_iseq(const struct rb_block *block)
1746 {
1747  switch (vm_block_type(block)) {
1748  case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1749  case block_type_proc: return vm_proc_iseq(block->as.proc);
1750  case block_type_ifunc:
1751  case block_type_symbol: return NULL;
1752  }
1753  VM_UNREACHABLE(vm_block_iseq);
1754  return NULL;
1755 }
1756 
1757 static inline const VALUE *
1758 vm_block_ep(const struct rb_block *block)
1759 {
1760  switch (vm_block_type(block)) {
1761  case block_type_iseq:
1762  case block_type_ifunc: return block->as.captured.ep;
1763  case block_type_proc: return vm_proc_ep(block->as.proc);
1764  case block_type_symbol: return NULL;
1765  }
1766  VM_UNREACHABLE(vm_block_ep);
1767  return NULL;
1768 }
1769 
1770 static inline VALUE
1771 vm_block_self(const struct rb_block *block)
1772 {
1773  switch (vm_block_type(block)) {
1774  case block_type_iseq:
1775  case block_type_ifunc:
1776  return block->as.captured.self;
1777  case block_type_proc:
1778  return vm_block_self(vm_proc_block(block->as.proc));
1779  case block_type_symbol:
1780  return Qundef;
1781  }
1782  VM_UNREACHABLE(vm_block_self);
1783  return Qundef;
1784 }
1785 
1786 static inline VALUE
1787 VM_BH_TO_SYMBOL(VALUE block_handler)
1788 {
1789  VM_ASSERT(SYMBOL_P(block_handler));
1790  return block_handler;
1791 }
1792 
1793 static inline VALUE
1794 VM_BH_FROM_SYMBOL(VALUE symbol)
1795 {
1796  VM_ASSERT(SYMBOL_P(symbol));
1797  return symbol;
1798 }
1799 
1800 static inline VALUE
1801 VM_BH_TO_PROC(VALUE block_handler)
1802 {
1803  VM_ASSERT(rb_obj_is_proc(block_handler));
1804  return block_handler;
1805 }
1806 
1807 static inline VALUE
1808 VM_BH_FROM_PROC(VALUE procval)
1809 {
1810  VM_ASSERT(rb_obj_is_proc(procval));
1811  return procval;
1812 }
1813 
1814 /* VM related object allocate functions */
1815 VALUE rb_thread_alloc(VALUE klass);
1816 VALUE rb_binding_alloc(VALUE klass);
1817 VALUE rb_proc_alloc(VALUE klass);
1818 VALUE rb_proc_dup(VALUE self);
1819 
1820 /* for debug */
1821 extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1822 extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1823 extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1824 
1825 #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1826 #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1827 bool rb_vm_bugreport(const void *, FILE *);
1828 typedef void (*ruby_sighandler_t)(int);
1829 RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1830 NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1831 
1832 /* functions about thread/vm execution */
1833 RUBY_SYMBOL_EXPORT_BEGIN
1834 VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1835 VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1836 VALUE rb_iseq_path(const rb_iseq_t *iseq);
1837 VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1838 RUBY_SYMBOL_EXPORT_END
1839 
1840 VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1841 void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1842 
1843 int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1844 void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1845 
1846 VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1847 
1848 VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1849 static inline VALUE
1850 rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1851 {
1852  return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1853 }
1854 
1855 static inline VALUE
1856 rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1857 {
1858  return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1859 }
1860 
1861 VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1862 VALUE rb_vm_env_local_variables(const rb_env_t *env);
1863 const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1864 const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1865 void rb_vm_inc_const_missing_count(void);
1866 VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1867  const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1868 void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1869 void rb_vm_pop_frame(rb_execution_context_t *ec);
1870 
1871 void rb_thread_start_timer_thread(void);
1872 void rb_thread_stop_timer_thread(void);
1873 void rb_thread_reset_timer_thread(void);
1874 void rb_thread_wakeup_timer_thread(int);
1875 
1876 static inline void
1877 rb_vm_living_threads_init(rb_vm_t *vm)
1878 {
1879  ccan_list_head_init(&vm->waiting_fds);
1880  ccan_list_head_init(&vm->workqueue);
1881  ccan_list_head_init(&vm->ractor.set);
1882  ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1883 }
1884 
1885 typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1886 rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1887 rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1888 VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1889 int rb_vm_get_sourceline(const rb_control_frame_t *);
1890 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1891 void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1892 rb_thread_t * ruby_thread_from_native(void);
1893 int ruby_thread_set_native(rb_thread_t *th);
1894 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1895 void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1896 void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1897 VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1898 
1899 void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1900 
1901 #define rb_vm_register_special_exception(sp, e, m) \
1902  rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1903 
1904 void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1905 
1906 void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1907 
1908 const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1909 
1910 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1911 
1912 #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1913  STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1914  STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1915  const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1916  if (UNLIKELY((cfp) <= &bound[1])) { \
1917  vm_stackoverflow(); \
1918  } \
1919 } while (0)
1920 
1921 #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1922  CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
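/*
How the check works: the VM value stack grows toward higher addresses while
control frames are pushed from the opposite end toward lower ones, so
"overflow" means `sp + margin` would run into the active `cfp` region.
A minimal usage sketch (the margin value is illustrative):

```c
// Require at least argc + 1 free VALUE slots before pushing a call.
CHECK_VM_STACK_OVERFLOW(reg_cfp, argc + 1);
```
*/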
1923 
1924 VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1925 
1926 rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1927 
1928 /* for thread */
1929 
1930 #if RUBY_VM_THREAD_MODEL == 2
1931 
1932 RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1933 RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1934 RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1935 RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1936 RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1937 
1938 #define GET_VM() rb_current_vm()
1939 #define GET_RACTOR() rb_current_ractor()
1940 #define GET_THREAD() rb_current_thread()
1941 #define GET_EC() rb_current_execution_context(true)
1942 
1943 static inline rb_thread_t *
1944 rb_ec_thread_ptr(const rb_execution_context_t *ec)
1945 {
1946  return ec->thread_ptr;
1947 }
1948 
1949 static inline rb_ractor_t *
1950 rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1951 {
1952  const rb_thread_t *th = rb_ec_thread_ptr(ec);
1953  if (th) {
1954  VM_ASSERT(th->ractor != NULL);
1955  return th->ractor;
1956  }
1957  else {
1958  return NULL;
1959  }
1960 }
1961 
1962 static inline rb_vm_t *
1963 rb_ec_vm_ptr(const rb_execution_context_t *ec)
1964 {
1965  const rb_thread_t *th = rb_ec_thread_ptr(ec);
1966  if (th) {
1967  return th->vm;
1968  }
1969  else {
1970  return NULL;
1971  }
1972 }
1973 
1974 NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
1975 
1976 static inline rb_execution_context_t *
1977 rb_current_execution_context(bool expect_ec)
1978 {
1979 #ifdef RB_THREAD_LOCAL_SPECIFIER
1980  #ifdef __APPLE__
1981  rb_execution_context_t *ec = rb_current_ec();
1982  #else
1983  rb_execution_context_t *ec = ruby_current_ec;
1984  #endif
1985 
1986  /* In shared objects, `__tls_get_addr()` is used to access the TLS,
1987  * and the address of `ruby_current_ec` can be stored in a function
1988  * frame. However, this address can be misused after a coroutine
1989  * migrates to another native thread:
1990  * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
1991  * 2) Context-switch and resume the coroutine on NT2.
1992  * 3) `ptr` is used on NT2, but it accesses the TLS of NT1.
1993  * This assertion checks for such misuse.
1994  *
1995  * To avoid accidents, `GET_EC()` should be called once per frame.
1996  * Note that inlining can introduce this problem.
1997  */
1998  VM_ASSERT(ec == rb_current_ec_noinline());
1999 #else
2000  rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
2001 #endif
2002  VM_ASSERT(!expect_ec || ec != NULL);
2003  return ec;
2004 }
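/*
Per the comment above, a usage sketch: fetch the execution context once per
C frame and pass the local around, rather than calling GET_EC() repeatedly
(an inlined copy may cache a stale TLS address across a coroutine's
native-thread migration).

```c
rb_execution_context_t *ec = GET_EC();
do_something(ec);   // do_something()/do_other_thing() are hypothetical
do_other_thing(ec); // reuse the local instead of calling GET_EC() again
```
*/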
2005 
2006 static inline rb_thread_t *
2007 rb_current_thread(void)
2008 {
2009  const rb_execution_context_t *ec = GET_EC();
2010  return rb_ec_thread_ptr(ec);
2011 }
2012 
2013 static inline rb_ractor_t *
2014 rb_current_ractor_raw(bool expect)
2015 {
2016  if (ruby_single_main_ractor) {
2017  return ruby_single_main_ractor;
2018  }
2019  else {
2020  const rb_execution_context_t *ec = rb_current_execution_context(expect);
2021  return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2022  }
2023 }
2024 
2025 static inline rb_ractor_t *
2026 rb_current_ractor(void)
2027 {
2028  return rb_current_ractor_raw(true);
2029 }
2030 
2031 static inline rb_vm_t *
2032 rb_current_vm(void)
2033 {
2034 #if 0 // TODO: reconsider the assertions
2035  VM_ASSERT(ruby_current_vm_ptr == NULL ||
2036  ruby_current_execution_context_ptr == NULL ||
2037  rb_ec_thread_ptr(GET_EC()) == NULL ||
2038  rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2039  rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2040 #endif
2041 
2042  return ruby_current_vm_ptr;
2043 }
2044 
2045 void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2046  unsigned int recorded_lock_rec,
2047  unsigned int current_lock_rec);
2048 
2049 static inline unsigned int
2050 rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2051 {
2052  rb_vm_t *vm = rb_ec_vm_ptr(ec);
2053 
2054  if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
2055  return 0;
2056  }
2057  else {
2058  return vm->ractor.sync.lock_rec;
2059  }
2060 }
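/*
Usage sketch (hedged; inferred from the declaration of
rb_ec_vm_lock_rec_release() above): record the lock recursion count before
running code that must not change it, then verify on release.

```c
unsigned int recorded = rb_ec_vm_lock_rec(ec);
run_callback(ec); // hypothetical callback
rb_ec_vm_lock_rec_release(ec, recorded, rb_ec_vm_lock_rec(ec));
```
*/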
2061 
2062 #else
2063 #error "unsupported thread model"
2064 #endif
2065 
2066 enum {
2067  TIMER_INTERRUPT_MASK = 0x01,
2068  PENDING_INTERRUPT_MASK = 0x02,
2069  POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2070  TRAP_INTERRUPT_MASK = 0x08,
2071  TERMINATE_INTERRUPT_MASK = 0x10,
2072  VM_BARRIER_INTERRUPT_MASK = 0x20,
2073 };
2074 
2075 #define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2076 #define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2077 #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2078 #define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2079 #define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2080 #define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2081 #define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
2082  (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
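/*
Usage sketch (hedged): a producer atomically sets a bit on the target
context, and the target polls for deliverable (unmasked) interrupts.

```c
RUBY_VM_SET_TRAP_INTERRUPT(target_ec); // e.g. after a signal was buffered
if (RUBY_VM_INTERRUPTED(target_ec)) {
    // a pending or trap interrupt is deliverable (not masked)
}
```
*/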
2083 
2084 static inline bool
2085 RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2086 {
2087 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2088  uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2089 
2090  if (current_clock != ec->checked_clock) {
2091  ec->checked_clock = current_clock;
2092  RUBY_VM_SET_TIMER_INTERRUPT(ec);
2093  }
2094 #endif
2095  return ec->interrupt_flag & ~(ec)->interrupt_mask;
2096 }
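/*
Note: with USE_VM_CLOCK, RUBY_VM_INTERRUPTED_ANY() above also promotes an
advanced VM-wide clock into a TIMER_INTERRUPT_MASK bit for this context, so
a context learns about timer ticks the first time it polls after the clock
moved.
*/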
2097 
2098 VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2099 int rb_signal_buff_size(void);
2100 int rb_signal_exec(rb_thread_t *th, int sig);
2101 void rb_threadptr_check_signal(rb_thread_t *mth);
2102 void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2103 void rb_threadptr_signal_exit(rb_thread_t *th);
2104 int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2105 void rb_threadptr_interrupt(rb_thread_t *th);
2106 void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2107 void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2108 void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2109 VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2110 void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2111 void rb_execution_context_update(rb_execution_context_t *ec);
2112 void rb_execution_context_mark(const rb_execution_context_t *ec);
2113 void rb_fiber_close(rb_fiber_t *fib);
2114 void Init_native_thread(rb_thread_t *th);
2115 int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2116 
2117 // vm_sync.h
2118 void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2119 void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2120 
2121 #define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2122 static inline void
2123 rb_vm_check_ints(rb_execution_context_t *ec)
2124 {
2125 #ifdef RUBY_ASSERT_CRITICAL_SECTION
2126  VM_ASSERT(ruby_assert_critical_section_entered == 0);
2127 #endif
2128 
2129  VM_ASSERT(ec == GET_EC());
2130 
2131  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2132  rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2133  }
2134 }
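/*
Typical pattern (a sketch; has_more_work() and do_unit() are hypothetical):
long-running C loops should call RUBY_VM_CHECK_INTS() so that signals,
Thread#raise, and postponed jobs get a chance to run.

```c
rb_execution_context_t *ec = GET_EC();
while (has_more_work()) {
    do_unit();
    RUBY_VM_CHECK_INTS(ec); // may raise or switch threads
}
```
*/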
2135 
2136 /* tracer */
2137 
2138 struct rb_trace_arg_struct {
2139  rb_event_flag_t event;
2140  rb_execution_context_t *ec;
2141  const rb_control_frame_t *cfp;
2142  VALUE self;
2143  ID id;
2144  ID called_id;
2145  VALUE klass;
2146  VALUE data;
2147 
2148  int klass_solved;
2149 
2150  /* calc from cfp */
2151  int lineno;
2152  VALUE path;
2153 };
2154 
2155 void rb_hook_list_mark(rb_hook_list_t *hooks);
2156 void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
2157 void rb_hook_list_free(rb_hook_list_t *hooks);
2158 void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2159 void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2160 
2161 void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2162 
2163 #define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2164  const rb_event_flag_t flag_arg_ = (flag_); \
2165  rb_hook_list_t *hooks_arg_ = (hooks_); \
2166  if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2167  /* defer evaluating the other arguments */ \
2168  rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2169  } \
2170 } while (0)
2171 
2172 static inline void
2173 rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2174  VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2175 {
2176  struct rb_trace_arg_struct trace_arg;
2177 
2178  VM_ASSERT((hooks->events & flag) != 0);
2179 
2180  trace_arg.event = flag;
2181  trace_arg.ec = ec;
2182  trace_arg.cfp = ec->cfp;
2183  trace_arg.self = self;
2184  trace_arg.id = id;
2185  trace_arg.called_id = called_id;
2186  trace_arg.klass = klass;
2187  trace_arg.data = data;
2188  trace_arg.path = Qundef;
2189  trace_arg.klass_solved = 0;
2190 
2191  rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2192 }
2193 
2194 struct rb_ractor_pub {
2195  VALUE self;
2196  uint32_t id;
2197  rb_hook_list_t hooks;
2198 };
2199 
2200 static inline rb_hook_list_t *
2201 rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2202 {
2203  struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2204  return &cr_pub->hooks;
2205 }
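/*
Note (an assumption based on the ractor implementation): the cast in
rb_ec_ractor_hooks() is only valid if struct rb_ractor_pub is the first
member of rb_ractor_t, so that a ractor pointer and its public section
share the same address.
*/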
2206 
2207 #define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2208  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2209 
2210 #define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2211  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
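/*
Example usage (a sketch; the interpreter emits similar calls from its
instruction handlers): the wrapper macro tests the hook list's event mask
first, so the remaining arguments are evaluated only when a hook is
actually installed.

```c
EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, cfp->self, 0, 0, 0, Qundef);
```
*/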
2212 
2213 static inline void
2214 rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2215 {
2216  EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2217  NIL_P(eval_script) ? (VALUE)iseq :
2218  rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2219 }
2220 
2221 void rb_vm_trap_exit(rb_vm_t *vm);
2222 void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2223 void rb_vm_postponed_job_free(void); /* vm_trace.c */
2224 size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2225 void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2226 
2227 RUBY_SYMBOL_EXPORT_BEGIN
2228 
2229 int rb_thread_check_trap_pending(void);
2230 
2231 /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2232 #define RUBY_EVENT_COVERAGE_LINE 0x010000
2233 #define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2234 
2235 extern VALUE rb_get_coverages(void);
2236 extern void rb_set_coverages(VALUE, int, VALUE);
2237 extern void rb_clear_coverages(void);
2238 extern void rb_reset_coverages(void);
2239 extern void rb_resume_coverages(void);
2240 extern void rb_suspend_coverages(void);
2241 
2242 void rb_postponed_job_flush(rb_vm_t *vm);
2243 
2244 // ractor.c
2245 RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2246 RUBY_EXTERN VALUE rb_eRactorIsolationError;
2247 
2248 RUBY_SYMBOL_EXPORT_END
2249 
2250 #endif /* RUBY_VM_CORE_H */