Ruby 3.4.0dev (2024-11-22 revision 37a72b0150ec36b4ea27175039afc28c62207b0c)
vm_core.h (37a72b0150ec36b4ea27175039afc28c62207b0c)
1 #ifndef RUBY_VM_CORE_H
2 #define RUBY_VM_CORE_H
3 /**********************************************************************
4 
5  vm_core.h -
6 
7  $Author$
8  created at: 04/01/01 19:41:38 JST
9 
10  Copyright (C) 2004-2007 Koichi Sasada
11 
12 **********************************************************************/
13 
14 /*
15  * Enable check mode.
16  * 1: enable local assertions.
17  */
18 #ifndef VM_CHECK_MODE
19 
20 // respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
21 #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22 
23 #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24 #endif
25 
39 #ifndef VMDEBUG
40 #define VMDEBUG 0
41 #endif
42 
43 #if 0
44 #undef VMDEBUG
45 #define VMDEBUG 3
46 #endif
47 
48 #include "ruby/internal/config.h"
49 
50 #include <stddef.h>
51 #include <signal.h>
52 #include <stdarg.h>
53 
54 #include "ruby_assert.h"
55 
56 #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57 
58 #if VM_CHECK_MODE > 0
59 #define VM_ASSERT(expr, ...) \
60  RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62 #define RUBY_ASSERT_CRITICAL_SECTION
63 #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64 #else
65 #define VM_ASSERT(/*expr, */...) ((void)0)
66 #define VM_UNREACHABLE(func) UNREACHABLE
67 #define RUBY_DEBUG_THREAD_SCHEDULE()
68 #endif
69 
70 #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71 
72 #if defined(RUBY_ASSERT_CRITICAL_SECTION)
73 /*
74 # Critical Section Assertions
75 
76 These assertions ensure that context switching does not occur between two points in the code. In theory,
77 such code should already be protected by a mutex, and these assertions verify that the mutex is actually held.
78 
79 They are particularly useful where a mutex is held further up the call stack and the code in question does
80 not hold the mutex directly. In that case, the critical section assertions confirm that the mutex is held
81 by a caller.
82 
83 These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84 is set.
85 
86 ## Example Usage
87 
88 ```c
89 RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90 // ... some code which does not invoke rb_vm_check_ints() ...
91 RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92 ```
93 
94 If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95 `RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96 */
97 extern int ruby_assert_critical_section_entered;
98 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100 #else
101 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103 #endif
104 
105 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106 # include "wasm/setjmp.h"
107 #else
108 # include <setjmp.h>
109 #endif
110 
111 #if defined(__linux__) || defined(__FreeBSD__)
112 # define RB_THREAD_T_HAS_NATIVE_ID
113 #endif
114 
115 #include "ruby/internal/stdbool.h"
116 #include "ccan/list/list.h"
117 #include "id.h"
118 #include "internal.h"
119 #include "internal/array.h"
120 #include "internal/basic_operators.h"
121 #include "internal/sanitizers.h"
122 #include "internal/serial.h"
123 #include "internal/vm.h"
124 #include "method.h"
125 #include "node.h"
126 #include "ruby/ruby.h"
127 #include "ruby/st.h"
128 #include "ruby_atomic.h"
129 #include "vm_opts.h"
130 
131 #include "ruby/thread_native.h"
132 /*
133  * implementation selector of get_insn_info algorithm
134  * 0: linear search
135  * 1: binary search
136  * 2: succinct bitvector
137  */
138 #ifndef VM_INSN_INFO_TABLE_IMPL
139 # define VM_INSN_INFO_TABLE_IMPL 2
140 #endif
141 
142 #if defined(NSIG_MAX) /* POSIX issue 8 */
143 # undef NSIG
144 # define NSIG NSIG_MAX
145 #elif defined(_SIG_MAXSIG) /* FreeBSD */
146 # undef NSIG
147 # define NSIG _SIG_MAXSIG
148 #elif defined(_SIGMAX) /* QNX */
149 # define NSIG (_SIGMAX + 1)
150 #elif defined(NSIG) /* 99% of everything else */
151 # /* take it */
152 #else /* Last resort */
153 # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
154 #endif
155 
156 #define RUBY_NSIG NSIG
157 
158 #if defined(SIGCLD)
159 # define RUBY_SIGCHLD (SIGCLD)
160 #elif defined(SIGCHLD)
161 # define RUBY_SIGCHLD (SIGCHLD)
162 #endif
163 
164 #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
165 # define USE_SIGALTSTACK
166 void *rb_allocate_sigaltstack(void);
167 void *rb_register_sigaltstack(void *);
168 # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
169 # define RB_ALTSTACK_FREE(var) free(var)
170 # define RB_ALTSTACK(var) var
171 #else /* noop */
172 # define RB_ALTSTACK_INIT(var, altstack)
173 # define RB_ALTSTACK_FREE(var)
174 # define RB_ALTSTACK(var) (0)
175 #endif
176 
177 #include THREAD_IMPL_H
178 #define RUBY_VM_THREAD_MODEL 2
179 
180 /*****************/
181 /* configuration */
182 /*****************/
183 
184 /* gcc ver. check */
185 #if defined(__GNUC__) && __GNUC__ >= 2
186 
187 #if OPT_TOKEN_THREADED_CODE
188 #if OPT_DIRECT_THREADED_CODE
189 #undef OPT_DIRECT_THREADED_CODE
190 #endif
191 #endif
192 
193 #else /* defined(__GNUC__) && __GNUC__ >= 2 */
194 
195 /* disable threaded code options */
196 #if OPT_DIRECT_THREADED_CODE
197 #undef OPT_DIRECT_THREADED_CODE
198 #endif
199 #if OPT_TOKEN_THREADED_CODE
200 #undef OPT_TOKEN_THREADED_CODE
201 #endif
202 #endif
203 
204 /* call threaded code */
205 #if OPT_CALL_THREADED_CODE
206 #if OPT_DIRECT_THREADED_CODE
207 #undef OPT_DIRECT_THREADED_CODE
208 #endif /* OPT_DIRECT_THREADED_CODE */
209 #endif /* OPT_CALL_THREADED_CODE */
210 
211 void rb_vm_encoded_insn_data_table_init(void);
212 typedef unsigned long rb_num_t;
213 typedef signed long rb_snum_t;
214 
215 enum ruby_tag_type {
216  RUBY_TAG_NONE = 0x0,
217  RUBY_TAG_RETURN = 0x1,
218  RUBY_TAG_BREAK = 0x2,
219  RUBY_TAG_NEXT = 0x3,
220  RUBY_TAG_RETRY = 0x4,
221  RUBY_TAG_REDO = 0x5,
222  RUBY_TAG_RAISE = 0x6,
223  RUBY_TAG_THROW = 0x7,
224  RUBY_TAG_FATAL = 0x8,
225  RUBY_TAG_MASK = 0xf
226 };
227 
228 #define TAG_NONE RUBY_TAG_NONE
229 #define TAG_RETURN RUBY_TAG_RETURN
230 #define TAG_BREAK RUBY_TAG_BREAK
231 #define TAG_NEXT RUBY_TAG_NEXT
232 #define TAG_RETRY RUBY_TAG_RETRY
233 #define TAG_REDO RUBY_TAG_REDO
234 #define TAG_RAISE RUBY_TAG_RAISE
235 #define TAG_THROW RUBY_TAG_THROW
236 #define TAG_FATAL RUBY_TAG_FATAL
237 #define TAG_MASK RUBY_TAG_MASK
238 
239 enum ruby_vm_throw_flags {
240  VM_THROW_NO_ESCAPE_FLAG = 0x8000,
241  VM_THROW_STATE_MASK = 0xff
242 };
243 
244 /* forward declarations */
245 struct rb_thread_struct;
246 struct rb_control_frame_struct;
247 
248 /* iseq data type */
249 struct rb_compile_option_struct;
250 
251 union ic_serial_entry {
252  rb_serial_t raw;
253  VALUE data[2];
254 };
255 
256 #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
257 
258 // imemo_constcache
259 struct iseq_inline_constant_cache_entry {
260  VALUE flags;
261 
262  VALUE value; // v0
263  VALUE _unused1; // v1
264  VALUE _unused2; // v2
265  const rb_cref_t *ic_cref; // v3
266 };
267 STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268  (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269  sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270 
271 struct iseq_inline_constant_cache {
272  struct iseq_inline_constant_cache_entry *entry;
273 
274  /**
275  * A null-terminated list of ids, used to represent a constant's path
276  * idNULL is used to represent the :: prefix, and 0 is used to denote the end
277  * of the list.
278  *
279  * For example
280  *   FOO        {rb_intern("FOO"), 0}
281  *   FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
282  *   ::FOO      {idNULL, rb_intern("FOO"), 0}
283  *   ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
284  */
285  const ID *segments;
286 };
287 
288 struct iseq_inline_iv_cache_entry {
289  uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
290  ID iv_set_name;
291 };
292 
293 struct iseq_inline_cvar_cache_entry {
294  struct rb_cvar_class_tbl_entry *entry;
295 };
296 
297 union iseq_inline_storage_entry {
298  struct {
299  struct rb_thread_struct *running_thread;
300  VALUE value;
301  } once;
302  struct iseq_inline_constant_cache ic_cache;
303  struct iseq_inline_iv_cache_entry iv_cache;
304 };
305 
306 struct rb_calling_info {
307  const struct rb_call_data *cd;
308  const struct rb_callcache *cc;
309  VALUE block_handler;
310  VALUE recv;
311  int argc;
312  bool kw_splat;
313  VALUE heap_argv;
314 };
315 
316 #ifndef VM_ARGC_STACK_MAX
317 #define VM_ARGC_STACK_MAX 128
318 #endif
319 
320 # define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
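/* Illustrative note: when splat expansion would push more than
 * VM_ARGC_STACK_MAX arguments, the VM collects them into a heap-allocated
 * array (heap_argv) instead of the VM stack. Argument counts should
 * therefore be read through CALLING_ARGC(), which handles both
 * representations:
 *
 *     int argc = CALLING_ARGC(calling); // stack args or heap_argv length
 */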
321 
322 struct rb_execution_context_struct;
323 
324 #if 1
325 #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
326 #else
327 #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
328 #endif
329 #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
330 
331 typedef struct rb_iseq_location_struct {
332  VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
333  VALUE base_label; /* String */
334  VALUE label; /* String */
335  int first_lineno;
336  int node_id;
337  rb_code_location_t code_location;
338 } rb_iseq_location_t;
339 
340 #define PATHOBJ_PATH 0
341 #define PATHOBJ_REALPATH 1
342 
343 static inline VALUE
344 pathobj_path(VALUE pathobj)
345 {
346  if (RB_TYPE_P(pathobj, T_STRING)) {
347  return pathobj;
348  }
349  else {
350  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
351  return RARRAY_AREF(pathobj, PATHOBJ_PATH);
352  }
353 }
354 
355 static inline VALUE
356 pathobj_realpath(VALUE pathobj)
357 {
358  if (RB_TYPE_P(pathobj, T_STRING)) {
359  return pathobj;
360  }
361  else {
362  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
363  return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
364  }
365 }
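/* Usage sketch (illustrative): an iseq's location stores pathobj in either
 * representation, so both accessors work uniformly:
 *
 *     VALUE path = pathobj_path(ISEQ_BODY(iseq)->location.pathobj);
 *     VALUE real = pathobj_realpath(ISEQ_BODY(iseq)->location.pathobj);
 *
 * rb_iseq_path() and rb_iseq_realpath(), declared later in this header,
 * wrap exactly this pattern.
 */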
366 
367 /* Forward declarations */
368 struct rb_rjit_unit;
369 
370 typedef uintptr_t iseq_bits_t;
371 
372 #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
373 
374 /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
375 #define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
376 
377 /* instruction sequence type */
378 enum rb_iseq_type {
379  ISEQ_TYPE_TOP,
380  ISEQ_TYPE_METHOD,
381  ISEQ_TYPE_BLOCK,
382  ISEQ_TYPE_CLASS,
383  ISEQ_TYPE_RESCUE,
384  ISEQ_TYPE_ENSURE,
385  ISEQ_TYPE_EVAL,
386  ISEQ_TYPE_MAIN,
387  ISEQ_TYPE_PLAIN
388 };
389 
390 // Attributes specified by Primitive.attr!
391 enum rb_builtin_attr {
392  // The iseq does not call methods.
393  BUILTIN_ATTR_LEAF = 0x01,
394  // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
395  BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
396  // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
397  BUILTIN_ATTR_INLINE_BLOCK = 0x04,
398  // The iseq acts like a C method in backtraces.
399  BUILTIN_ATTR_C_TRACE = 0x08,
400 };
401 
402 typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
403 
404 struct rb_iseq_constant_body {
405  enum rb_iseq_type type;
406 
407  unsigned int iseq_size;
408  VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
409 
433  struct {
434  struct {
435  unsigned int has_lead : 1;
436  unsigned int has_opt : 1;
437  unsigned int has_rest : 1;
438  unsigned int has_post : 1;
439  unsigned int has_kw : 1;
440  unsigned int has_kwrest : 1;
441  unsigned int has_block : 1;
442 
443  unsigned int ambiguous_param0 : 1; /* {|a|} */
444  unsigned int accepts_no_kwarg : 1;
445  unsigned int ruby2_keywords: 1;
446  unsigned int anon_rest: 1;
447  unsigned int anon_kwrest: 1;
448  unsigned int use_block: 1;
449  unsigned int forwardable: 1;
450  } flags;
451 
452  unsigned int size;
453 
454  int lead_num;
455  int opt_num;
456  int rest_start;
457  int post_start;
458  int post_num;
459  int block_start;
460 
461  const VALUE *opt_table; /* (opt_num + 1) entries. */
462  /* opt_num and opt_table:
463  *
464  * def foo o1=e1, o2=e2, ..., oN=eN
465  * #=>
466  * # prologue code
467  * A1: e1
468  * A2: e2
469  * ...
470  * AN: eN
471  * AL: body
472  * opt_num = N
473  * opt_table = [A1, A2, ..., AN, AL]
474  */
475 
476  const struct rb_iseq_param_keyword {
477  int num;
478  int required_num;
479  int bits_start;
480  int rest_start;
481  const ID *table;
482  VALUE *default_values;
483  } *keyword;
484  } param;
485 
486  rb_iseq_location_t location;
487 
488  /* insn info, must be freed */
489  struct iseq_insn_info {
490  const struct iseq_insn_info_entry *body;
491  unsigned int *positions;
492  unsigned int size;
493 #if VM_INSN_INFO_TABLE_IMPL == 2
494  struct succ_index_table *succ_index_table;
495 #endif
496  } insns_info;
497 
498  const ID *local_table; /* must free */
499 
500  /* catch table */
501  struct iseq_catch_table *catch_table;
502 
503  /* for child iseq */
504  const struct rb_iseq_struct *parent_iseq;
505  struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
506 
507  union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
508  struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
509 
510  struct {
511  rb_snum_t flip_count;
512  VALUE script_lines;
513  VALUE coverage;
514  VALUE pc2branchindex;
515  VALUE *original_iseq;
516  } variable;
517 
518  unsigned int local_table_size;
519  unsigned int ic_size; // Number of IC caches
520  unsigned int ise_size; // Number of ISE caches
521  unsigned int ivc_size; // Number of IVC caches
522  unsigned int icvarc_size; // Number of ICVARC caches
523  unsigned int ci_size;
524  unsigned int stack_max; /* for stack overflow check */
525 
526  unsigned int builtin_attrs; // Union of rb_builtin_attr
527 
528  bool prism; // ISEQ was generated from prism compiler
529 
530  union {
531  iseq_bits_t * list; /* Find references for GC */
532  iseq_bits_t single;
533  } mark_bits;
534 
535  struct rb_id_table *outer_variables;
536 
537  const rb_iseq_t *mandatory_only_iseq;
538 
539 #if USE_RJIT || USE_YJIT
540  // Function pointer for JIT code on jit_exec()
541  rb_jit_func_t jit_entry;
542  // Number of calls on jit_exec()
543  long unsigned jit_entry_calls;
544 #endif
545 
546 #if USE_YJIT
547  // Function pointer for JIT code on jit_exec_exception()
548  rb_jit_func_t jit_exception;
549  // Number of calls on jit_exec_exception()
550  long unsigned jit_exception_calls;
551 #endif
552 
553 #if USE_RJIT
554  // RJIT stores some data on each iseq.
555  VALUE rjit_blocks;
556 #endif
557 
558 #if USE_YJIT
559  // YJIT stores some data on each iseq.
560  void *yjit_payload;
561  // Used to estimate how frequently this ISEQ gets called
562  uint64_t yjit_calls_at_interv;
563 #endif
564 };
565 
566 /* T_IMEMO/iseq */
567 /* typedef rb_iseq_t is in method.h */
568 struct rb_iseq_struct {
569  VALUE flags; /* 1 */
570  VALUE wrapper; /* 2 */
571 
572  struct rb_iseq_constant_body *body; /* 3 */
573 
574  union { /* 4, 5 words */
575  struct iseq_compile_data *compile_data; /* used at compile time */
576 
577  struct {
578  VALUE obj;
579  int index;
580  } loader;
581 
582  struct {
583  struct rb_hook_list_struct *local_hooks;
584  rb_event_flag_t global_trace_events;
585  } exec;
586  } aux;
587 };
588 
589 #define ISEQ_BODY(iseq) ((iseq)->body)
590 
591 #if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
592 #define USE_LAZY_LOAD 0
593 #endif
594 
595 #if !USE_LAZY_LOAD
596 static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
597 #endif
598 const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
599 
600 static inline const rb_iseq_t *
601 rb_iseq_check(const rb_iseq_t *iseq)
602 {
603  if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
604  rb_iseq_complete((rb_iseq_t *)iseq);
605  }
606  return iseq;
607 }
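/* Usage sketch: under USE_LAZY_LOAD an iseq may be a stub whose body is
 * only filled in on first use, so an iseq obtained from elsewhere should be
 * passed through rb_iseq_check() before its body is touched (a minimal,
 * illustrative example):
 *
 *     const rb_iseq_t *iseq = rb_iseq_check(maybe_stub);
 *     unsigned int size = ISEQ_BODY(iseq)->iseq_size;
 */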
608 
609 static inline bool
610 rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
611 {
612  return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
613 }
614 
615 static inline const rb_iseq_t *
616 def_iseq_ptr(rb_method_definition_t *def)
617 {
618 // TODO: re-visit. To check the bug, enable this assertion.
619 #if VM_CHECK_MODE > 0
620  if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
621 #endif
622  return rb_iseq_check(def->body.iseq.iseqptr);
623 }
624 
625 enum ruby_special_exceptions {
626  ruby_error_reenter,
627  ruby_error_nomemory,
628  ruby_error_sysstack,
629  ruby_error_stackfatal,
630  ruby_error_stream_closed,
631  ruby_special_error_count
632 };
633 
634 #define GetVMPtr(obj, ptr) \
635  GetCoreDataFromValue((obj), rb_vm_t, (ptr))
636 
637 struct rb_vm_struct;
638 typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
639 
640 typedef struct rb_at_exit_list {
641  rb_vm_at_exit_func *func;
642  struct rb_at_exit_list *next;
643 } rb_at_exit_list;
644 
645 void *rb_objspace_alloc(void);
646 void rb_objspace_free(void *objspace);
647 void rb_objspace_call_finalizer(void);
648 
649 typedef struct rb_hook_list_struct {
650  struct rb_event_hook_struct *hooks;
651  rb_event_flag_t events;
652  unsigned int running;
653  bool need_clean;
654  bool is_local;
655 } rb_hook_list_t;
656 
657 
658 // see builtin.h for definition
659 typedef const struct rb_builtin_function *RB_BUILTIN;
660 
661 struct global_object_list {
662  VALUE *varptr;
663  struct global_object_list *next;
664 };
665 
666 typedef struct rb_vm_struct {
667  VALUE self;
668 
669  struct {
670  struct ccan_list_head set;
671  unsigned int cnt;
672  unsigned int blocking_cnt;
673 
674  struct rb_ractor_struct *main_ractor;
675  struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
676 
677  struct {
678  // monitor
679  rb_nativethread_lock_t lock;
680  struct rb_ractor_struct *lock_owner;
681  unsigned int lock_rec;
682 
683  // join at exit
684  rb_nativethread_cond_t terminate_cond;
685  bool terminate_waiting;
686 
687 #ifndef RUBY_THREAD_PTHREAD_H
688  bool barrier_waiting;
689  unsigned int barrier_cnt;
690  rb_nativethread_cond_t barrier_cond;
691 #endif
692  } sync;
693 
694  // ractor scheduling
695  struct {
696  rb_nativethread_lock_t lock;
697  struct rb_ractor_struct *lock_owner;
698  bool locked;
699 
700  rb_nativethread_cond_t cond; // GRQ
701  unsigned int snt_cnt; // count of shared NTs
702  unsigned int dnt_cnt; // count of dedicated NTs
703 
704  unsigned int running_cnt;
705 
706  unsigned int max_cpu;
707  struct ccan_list_head grq; // Global Ready Queue
708  unsigned int grq_cnt;
709 
710  // running threads
711  struct ccan_list_head running_threads;
712 
713  // threads which switch context by timeslice
714  struct ccan_list_head timeslice_threads;
715 
716  struct ccan_list_head zombie_threads;
717 
718  // true if the timeslice timer is not enabled
719  bool timeslice_wait_inf;
720 
721  // barrier
722  rb_nativethread_cond_t barrier_complete_cond;
723  rb_nativethread_cond_t barrier_release_cond;
724  bool barrier_waiting;
725  unsigned int barrier_waiting_cnt;
726  unsigned int barrier_serial;
727  } sched;
728  } ractor;
729 
730 #ifdef USE_SIGALTSTACK
731  void *main_altstack;
732 #endif
733 
734  rb_serial_t fork_gen;
735  struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
736 
737  /* set in single-threaded processes only: */
738  volatile int ubf_async_safe;
739 
740  unsigned int running: 1;
741  unsigned int thread_abort_on_exception: 1;
742  unsigned int thread_report_on_exception: 1;
743  unsigned int thread_ignore_deadlock: 1;
744 
745  /* object management */
746  VALUE mark_object_ary;
747  struct global_object_list *global_object_list;
748  const VALUE special_exceptions[ruby_special_error_count];
749 
750  /* load */
751  VALUE top_self;
752  VALUE load_path;
753  VALUE load_path_snapshot;
754  VALUE load_path_check_cache;
755  VALUE expanded_load_path;
756  VALUE loaded_features;
757  VALUE loaded_features_snapshot;
758  VALUE loaded_features_realpaths;
759  VALUE loaded_features_realpath_map;
760  struct st_table *loaded_features_index;
761  struct st_table *loading_table;
762  // For running the init function of statically linked
763  // extensions when they are loaded
764  struct st_table *static_ext_inits;
765 
766  /* signal */
767  struct {
768  VALUE cmd[RUBY_NSIG];
769  } trap_list;
770 
771  /* postponed_job (async-signal-safe, and thread-safe) */
772  struct rb_postponed_job_queue *postponed_job_queue;
773 
774  int src_encoding_index;
775 
776  /* workqueue (thread-safe, NOT async-signal-safe) */
777  struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
778  rb_nativethread_lock_t workqueue_lock;
779 
780  VALUE orig_progname, progname;
781  VALUE coverages, me2counter;
782  int coverage_mode;
783 
784  struct {
785  struct rb_objspace *objspace;
786  struct gc_mark_func_data_struct {
787  void *data;
788  void (*mark_func)(VALUE v, void *data);
789  } *mark_func_data;
790  } gc;
791 
792  rb_at_exit_list *at_exit;
793 
794  st_table *frozen_strings;
795 
796  const struct rb_builtin_function *builtin_function_table;
797 
798  st_table *ci_table;
799  struct rb_id_table *negative_cme_table;
800  st_table *overloaded_cme_table; // cme -> overloaded_cme
801  st_table *unused_block_warning_table;
802 
803  // This id table contains a mapping from ID to ICs. It does this with ID
804  // keys and nested st_tables as values. The nested tables have ICs as keys
805  // and Qtrue as values. It is used when inline constant caches need to be
806  // invalidated or ISEQs are being freed.
807  struct rb_id_table *constant_cache;
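/* Illustrative sketch of a lookup in this table (not the actual
 * invalidation code; `ics` is a hypothetical local):
 *
 *     VALUE ics; // an st_table * of IC -> Qtrue, stored as a VALUE
 *     if (rb_id_table_lookup(vm->constant_cache, id, &ics)) {
 *         // iterate (st_table *)ics and invalidate each IC in it
 *     }
 */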
808 
809 #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
810 #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
811 #endif
812  const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
813 
814 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
815  uint32_t clock;
816 #endif
817 
818  /* params */
819  struct { /* sizes in bytes */
820  size_t thread_vm_stack_size;
821  size_t thread_machine_stack_size;
822  size_t fiber_vm_stack_size;
823  size_t fiber_machine_stack_size;
824  } default_params;
825 
826 } rb_vm_t;
827 
828 /* default values */
829 
830 #define RUBY_VM_SIZE_ALIGN 4096
831 
832 #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
833 #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
834 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
835 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
836 
837 #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
838 #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
839 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
840 #if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
841 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
842 #else
843 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
844 #endif
845 
846 #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
847 /* It seems sanitizers consume A LOT of machine stacks */
848 #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
849 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
850 #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
851 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
852 #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
853 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
854 #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
855 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
856 #endif
857 
858 #ifndef VM_DEBUG_BP_CHECK
859 #define VM_DEBUG_BP_CHECK 0
860 #endif
861 
862 #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
863 #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
864 #endif
865 
867  VALUE self;
868  const VALUE *ep;
869  union {
870  const rb_iseq_t *iseq;
871  const struct vm_ifunc *ifunc;
872  VALUE val;
873  } code;
874 };
875 
876 enum rb_block_handler_type {
877  block_handler_type_iseq,
878  block_handler_type_ifunc,
879  block_handler_type_symbol,
880  block_handler_type_proc
881 };
882 
883 enum rb_block_type {
884  block_type_iseq,
885  block_type_ifunc,
886  block_type_symbol,
887  block_type_proc
888 };
889 
890 struct rb_block {
891  union {
892  struct rb_captured_block captured;
893  VALUE symbol;
894  VALUE proc;
895  } as;
896  enum rb_block_type type;
897 };
898 
899 typedef struct rb_control_frame_struct {
900  const VALUE *pc; // cfp[0]
901  VALUE *sp; // cfp[1]
902  const rb_iseq_t *iseq; // cfp[2]
903  VALUE self; // cfp[3] / block[0]
904  const VALUE *ep; // cfp[4] / block[1]
905  const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
906  void *jit_return; // cfp[6] -- return address for JIT code
907 #if VM_DEBUG_BP_CHECK
908  VALUE *bp_check; // cfp[7]
909 #endif
910 } rb_control_frame_t;
911 
912 extern const rb_data_type_t ruby_threadptr_data_type;
913 
914 static inline struct rb_thread_struct *
915 rb_thread_ptr(VALUE thval)
916 {
917  return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
918 }
919 
920 enum rb_thread_status {
921  THREAD_RUNNABLE,
922  THREAD_STOPPED,
923  THREAD_STOPPED_FOREVER,
924  THREAD_KILLED
925 };
926 
927 #ifdef RUBY_JMP_BUF
928 typedef RUBY_JMP_BUF rb_jmpbuf_t;
929 #else
930 typedef void *rb_jmpbuf_t[5];
931 #endif
932 
933 /*
934  `rb_vm_tag_jmpbuf_t` type represents a buffer used to
935  long jump to a C frame associated with `rb_vm_tag`.
936 
937  Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
938  following functions:
939  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
940  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
941 
942  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
943  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
944 */
945 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
946 /*
947  The WebAssembly target with Asyncify-based SJLJ needs
948  to capture the execution context by unwinding/rewinding
949  call frames into a jump buffer. That buffer tends to be
950  considerably larger than other architectures'
951  register-based buffers.
952  Therefore, we allocate the buffer on the heap in such
953  environments.
954 */
955 typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
956 
957 #define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
958 
959 static inline void
960 rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
961 {
962  *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
963 }
964 
965 static inline void
966 rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
967 {
968  ruby_xfree(*jmpbuf);
969 }
970 #else
971 typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
972 
973 #define RB_VM_TAG_JMPBUF_GET(buf) (buf)
974 
975 static inline void
976 rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
977 {
978  // no-op
979 }
980 
981 static inline void
982 rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
983 {
984  // no-op
985 }
986 #endif
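/* Lifecycle sketch (illustrative; the real users are EC_PUSH_TAG() and
 * EC_POP_TAG() in eval_intern.h, operating on struct rb_vm_tag below):
 *
 *     struct rb_vm_tag tag;
 *     rb_vm_tag_jmpbuf_init(&tag.buf);
 *     if (ruby_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
 *         // protected code; a matching ruby_longjmp() lands here
 *     }
 *     rb_vm_tag_jmpbuf_deinit(&tag.buf);
 */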
987 
988 /*
989  the members that are written in EC_PUSH_TAG() should be placed at
990  the beginning and the end, so that the entire region is accessible.
991 */
992 struct rb_vm_tag {
993  VALUE tag;
994  VALUE retval;
995  rb_vm_tag_jmpbuf_t buf;
996  struct rb_vm_tag *prev;
997  enum ruby_tag_type state;
998  unsigned int lock_rec;
999 };
1000 
1001 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1002 STATIC_ASSERT(rb_vm_tag_buf_end,
1003  offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1004  sizeof(struct rb_vm_tag));
1005 
1006 struct rb_unblock_callback {
1007  rb_unblock_function_t *func;
1008  void *arg;
1009 };
1010 
1011 struct rb_mutex_struct;
1012 
1013 typedef struct rb_fiber_struct rb_fiber_t;
1014 
1015 struct rb_waiting_list {
1016  struct rb_waiting_list *next;
1017  struct rb_thread_struct *thread;
1018  struct rb_fiber_struct *fiber;
1019 };
1020 
1021 struct rb_execution_context_struct {
1022  /* execution information */
1023  VALUE *vm_stack; /* must free, must mark */
1024  size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1025  rb_control_frame_t *cfp;
1026 
1027  struct rb_vm_tag *tag;
1028 
1029  /* interrupt flags */
1030  rb_atomic_t interrupt_flag;
1031  rb_atomic_t interrupt_mask; /* size should match flag */
1032 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1033  uint32_t checked_clock;
1034 #endif
1035 
1036  rb_fiber_t *fiber_ptr;
1037  struct rb_thread_struct *thread_ptr;
1038 
1039  /* storage (ec (fiber) local) */
1040  struct rb_id_table *local_storage;
1041  VALUE local_storage_recursive_hash;
1042  VALUE local_storage_recursive_hash_for_trace;
1043 
1044  /* Inheritable fiber storage. */
1045  VALUE storage;
1046 
1047  /* eval env */
1048  const VALUE *root_lep;
1049  VALUE root_svar;
1050 
1051  /* trace information */
1052  struct rb_trace_arg_struct *trace_arg;
1053 
1054  /* temporary places */
1055  VALUE errinfo;
1056  VALUE passed_block_handler; /* for rb_iterate */
1057 
1058  uint8_t raised_flag; /* only 3 bits needed */
1059 
1060  /* n.b. only 7 bits needed, really: */
1061  BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1062 
1063  VALUE private_const_reference;
1064 
1065  /* for GC */
1066  struct {
1067  VALUE *stack_start;
1068  VALUE *stack_end;
1069  size_t stack_maxsize;
1070  RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
1071 
1072 #ifdef RUBY_ASAN_ENABLED
1073  void *asan_fake_stack_handle;
1074 #endif
1075  } machine;
1076 };
1077 
1078 #ifndef rb_execution_context_t
1079 typedef struct rb_execution_context_struct rb_execution_context_t;
1080 #define rb_execution_context_t rb_execution_context_t
1081 #endif
1082 
1083 // for builtin.h
1084 #define VM_CORE_H_EC_DEFINED 1
1085 
1086 // Set the vm_stack pointer in the execution context.
1087 void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1088 
1089 // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1090 // @param ec the execution context to update.
1091 // @param stack a pointer to the stack to use.
1092 // @param size the size of the stack, as in `VALUE stack[size]`.
1093 void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1094 
1095 // Clear (set to `NULL`) the vm_stack pointer.
1096 // @param ec the execution context to update.
1097 void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1098 
1099 struct rb_ext_config {
1100  bool ractor_safe;
1101 };
1102 
1103 typedef struct rb_ractor_struct rb_ractor_t;
1104 
1105 struct rb_native_thread;
1106 
1107 typedef struct rb_thread_struct {
1108  struct ccan_list_node lt_node; // managed by a ractor
1109  VALUE self;
1110  rb_ractor_t *ractor;
1111  rb_vm_t *vm;
1112  struct rb_native_thread *nt;
1113  rb_execution_context_t *ec;
1114 
1115  struct rb_thread_sched_item sched;
1116  bool mn_schedulable;
1117  rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1118 
1119  VALUE last_status; /* $? */
1120 
1121  /* for cfunc */
1122  struct rb_calling_info *calling;
1123 
1124  /* for load(true) */
1125  VALUE top_self;
1126  VALUE top_wrapper;
1127 
1128  /* thread control */
1129 
1130  BITFIELD(enum rb_thread_status, status, 2);
1131  /* bit flags */
1132  unsigned int has_dedicated_nt : 1;
1133  unsigned int to_kill : 1;
1134  unsigned int abort_on_exception: 1;
1135  unsigned int report_on_exception: 1;
1136  unsigned int pending_interrupt_queue_checked: 1;
1137  int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1138  uint32_t running_time_us; /* 12500..800000 */
1139 
1140  void *blocking_region_buffer;
1141 
1142  VALUE thgroup;
1143  VALUE value;
1144 
1145  /* temporary place of retval on OPT_CALL_THREADED_CODE */
1146 #if OPT_CALL_THREADED_CODE
1147  VALUE retval;
1148 #endif
1149 
1150  /* async errinfo queue */
1151  VALUE pending_interrupt_queue;
1152  VALUE pending_interrupt_mask_stack;
1153 
1154  /* interrupt management */
1155  rb_nativethread_lock_t interrupt_lock;
1156  struct rb_unblock_callback unblock;
1157  VALUE locking_mutex;
1158  struct rb_mutex_struct *keeping_mutexes;
1159  struct ccan_list_head interrupt_exec_tasks;
1160 
1161  struct rb_waiting_list *join_list;
1162 
1163  union {
1164  struct {
1165  VALUE proc;
1166  VALUE args;
1167  int kw_splat;
1168  } proc;
1169  struct {
1170  VALUE (*func)(void *);
1171  void *arg;
1172  } func;
1173  } invoke_arg;
1174 
1175  enum thread_invoke_type {
1176  thread_invoke_type_none = 0,
1177  thread_invoke_type_proc,
1178  thread_invoke_type_ractor_proc,
1179  thread_invoke_type_func
1180  } invoke_type;
1181 
1182  /* statistics data for profiler */
1183  VALUE stat_insn_usage;
1184 
1185  /* fiber */
1186  rb_fiber_t *root_fiber;
1187 
1188  VALUE scheduler;
1189  unsigned int blocking;
1190 
1191  /* misc */
1192  VALUE name;
1193  void **specific_storage;
1194 
1195  struct rb_ext_config ext_config;
1196 } rb_thread_t;
1197 
1198 static inline unsigned int
1199 rb_th_serial(const rb_thread_t *th)
1200 {
1201  return th ? (unsigned int)th->serial : 0;
1202 }
1203 
1204 typedef enum {
1205  VM_DEFINECLASS_TYPE_CLASS = 0x00,
1206  VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1207  VM_DEFINECLASS_TYPE_MODULE = 0x02,
1208  /* 0x03..0x06 are reserved */
1209  VM_DEFINECLASS_TYPE_MASK = 0x07
1210 } rb_vm_defineclass_type_t;
1211 
1212 #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1213 #define VM_DEFINECLASS_FLAG_SCOPED 0x08
1214 #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1215 #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1216 #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1217  ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1218 
1219 /* iseq.c */
1220 RUBY_SYMBOL_EXPORT_BEGIN
1221 
1222 /* node -> iseq */
1223 rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1224 rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1225 rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1226 rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1227 rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1228  enum rb_iseq_type, const rb_compile_option_t*,
1229  VALUE script_lines);
1230 
1231 struct iseq_link_anchor;
1232 struct rb_iseq_new_with_callback_callback_func {
1233  VALUE flags;
1234  VALUE reserved;
1235  void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1236  const void *data;
1237 };
1238 static inline struct rb_iseq_new_with_callback_callback_func *
1239 rb_iseq_new_with_callback_new_callback(
1240  void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1241 {
1242  struct rb_iseq_new_with_callback_callback_func *memo =
1243  IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1244  memo->func = func;
1245  memo->data = ptr;
1246 
1247  return memo;
1248 }
1249 rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1250  VALUE name, VALUE path, VALUE realpath, int first_lineno,
1251  const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1252 
1253 VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1254 int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1255 
1256 VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1257 
1258 RUBY_EXTERN VALUE rb_cISeq;
1259 RUBY_EXTERN VALUE rb_cRubyVM;
1260 RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1261 RUBY_EXTERN VALUE rb_block_param_proxy;
1262 RUBY_SYMBOL_EXPORT_END
1263 
1264 #define GetProcPtr(obj, ptr) \
1265  GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1266 
1267 typedef struct {
1268  const struct rb_block block;
1269  unsigned int is_from_method: 1; /* bool */
1270  unsigned int is_lambda: 1; /* bool */
1271  unsigned int is_isolated: 1; /* bool */
1272 } rb_proc_t;
1273 
1274 RUBY_SYMBOL_EXPORT_BEGIN
1275 VALUE rb_proc_isolate(VALUE self);
1276 VALUE rb_proc_isolate_bang(VALUE self);
1277 VALUE rb_proc_ractor_make_shareable(VALUE self);
1278 RUBY_SYMBOL_EXPORT_END
1279 
1280 typedef struct {
1281  VALUE flags; /* imemo header */
1282  rb_iseq_t *iseq;
1283  const VALUE *ep;
1284  const VALUE *env;
1285  unsigned int env_size;
1286 } rb_env_t;
1287 
1288 extern const rb_data_type_t ruby_binding_data_type;
1289 
1290 #define GetBindingPtr(obj, ptr) \
1291  GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1292 
1293 typedef struct {
1294  const struct rb_block block;
1295  const VALUE pathobj;
1296  int first_lineno;
1297 } rb_binding_t;
1298 
1299 /* used by compile time and send insn */
1300 
1301 enum vm_check_match_type {
1302  VM_CHECKMATCH_TYPE_WHEN = 1,
1303  VM_CHECKMATCH_TYPE_CASE = 2,
1304  VM_CHECKMATCH_TYPE_RESCUE = 3
1305 };
1306 
1307 #define VM_CHECKMATCH_TYPE_MASK 0x03
1308 #define VM_CHECKMATCH_ARRAY 0x04
1309 
1310 enum vm_opt_newarray_send_type {
1311  VM_OPT_NEWARRAY_SEND_MAX = 1,
1312  VM_OPT_NEWARRAY_SEND_MIN = 2,
1313  VM_OPT_NEWARRAY_SEND_HASH = 3,
1314  VM_OPT_NEWARRAY_SEND_PACK = 4,
1315  VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1316 };
1317 
1318 enum vm_special_object_type {
1319  VM_SPECIAL_OBJECT_VMCORE = 1,
1320  VM_SPECIAL_OBJECT_CBASE,
1321  VM_SPECIAL_OBJECT_CONST_BASE
1322 };
1323 
1324 enum vm_svar_index {
1325  VM_SVAR_LASTLINE = 0, /* $_ */
1326  VM_SVAR_BACKREF = 1, /* $~ */
1327 
1328  VM_SVAR_EXTRA_START = 2,
1329  VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1330 };
1331 
1332 /* inline cache */
1333 typedef struct iseq_inline_constant_cache *IC;
1334 typedef struct iseq_inline_iv_cache_entry *IVC;
1335 typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1336 typedef union iseq_inline_storage_entry *ISE;
1337 typedef const struct rb_callinfo *CALL_INFO;
1338 typedef const struct rb_callcache *CALL_CACHE;
1339 typedef struct rb_call_data *CALL_DATA;
1340 
1341 typedef VALUE CDHASH;
1342 
1343 #ifndef FUNC_FASTCALL
1344 #define FUNC_FASTCALL(x) x
1345 #endif
1346 
1347 typedef rb_control_frame_t *
1348  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1349 
1350 #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1351 #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1352 
1353 #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1354 #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1355 #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
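/* Round-trip sketch: the tag lives in the low bits of an aligned pointer,
 * and setting bit 0 makes the word look like a Fixnum so the GC will not
 * follow it:
 *
 *     VALUE guarded = GC_GUARDED_PTR(ep);          // ep | 0x01
 *     VALUE *prev = GC_GUARDED_PTR_REF(guarded);   // low two bits masked off
 *     VM_ASSERT(GC_GUARDED_PTR_P(guarded) && prev == ep);
 */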
1356 
1357 enum vm_frame_env_flags {
1358  /* Frame/Environment flag bits:
1359  * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
1360  *
1361  * X : tag for GC marking (it appears to be a Fixnum)
1362  * EEE : 4 bits Env flags
1363  * FF..: 7 bits Frame flags
1364  * MM..: 15 bits frame magic (to check frame corruption)
1365  */
1366 
1367  /* frame types */
1368  VM_FRAME_MAGIC_METHOD = 0x11110001,
1369  VM_FRAME_MAGIC_BLOCK = 0x22220001,
1370  VM_FRAME_MAGIC_CLASS = 0x33330001,
1371  VM_FRAME_MAGIC_TOP = 0x44440001,
1372  VM_FRAME_MAGIC_CFUNC = 0x55550001,
1373  VM_FRAME_MAGIC_IFUNC = 0x66660001,
1374  VM_FRAME_MAGIC_EVAL = 0x77770001,
1375  VM_FRAME_MAGIC_RESCUE = 0x78880001,
1376  VM_FRAME_MAGIC_DUMMY = 0x79990001,
1377 
1378  VM_FRAME_MAGIC_MASK = 0x7fff0001,
1379 
1380  /* frame flag */
1381  VM_FRAME_FLAG_FINISH = 0x0020,
1382  VM_FRAME_FLAG_BMETHOD = 0x0040,
1383  VM_FRAME_FLAG_CFRAME = 0x0080,
1384  VM_FRAME_FLAG_LAMBDA = 0x0100,
1385  VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1386  VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1387  VM_FRAME_FLAG_PASSED = 0x0800,
1388 
1389  /* env flag */
1390  VM_ENV_FLAG_LOCAL = 0x0002,
1391  VM_ENV_FLAG_ESCAPED = 0x0004,
1392  VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1393  VM_ENV_FLAG_ISOLATED = 0x0010,
1394 };
1395 
1396 #define VM_ENV_DATA_SIZE ( 3)
1397 
1398 #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1399 #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1400 #define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1401 #define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1402 
1403 #define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
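/* Layout sketch: relative to an environment pointer ep, the fixed data
 * slots are
 *
 *     ep[VM_ENV_DATA_INDEX_ME_CREF]  // ep[-2]: method entry or cref
 *     ep[VM_ENV_DATA_INDEX_SPECVAL]  // ep[-1]: block handler (local env)
 *                                    //         or guarded prev ep
 *     ep[VM_ENV_DATA_INDEX_FLAGS]    // ep[ 0]: Fixnum-tagged flag word
 *     ep[VM_ENV_DATA_INDEX_ENV]      // ep[ 1]: escaped env object, if any
 *
 * and local variables sit below, the last one at ep[VM_ENV_INDEX_LAST_LVAR].
 */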
1404 
1405 static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1406 
1407 static inline void
1408 VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1409 {
1410  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1411  VM_ASSERT(FIXNUM_P(flags));
1412  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1413 }
1414 
1415 static inline void
1416 VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1417 {
1418  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1419  VM_ASSERT(FIXNUM_P(flags));
1420  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1421 }
1422 
1423 static inline unsigned long
1424 VM_ENV_FLAGS(const VALUE *ep, long flag)
1425 {
1426  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1427  VM_ASSERT(FIXNUM_P(flags));
1428  return flags & flag;
1429 }
1430 
1431 static inline unsigned long
1432 VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1433 {
1434  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1435 }
1436 
1437 static inline int
1438 VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1439 {
1440  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1441 }
1442 
1443 static inline int
1444 VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1445 {
1446  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1447 }
1448 
1449 static inline int
1450 VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1451 {
1452  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1453 }
1454 
1455 static inline int
1456 VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1457 {
1458  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1459 }
1460 
1461 static inline int
1462 rb_obj_is_iseq(VALUE iseq)
1463 {
1464  return imemo_type_p(iseq, imemo_iseq);
1465 }
1466 
1467 #if VM_CHECK_MODE > 0
1468 #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1469 #endif
1470 
1471 static inline int
1472 VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1473 {
1474  int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1475  VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1476  (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1477  return cframe_p;
1478 }
1479 
1480 static inline int
1481 VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1482 {
1483  return !VM_FRAME_CFRAME_P(cfp);
1484 }
1485 
1486 #define RUBYVM_CFUNC_FRAME_P(cfp) \
1487  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1488 
1489 #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1490 #define VM_BLOCK_HANDLER_NONE 0
1491 
1492 static inline int
1493 VM_ENV_LOCAL_P(const VALUE *ep)
1494 {
1495  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1496 }
1497 
1498 static inline const VALUE *
1499 VM_ENV_PREV_EP(const VALUE *ep)
1500 {
1501  VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1502  return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1503 }
1504 
1505 static inline VALUE
1506 VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1507 {
1508  VM_ASSERT(VM_ENV_LOCAL_P(ep));
1509  return ep[VM_ENV_DATA_INDEX_SPECVAL];
1510 }
1511 
1512 #if VM_CHECK_MODE > 0
1513 int rb_vm_ep_in_heap_p(const VALUE *ep);
1514 #endif
1515 
1516 static inline int
1517 VM_ENV_ESCAPED_P(const VALUE *ep)
1518 {
1519  VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1520  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1521 }
1522 
1523 RBIMPL_ATTR_NONNULL((1))
1524 static inline VALUE
1525 VM_ENV_ENVVAL(const VALUE *ep)
1526 {
1527  VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1528  VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1529  VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1530  return envval;
1531 }
1532 
1533 RBIMPL_ATTR_NONNULL((1))
1534 static inline const rb_env_t *
1535 VM_ENV_ENVVAL_PTR(const VALUE *ep)
1536 {
1537  return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1538 }
1539 
1540 static inline const rb_env_t *
1541 vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1542 {
1543  rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1544  env->ep = env_ep;
1545  env->env = env_body;
1546  env->env_size = env_size;
1547  env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1548  return env;
1549 }
1550 
1551 static inline void
1552 VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1553 {
1554  *((VALUE *)ptr) = v;
1555 }
1556 
1557 static inline void
1558 VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1559 {
1560  VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1561  VM_FORCE_WRITE(ptr, special_const_value);
1562 }
1563 
1564 static inline void
1565 VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1566 {
1567  VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1568  VM_FORCE_WRITE(&ep[index], v);
1569 }
1570 
1571 const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1572 const VALUE *rb_vm_proc_local_ep(VALUE proc);
1573 void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1574 void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1575 
1576 VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1577 
1578 #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1579 #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1580 
1581 #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1582  ((void *)(ecfp) > (void *)(cfp))
1583 
1584 static inline const rb_control_frame_t *
1585 RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1586 {
1587  return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1588 }
1589 
1590 static inline int
1591 RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1592 {
1593  return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1594 }
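/* Frame-walk sketch: control frames grow downward from the top of
 * vm_stack, so "previous" (older) frames live at higher addresses. A
 * minimal, illustrative walk from the current frame outward:
 *
 *     const rb_control_frame_t *cfp = ec->cfp;
 *     while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
 *         // inspect cfp ...
 *         cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
 *     }
 */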
1595 
1596 static inline int
1597 VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1598 {
1599  if ((block_handler & 0x03) == 0x01) {
1600 #if VM_CHECK_MODE > 0
1601  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1602  VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1603 #endif
1604  return 1;
1605  }
1606  else {
1607  return 0;
1608  }
1609 }
1610 
1611 static inline VALUE
1612 VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1613 {
1614  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1615  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1616  return block_handler;
1617 }
1618 
1619 static inline const struct rb_captured_block *
1620 VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1621 {
1622  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1623  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1624  return captured;
1625 }
1626 
1627 static inline int
1628 VM_BH_IFUNC_P(VALUE block_handler)
1629 {
1630  if ((block_handler & 0x03) == 0x03) {
1631 #if VM_CHECK_MODE > 0
1632  struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1633  VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1634 #endif
1635  return 1;
1636  }
1637  else {
1638  return 0;
1639  }
1640 }
1641 
1642 static inline VALUE
1643 VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1644 {
1645  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1646  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1647  return block_handler;
1648 }
1649 
1650 static inline const struct rb_captured_block *
1651 VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1652 {
1653  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1654  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1655  return captured;
1656 }
1657 
1658 static inline const struct rb_captured_block *
1659 VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1660 {
1661  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1662  VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1663  return captured;
1664 }
1665 
1666 static inline enum rb_block_handler_type
1667 vm_block_handler_type(VALUE block_handler)
1668 {
1669  if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1670  return block_handler_type_iseq;
1671  }
1672  else if (VM_BH_IFUNC_P(block_handler)) {
1673  return block_handler_type_ifunc;
1674  }
1675  else if (SYMBOL_P(block_handler)) {
1676  return block_handler_type_symbol;
1677  }
1678  else {
1679  VM_ASSERT(rb_obj_is_proc(block_handler));
1680  return block_handler_type_proc;
1681  }
1682 }
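/* Encoding recap with an illustrative dispatch: (bh & 0x03) == 0x01 marks
 * a captured iseq block, 0x03 a captured ifunc block; otherwise the
 * handler is the Symbol or Proc object itself:
 *
 *     switch (vm_block_handler_type(bh)) {
 *       case block_handler_type_iseq:   captured = VM_BH_TO_ISEQ_BLOCK(bh); break;
 *       case block_handler_type_ifunc:  captured = VM_BH_TO_IFUNC_BLOCK(bh); break;
 *       case block_handler_type_symbol: sym  = VM_BH_TO_SYMBOL(bh); break;
 *       case block_handler_type_proc:   proc = VM_BH_TO_PROC(bh); break;
 *     }
 */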
1683 
1684 static inline void
1685 vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1686 {
1687  VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1688  (vm_block_handler_type(block_handler), 1));
1689 }
1690 
1691 static inline enum rb_block_type
1692 vm_block_type(const struct rb_block *block)
1693 {
1694 #if VM_CHECK_MODE > 0
1695  switch (block->type) {
1696  case block_type_iseq:
1697  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1698  break;
1699  case block_type_ifunc:
1700  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1701  break;
1702  case block_type_symbol:
1703  VM_ASSERT(SYMBOL_P(block->as.symbol));
1704  break;
1705  case block_type_proc:
1706  VM_ASSERT(rb_obj_is_proc(block->as.proc));
1707  break;
1708  }
1709 #endif
1710  return block->type;
1711 }
1712 
1713 static inline void
1714 vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1715 {
1716  struct rb_block *mb = (struct rb_block *)block;
1717  mb->type = type;
1718 }
1719 
1720 static inline const struct rb_block *
1721 vm_proc_block(VALUE procval)
1722 {
1723  VM_ASSERT(rb_obj_is_proc(procval));
1724  return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1725 }
1726 
1727 static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1728 static inline const VALUE *vm_block_ep(const struct rb_block *block);
1729 
1730 static inline const rb_iseq_t *
1731 vm_proc_iseq(VALUE procval)
1732 {
1733  return vm_block_iseq(vm_proc_block(procval));
1734 }
1735 
1736 static inline const VALUE *
1737 vm_proc_ep(VALUE procval)
1738 {
1739  return vm_block_ep(vm_proc_block(procval));
1740 }
1741 
1742 static inline const rb_iseq_t *
1743 vm_block_iseq(const struct rb_block *block)
1744 {
1745  switch (vm_block_type(block)) {
1746  case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1747  case block_type_proc: return vm_proc_iseq(block->as.proc);
1748  case block_type_ifunc:
1749  case block_type_symbol: return NULL;
1750  }
1751  VM_UNREACHABLE(vm_block_iseq);
1752  return NULL;
1753 }
1754 
1755 static inline const VALUE *
1756 vm_block_ep(const struct rb_block *block)
1757 {
1758  switch (vm_block_type(block)) {
1759  case block_type_iseq:
1760  case block_type_ifunc: return block->as.captured.ep;
1761  case block_type_proc: return vm_proc_ep(block->as.proc);
1762  case block_type_symbol: return NULL;
1763  }
1764  VM_UNREACHABLE(vm_block_ep);
1765  return NULL;
1766 }
1767 
1768 static inline VALUE
1769 vm_block_self(const struct rb_block *block)
1770 {
1771  switch (vm_block_type(block)) {
1772  case block_type_iseq:
1773  case block_type_ifunc:
1774  return block->as.captured.self;
1775  case block_type_proc:
1776  return vm_block_self(vm_proc_block(block->as.proc));
1777  case block_type_symbol:
1778  return Qundef;
1779  }
1780  VM_UNREACHABLE(vm_block_self);
1781  return Qundef;
1782 }
1783 
1784 static inline VALUE
1785 VM_BH_TO_SYMBOL(VALUE block_handler)
1786 {
1787  VM_ASSERT(SYMBOL_P(block_handler));
1788  return block_handler;
1789 }
1790 
1791 static inline VALUE
1792 VM_BH_FROM_SYMBOL(VALUE symbol)
1793 {
1794  VM_ASSERT(SYMBOL_P(symbol));
1795  return symbol;
1796 }
1797 
1798 static inline VALUE
1799 VM_BH_TO_PROC(VALUE block_handler)
1800 {
1801  VM_ASSERT(rb_obj_is_proc(block_handler));
1802  return block_handler;
1803 }
1804 
1805 static inline VALUE
1806 VM_BH_FROM_PROC(VALUE procval)
1807 {
1808  VM_ASSERT(rb_obj_is_proc(procval));
1809  return procval;
1810 }
1811 
1812 /* VM related object allocate functions */
1813 VALUE rb_thread_alloc(VALUE klass);
1814 VALUE rb_binding_alloc(VALUE klass);
1815 VALUE rb_proc_alloc(VALUE klass);
1816 VALUE rb_proc_dup(VALUE self);
1817 
1818 /* for debug */
1819 extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1820 extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1821 extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1822 
1823 #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1824 #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1825 bool rb_vm_bugreport(const void *, FILE *);
1826 typedef void (*ruby_sighandler_t)(int);
1827 RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1828 NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1829 
1830 /* functions about thread/vm execution */
1831 RUBY_SYMBOL_EXPORT_BEGIN
1832 VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1833 VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1834 VALUE rb_iseq_path(const rb_iseq_t *iseq);
1835 VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1836 RUBY_SYMBOL_EXPORT_END
1837 
1838 VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1839 void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1840 
1841 int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1842 void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1843 
1844 VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1845 
1846 VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1847 static inline VALUE
1848 rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1849 {
1850  return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1851 }
1852 
1853 static inline VALUE
1854 rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1855 {
1856  return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1857 }
1858 
1859 VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1860 VALUE rb_vm_env_local_variables(const rb_env_t *env);
1861 const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1862 const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1863 void rb_vm_inc_const_missing_count(void);
1864 VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1865  const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1866 void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1867 void rb_vm_pop_frame(rb_execution_context_t *ec);
1868 
1869 void rb_thread_start_timer_thread(void);
1870 void rb_thread_stop_timer_thread(void);
1871 void rb_thread_reset_timer_thread(void);
1872 void rb_thread_wakeup_timer_thread(int);
1873 
1874 static inline void
1875 rb_vm_living_threads_init(rb_vm_t *vm)
1876 {
1877  ccan_list_head_init(&vm->waiting_fds);
1878  ccan_list_head_init(&vm->workqueue);
1879  ccan_list_head_init(&vm->ractor.set);
1880  ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1881 }
1882 
1883 typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1884 rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1885 rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1886 VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1887 int rb_vm_get_sourceline(const rb_control_frame_t *);
1888 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1889 void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1890 rb_thread_t * ruby_thread_from_native(void);
1891 int ruby_thread_set_native(rb_thread_t *th);
1892 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1893 void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1894 void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1895 VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1896 
1897 void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1898 
1899 #define rb_vm_register_special_exception(sp, e, m) \
1900  rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1901 
1902 void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1903 
1904 void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1905 
1906 const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1907 
1908 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1909 
1910 #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1911  STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1912  STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1913  const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1914  if (UNLIKELY((cfp) <= &bound[1])) { \
1915  vm_stackoverflow(); \
1916  } \
1917 } while (0)
1918 
1919 #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1920  CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
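
/*
A hedged reading: the VALUE stack grows upward while control frames are
pushed downward from the top of the same region, so the two grow toward each
other. The check fires (via vm_stackoverflow()) once sp plus the requested
margin would run into the control-frame area. A usage sketch, before pushing
two VALUEs (recv and obj are placeholders):

```c
CHECK_VM_STACK_OVERFLOW(cfp, 2); // ensure room for two more stack slots
*cfp->sp++ = recv;
*cfp->sp++ = obj;
```
*/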
1921 
1922 VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1923 
1924 rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1925 
1926 /* for thread */
1927 
1928 #if RUBY_VM_THREAD_MODEL == 2
1929 
1930 RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1931 RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1932 RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1933 RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1934 RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1935 
1936 #define GET_VM() rb_current_vm()
1937 #define GET_RACTOR() rb_current_ractor()
1938 #define GET_THREAD() rb_current_thread()
1939 #define GET_EC() rb_current_execution_context(true)
1940 
1941 static inline rb_thread_t *
1942 rb_ec_thread_ptr(const rb_execution_context_t *ec)
1943 {
1944  return ec->thread_ptr;
1945 }
1946 
1947 static inline rb_ractor_t *
1948 rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1949 {
1950  const rb_thread_t *th = rb_ec_thread_ptr(ec);
1951  if (th) {
1952  VM_ASSERT(th->ractor != NULL);
1953  return th->ractor;
1954  }
1955  else {
1956  return NULL;
1957  }
1958 }
1959 
1960 static inline rb_vm_t *
1961 rb_ec_vm_ptr(const rb_execution_context_t *ec)
1962 {
1963  const rb_thread_t *th = rb_ec_thread_ptr(ec);
1964  if (th) {
1965  return th->vm;
1966  }
1967  else {
1968  return NULL;
1969  }
1970 }
1971 
1972 NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
1973 
1974 static inline rb_execution_context_t *
1975 rb_current_execution_context(bool expect_ec)
1976 {
1977 #ifdef RB_THREAD_LOCAL_SPECIFIER
1978  #ifdef __APPLE__
1979  rb_execution_context_t *ec = rb_current_ec();
1980  #else
1981  rb_execution_context_t *ec = ruby_current_ec;
1982  #endif
1983 
1984  /* In shared objects, `__tls_get_addr()` is used to access the TLS,
1985  * and the address of `ruby_current_ec` can be stored on a function
1986  * frame. However, this address can be misused after a coroutine
1987  * migrates to another native thread (NT):
1988  * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
1989  * 2) Context switch and resume the coroutine on NT2.
1990  * 3) `ptr` is used on NT2, but it accesses the TLS of NT1.
1991  * This assertion checks for such misuse.
1992  *
1993  * To avoid accidents, `GET_EC()` should be called once per frame.
1994  * Note that inlining can produce this problem.
1995  */
1996  VM_ASSERT(ec == rb_current_ec_noinline());
1997 #else
1998  rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
1999 #endif
2000  VM_ASSERT(!expect_ec || ec != NULL);
2001  return ec;
2002 }
2003 
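/*
A hedged illustration of the misuse described above (NT1/NT2 are native
threads; the coroutine migrates between them):

```c
rb_execution_context_t **ptr = &ruby_current_ec; // TLS address resolved on NT1
// ... context switch; the coroutine resumes on NT2 ...
rb_execution_context_t *ec = *ptr; // still dereferences NT1's TLS slot: wrong ec
```

The VM_ASSERT against rb_current_ec_noinline() catches this because the
non-inlined call re-resolves the TLS address on the current native thread.
*/
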
2004 static inline rb_thread_t *
2005 rb_current_thread(void)
2006 {
2007  const rb_execution_context_t *ec = GET_EC();
2008  return rb_ec_thread_ptr(ec);
2009 }
2010 
2011 static inline rb_ractor_t *
2012 rb_current_ractor_raw(bool expect)
2013 {
2014  if (ruby_single_main_ractor) {
2015  return ruby_single_main_ractor;
2016  }
2017  else {
2018  const rb_execution_context_t *ec = rb_current_execution_context(expect);
2019  return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2020  }
2021 }
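
/* Fast-path note (hedged): ruby_single_main_ractor stays non-NULL only while
 * no additional Ractor has ever been created, so in the common single-ractor
 * case the TLS lookup through rb_current_execution_context() is skipped. */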
2022 
2023 static inline rb_ractor_t *
2024 rb_current_ractor(void)
2025 {
2026  return rb_current_ractor_raw(true);
2027 }
2028 
2029 static inline rb_vm_t *
2030 rb_current_vm(void)
2031 {
2032 #if 0 // TODO: reconsider the assertions
2033  VM_ASSERT(ruby_current_vm_ptr == NULL ||
2034  ruby_current_execution_context_ptr == NULL ||
2035  rb_ec_thread_ptr(GET_EC()) == NULL ||
2036  rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2037  rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2038 #endif
2039 
2040  return ruby_current_vm_ptr;
2041 }
2042 
2043 void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2044  unsigned int recorded_lock_rec,
2045  unsigned int current_lock_rec);
2046 
2047 static inline unsigned int
2048 rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2049 {
2050  rb_vm_t *vm = rb_ec_vm_ptr(ec);
2051 
2052  if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
2053  return 0;
2054  }
2055  else {
2056  return vm->ractor.sync.lock_rec;
2057  }
2058 }
2059 
2060 #else
2061 #error "unsupported thread model"
2062 #endif
2063 
2064 enum {
2065  TIMER_INTERRUPT_MASK = 0x01,
2066  PENDING_INTERRUPT_MASK = 0x02,
2067  POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2068  TRAP_INTERRUPT_MASK = 0x08,
2069  TERMINATE_INTERRUPT_MASK = 0x10,
2070  VM_BARRIER_INTERRUPT_MASK = 0x20,
2071 };
2072 
2073 #define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2074 #define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2075 #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2076 #define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2077 #define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2078 #define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2079 #define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
2080  (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
2081 
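/*
Each mask is a distinct bit in ec->interrupt_flag; the setters OR it in
atomically and delivery is deferred until the target checks the flag.
A hedged sketch:

```c
RUBY_VM_SET_TRAP_INTERRUPT(ec);        // atomically set bit 0x08
// ... later, on the target's side ...
if (RUBY_VM_INTERRUPTED(ec)) {
    // pending or trap interrupt set, and not masked by ec->interrupt_mask
}
```
*/
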
2082 static inline bool
2083 RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2084 {
2085 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2086  uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2087 
2088  if (current_clock != ec->checked_clock) {
2089  ec->checked_clock = current_clock;
2090  RUBY_VM_SET_TIMER_INTERRUPT(ec);
2091  }
2092 #endif
2093  return ec->interrupt_flag & ~(ec)->interrupt_mask;
2094 }
2095 
2096 VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2097 int rb_signal_buff_size(void);
2098 int rb_signal_exec(rb_thread_t *th, int sig);
2099 void rb_threadptr_check_signal(rb_thread_t *mth);
2100 void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2101 void rb_threadptr_signal_exit(rb_thread_t *th);
2102 int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2103 void rb_threadptr_interrupt(rb_thread_t *th);
2104 void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2105 void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2106 void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2107 VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2108 void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2109 void rb_execution_context_update(rb_execution_context_t *ec);
2110 void rb_execution_context_mark(const rb_execution_context_t *ec);
2111 void rb_fiber_close(rb_fiber_t *fib);
2112 void Init_native_thread(rb_thread_t *th);
2113 int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2114 
2115 // vm_sync.h
2116 void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2117 void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2118 
2119 #define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2120 static inline void
2121 rb_vm_check_ints(rb_execution_context_t *ec)
2122 {
2123 #ifdef RUBY_ASSERT_CRITICAL_SECTION
2124  VM_ASSERT(ruby_assert_critical_section_entered == 0);
2125 #endif
2126 
2127  VM_ASSERT(ec == GET_EC());
2128 
2129  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2130  rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2131  }
2132 }
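
/*
A hedged sketch of the usual polling pattern: long-running C loops call
RUBY_VM_CHECK_INTS() so that signals, Thread#raise and Thread#kill stay
responsive (work_remaining() and do_step() are hypothetical helpers):

```c
while (work_remaining(job)) {
    do_step(job);
    RUBY_VM_CHECK_INTS(ec); // may execute interrupts and not return normally
}
```
*/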
2133 
2134 /* tracer */
2135 
2136 struct rb_trace_arg_struct {
2137  rb_event_flag_t event;
2138  rb_execution_context_t *ec;
2139  const rb_control_frame_t *cfp;
2140  VALUE self;
2141  ID id;
2142  ID called_id;
2143  VALUE klass;
2144  VALUE data;
2145 
2146  int klass_solved;
2147 
2148  /* calc from cfp */
2149  int lineno;
2150  VALUE path;
2151 };
2152 
2153 void rb_hook_list_mark(rb_hook_list_t *hooks);
2154 void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
2155 void rb_hook_list_free(rb_hook_list_t *hooks);
2156 void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2157 void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2158 
2159 void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2160 
2161 #define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2162  const rb_event_flag_t flag_arg_ = (flag_); \
2163  rb_hook_list_t *hooks_arg_ = (hooks_); \
2164  if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2165  /* defer evaluating the other arguments */ \
2166  rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2167  } \
2168 } while (0)
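
/* Design note (hedged): the bitmask test happens in the macro, before the
 * function call, so the remaining arguments (which may compute a klass or
 * allocate data) are only evaluated when a matching hook is registered. */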
2169 
2170 static inline void
2171 rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2172  VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2173 {
2174  struct rb_trace_arg_struct trace_arg;
2175 
2176  VM_ASSERT((hooks->events & flag) != 0);
2177 
2178  trace_arg.event = flag;
2179  trace_arg.ec = ec;
2180  trace_arg.cfp = ec->cfp;
2181  trace_arg.self = self;
2182  trace_arg.id = id;
2183  trace_arg.called_id = called_id;
2184  trace_arg.klass = klass;
2185  trace_arg.data = data;
2186  trace_arg.path = Qundef;
2187  trace_arg.klass_solved = 0;
2188 
2189  rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2190 }
2191 
2192 struct rb_ractor_pub {
2193  VALUE self;
2194  uint32_t id;
2195  rb_hook_list_t hooks;
2196 };
2197 
2198 static inline rb_hook_list_t *
2199 rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2200 {
2201  struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2202  return &cr_pub->hooks;
2203 }
2204 
2205 #define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2206  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2207 
2208 #define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2209  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
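
/*
A hedged usage sketch: firing a c-call event against the current ractor's
hooks (recv, mid and klass are assumed to describe the callee):

```c
EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, mid, mid, klass, Qundef);
```
*/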
2210 
2211 static inline void
2212 rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2213 {
2214  EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2215  NIL_P(eval_script) ? (VALUE)iseq :
2216  rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2217 }
2218 
2219 void rb_vm_trap_exit(rb_vm_t *vm);
2220 void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2221 void rb_vm_postponed_job_free(void); /* vm_trace.c */
2222 size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2223 void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2224 
2225 RUBY_SYMBOL_EXPORT_BEGIN
2226 
2227 int rb_thread_check_trap_pending(void);
2228 
2229 /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2230 #define RUBY_EVENT_COVERAGE_LINE 0x010000
2231 #define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2232 
2233 extern VALUE rb_get_coverages(void);
2234 extern void rb_set_coverages(VALUE, int, VALUE);
2235 extern void rb_clear_coverages(void);
2236 extern void rb_reset_coverages(void);
2237 extern void rb_resume_coverages(void);
2238 extern void rb_suspend_coverages(void);
2239 
2240 void rb_postponed_job_flush(rb_vm_t *vm);
2241 
2242 // ractor.c
2243 RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2244 RUBY_EXTERN VALUE rb_eRactorIsolationError;
2245 
2246 RUBY_SYMBOL_EXPORT_END
2247 
2248 #endif /* RUBY_VM_CORE_H */