Ruby 3.4.0dev (2024-11-05 revision 348a53415339076afc4a02fcd09f3ae36e9c4c61)
vm_core.h (348a53415339076afc4a02fcd09f3ae36e9c4c61)
1 #ifndef RUBY_VM_CORE_H
2 #define RUBY_VM_CORE_H
3 /**********************************************************************
4 
5  vm_core.h -
6 
7  $Author$
8  created at: 04/01/01 19:41:38 JST
9 
10  Copyright (C) 2004-2007 Koichi Sasada
11 
12 **********************************************************************/
13 
14 /*
15  * Enable check mode.
16  * 1: enable local assertions.
17  */
18 #ifndef VM_CHECK_MODE
19 
20 // respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
21 #define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22 
23 #define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24 #endif
25 
39 #ifndef VMDEBUG
40 #define VMDEBUG 0
41 #endif
42 
43 #if 0
44 #undef VMDEBUG
45 #define VMDEBUG 3
46 #endif
47 
48 #include "ruby/internal/config.h"
49 
50 #include <stddef.h>
51 #include <signal.h>
52 #include <stdarg.h>
53 
54 #include "ruby_assert.h"
55 
56 #define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57 
58 #if VM_CHECK_MODE > 0
59 #define VM_ASSERT(expr, ...) \
60  RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61 #define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62 #define RUBY_ASSERT_CRITICAL_SECTION
63 #define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64 #else
65 #define VM_ASSERT(/*expr, */...) ((void)0)
66 #define VM_UNREACHABLE(func) UNREACHABLE
67 #define RUBY_DEBUG_THREAD_SCHEDULE()
68 #endif
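/* Editorial note (illustrative, not part of the original header): VM_ASSERT is
 * compiled out entirely when VM_CHECK_MODE == 0, so it can guard debug-only
 * invariants without cost in release builds, e.g.:
 *
 *   VM_ASSERT(ec != NULL);
 *   VM_ASSERT(rb_mutex_owned_p(mutex)); // cf. RUBY_ASSERT_MUTEX_OWNED below
 *
 * VM_UNREACHABLE(func) marks paths that must never execute; with checks
 * enabled it aborts via rb_bug("<func>: unreachable").
 */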
69 
70 #define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71 
72 #if defined(RUBY_ASSERT_CRITICAL_SECTION)
73 /*
74 # Critical Section Assertions
75 
76 These assertions ensure that context switching does not occur between two points in the code. In theory,
77 such code should already be protected by a mutex, and these assertions verify that the mutex is actually held.
78 
79 They are specifically useful when a mutex is held further up the call stack and the code in question does not
80 hold the mutex directly. In that case, the critical section assertions confirm that the mutex is still held by
81 a caller.
82 
83 These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84 is set.
85 
86 ## Example Usage
87 
88 ```c
89 RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90 // ... some code which does not invoke rb_vm_check_ints() ...
91 RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92 ```
93 
94 If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95 `RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96 */
97 extern int ruby_assert_critical_section_entered;
98 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100 #else
101 #define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102 #define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103 #endif
104 
105 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106 # include "wasm/setjmp.h"
107 #else
108 # include <setjmp.h>
109 #endif
110 
111 #if defined(__linux__) || defined(__FreeBSD__)
112 # define RB_THREAD_T_HAS_NATIVE_ID
113 #endif
114 
115 #include "ruby/internal/stdbool.h"
116 #include "ccan/list/list.h"
117 #include "id.h"
118 #include "internal.h"
119 #include "internal/array.h"
120 #include "internal/basic_operators.h"
121 #include "internal/sanitizers.h"
122 #include "internal/serial.h"
123 #include "internal/vm.h"
124 #include "method.h"
125 #include "node.h"
126 #include "ruby/ruby.h"
127 #include "ruby/st.h"
128 #include "ruby_atomic.h"
129 #include "vm_opts.h"
130 
131 #include "ruby/thread_native.h"
132 /*
133  * implementation selector of get_insn_info algorithm
134  * 0: linear search
135  * 1: binary search
136  * 2: succinct bitvector
137  */
138 #ifndef VM_INSN_INFO_TABLE_IMPL
139 # define VM_INSN_INFO_TABLE_IMPL 2
140 #endif
141 
142 #if defined(NSIG_MAX) /* POSIX issue 8 */
143 # undef NSIG
144 # define NSIG NSIG_MAX
145 #elif defined(_SIG_MAXSIG) /* FreeBSD */
146 # undef NSIG
147 # define NSIG _SIG_MAXSIG
148 #elif defined(_SIGMAX) /* QNX */
149 # define NSIG (_SIGMAX + 1)
150 #elif defined(NSIG) /* 99% of everything else */
151 # /* take it */
152 #else /* Last resort */
153 # define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
154 #endif
155 
156 #define RUBY_NSIG NSIG
157 
158 #if defined(SIGCLD)
159 # define RUBY_SIGCHLD (SIGCLD)
160 #elif defined(SIGCHLD)
161 # define RUBY_SIGCHLD (SIGCHLD)
162 #endif
163 
164 #if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
165 # define USE_SIGALTSTACK
166 void *rb_allocate_sigaltstack(void);
167 void *rb_register_sigaltstack(void *);
168 # define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
169 # define RB_ALTSTACK_FREE(var) free(var)
170 # define RB_ALTSTACK(var) var
171 #else /* noop */
172 # define RB_ALTSTACK_INIT(var, altstack)
173 # define RB_ALTSTACK_FREE(var)
174 # define RB_ALTSTACK(var) (0)
175 #endif
176 
177 #include THREAD_IMPL_H
178 #define RUBY_VM_THREAD_MODEL 2
179 
180 /*****************/
181 /* configuration */
182 /*****************/
183 
184 /* gcc ver. check */
185 #if defined(__GNUC__) && __GNUC__ >= 2
186 
187 #if OPT_TOKEN_THREADED_CODE
188 #if OPT_DIRECT_THREADED_CODE
189 #undef OPT_DIRECT_THREADED_CODE
190 #endif
191 #endif
192 
193 #else /* defined(__GNUC__) && __GNUC__ >= 2 */
194 
195 /* disable threaded code options */
196 #if OPT_DIRECT_THREADED_CODE
197 #undef OPT_DIRECT_THREADED_CODE
198 #endif
199 #if OPT_TOKEN_THREADED_CODE
200 #undef OPT_TOKEN_THREADED_CODE
201 #endif
202 #endif
203 
204 /* call threaded code */
205 #if OPT_CALL_THREADED_CODE
206 #if OPT_DIRECT_THREADED_CODE
207 #undef OPT_DIRECT_THREADED_CODE
208 #endif /* OPT_DIRECT_THREADED_CODE */
209 #endif /* OPT_CALL_THREADED_CODE */
210 
211 void rb_vm_encoded_insn_data_table_init(void);
212 typedef unsigned long rb_num_t;
213 typedef signed long rb_snum_t;
214 
215 enum ruby_tag_type {
216  RUBY_TAG_NONE = 0x0,
217  RUBY_TAG_RETURN = 0x1,
218  RUBY_TAG_BREAK = 0x2,
219  RUBY_TAG_NEXT = 0x3,
220  RUBY_TAG_RETRY = 0x4,
221  RUBY_TAG_REDO = 0x5,
222  RUBY_TAG_RAISE = 0x6,
223  RUBY_TAG_THROW = 0x7,
224  RUBY_TAG_FATAL = 0x8,
225  RUBY_TAG_MASK = 0xf
226 };
227 
228 #define TAG_NONE RUBY_TAG_NONE
229 #define TAG_RETURN RUBY_TAG_RETURN
230 #define TAG_BREAK RUBY_TAG_BREAK
231 #define TAG_NEXT RUBY_TAG_NEXT
232 #define TAG_RETRY RUBY_TAG_RETRY
233 #define TAG_REDO RUBY_TAG_REDO
234 #define TAG_RAISE RUBY_TAG_RAISE
235 #define TAG_THROW RUBY_TAG_THROW
236 #define TAG_FATAL RUBY_TAG_FATAL
237 #define TAG_MASK RUBY_TAG_MASK
238 
239 enum ruby_vm_throw_flags {
240  VM_THROW_NO_ESCAPE_FLAG = 0x8000,
241  VM_THROW_STATE_MASK = 0xff
242 };
243 
244 /* forward declarations */
245 struct rb_thread_struct;
246 struct rb_control_frame_struct;
247 
248 /* iseq data type */
249 struct rb_compile_option_struct;
250 
251 union ic_serial_entry {
252  rb_serial_t raw;
253  VALUE data[2];
254 };
255 
256 #define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
257 
258 // imemo_constcache
259 struct iseq_inline_constant_cache_entry {
260  VALUE flags;
261 
262  VALUE value; // v0
263  VALUE _unused1; // v1
264  VALUE _unused2; // v2
265  const rb_cref_t *ic_cref; // v3
266 };
267 STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268  (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269  sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270 
271 struct iseq_inline_constant_cache {
272  struct iseq_inline_constant_cache_entry *entry;
273 
274  /**
275  * A null-terminated list of ids, used to represent a constant's path
276  * idNULL is used to represent the :: prefix, and 0 is used to denote the
277  * end of the list.
278  *
279  * For example
280  *   FOO        {rb_intern("FOO"), 0}
281  *   FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
282  *   ::FOO      {idNULL, rb_intern("FOO"), 0}
283  *   ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
284  */
285  const ID *segments;
286 };
287 
288 struct iseq_inline_iv_cache_entry {
289  uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
290  ID iv_set_name;
291 };
292 
293 struct iseq_inline_cvar_cache_entry {
294  struct rb_cvar_class_tbl_entry *entry;
295 };
296 
297 union iseq_inline_storage_entry {
298  struct {
299  struct rb_thread_struct *running_thread;
300  VALUE value;
301  } once;
302  struct iseq_inline_constant_cache ic_cache;
303  struct iseq_inline_iv_cache_entry iv_cache;
304 };
305 
306 struct rb_calling_info {
307  const struct rb_call_data *cd;
308  const struct rb_callcache *cc;
309  VALUE block_handler;
310  VALUE recv;
311  int argc;
312  bool kw_splat;
313  VALUE heap_argv;
314 };
315 
316 #ifndef VM_ARGC_STACK_MAX
317 #define VM_ARGC_STACK_MAX 128
318 #endif
319 
320 # define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
321 
322 struct rb_execution_context_struct;
323 
324 #if 1
325 #define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
326 #else
327 #define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
328 #endif
329 #define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
330 
331 typedef struct rb_iseq_location_struct {
332  VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
333  VALUE base_label; /* String */
334  VALUE label; /* String */
335  int first_lineno;
336  int node_id;
337  rb_code_location_t code_location;
338 } rb_iseq_location_t;
339 
340 #define PATHOBJ_PATH 0
341 #define PATHOBJ_REALPATH 1
342 
343 static inline VALUE
344 pathobj_path(VALUE pathobj)
345 {
346  if (RB_TYPE_P(pathobj, T_STRING)) {
347  return pathobj;
348  }
349  else {
350  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
351  return RARRAY_AREF(pathobj, PATHOBJ_PATH);
352  }
353 }
354 
355 static inline VALUE
356 pathobj_realpath(VALUE pathobj)
357 {
358  if (RB_TYPE_P(pathobj, T_STRING)) {
359  return pathobj;
360  }
361  else {
362  VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
363  return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
364  }
365 }
366 
367 /* Forward declarations */
368 struct rb_rjit_unit;
369 
370 typedef uintptr_t iseq_bits_t;
371 
372 #define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
373 
374 /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
375 #define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
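/* Editorial note (illustrative): is_entries is a single flat array
 * partitioned by cache kind in the order shown above, so e.g. the idx-th
 * ISE entry lives after all IVC and ICVARC entries:
 *
 *   union iseq_inline_storage_entry *ise =
 *       &body->is_entries[body->ivc_size + body->icvarc_size + (idx)];
 */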
376 
377 /* instruction sequence type */
378 enum rb_iseq_type {
379  ISEQ_TYPE_TOP,
380  ISEQ_TYPE_METHOD,
381  ISEQ_TYPE_BLOCK,
382  ISEQ_TYPE_CLASS,
383  ISEQ_TYPE_RESCUE,
384  ISEQ_TYPE_ENSURE,
385  ISEQ_TYPE_EVAL,
386  ISEQ_TYPE_MAIN,
387  ISEQ_TYPE_PLAIN
388 };
389 
390 // Attributes specified by Primitive.attr!
391 enum rb_builtin_attr {
392  // The iseq does not call methods.
393  BUILTIN_ATTR_LEAF = 0x01,
394  // This iseq contains only a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
395  BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
396  // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
397  BUILTIN_ATTR_INLINE_BLOCK = 0x04,
398  // The iseq acts like a C method in backtraces.
399  BUILTIN_ATTR_C_TRACE = 0x08,
400 };
401 
402 typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
403 
404 struct rb_iseq_constant_body {
405  enum rb_iseq_type type;
406 
407  unsigned int iseq_size;
408  VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
409 
433  struct {
434  struct {
435  unsigned int has_lead : 1;
436  unsigned int has_opt : 1;
437  unsigned int has_rest : 1;
438  unsigned int has_post : 1;
439  unsigned int has_kw : 1;
440  unsigned int has_kwrest : 1;
441  unsigned int has_block : 1;
442 
443  unsigned int ambiguous_param0 : 1; /* {|a|} */
444  unsigned int accepts_no_kwarg : 1;
445  unsigned int ruby2_keywords: 1;
446  unsigned int anon_rest: 1;
447  unsigned int anon_kwrest: 1;
448  unsigned int use_block: 1;
449  unsigned int forwardable: 1;
450  } flags;
451 
452  unsigned int size;
453 
454  int lead_num;
455  int opt_num;
456  int rest_start;
457  int post_start;
458  int post_num;
459  int block_start;
460 
461  const VALUE *opt_table; /* (opt_num + 1) entries. */
462  /* opt_num and opt_table:
463  *
464  * def foo o1=e1, o2=e2, ..., oN=eN
465  * #=>
466  * # prologue code
467  * A1: e1
468  * A2: e2
469  * ...
470  * AN: eN
471  * AL: body
472  * opt_num = N
473  * opt_table = [A1, A2, ..., AN, AL]
474  */
475 
476  const struct rb_iseq_param_keyword {
477  int num;
478  int required_num;
479  int bits_start;
480  int rest_start;
481  const ID *table;
482  VALUE *default_values;
483  } *keyword;
484  } param;
485 
486  rb_iseq_location_t location;
487 
488  /* insn info, must be freed */
489  struct iseq_insn_info {
490  const struct iseq_insn_info_entry *body;
491  unsigned int *positions;
492  unsigned int size;
493 #if VM_INSN_INFO_TABLE_IMPL == 2
494  struct succ_index_table *succ_index_table;
495 #endif
496  } insns_info;
497 
498  const ID *local_table; /* must free */
499 
500  /* catch table */
501  struct iseq_catch_table *catch_table;
502 
503  /* for child iseq */
504  const struct rb_iseq_struct *parent_iseq;
505  struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
506 
507  union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
508  struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
509 
510  struct {
511  rb_snum_t flip_count;
512  VALUE script_lines;
513  VALUE coverage;
514  VALUE pc2branchindex;
515  VALUE *original_iseq;
516  } variable;
517 
518  unsigned int local_table_size;
519  unsigned int ic_size; // Number of IC caches
520  unsigned int ise_size; // Number of ISE caches
521  unsigned int ivc_size; // Number of IVC caches
522  unsigned int icvarc_size; // Number of ICVARC caches
523  unsigned int ci_size;
524  unsigned int stack_max; /* for stack overflow check */
525 
526  unsigned int builtin_attrs; // Union of rb_builtin_attr
527 
528  bool prism; // ISEQ was generated from prism compiler
529 
530  union {
531  iseq_bits_t * list; /* Find references for GC */
532  iseq_bits_t single;
533  } mark_bits;
534 
535  struct rb_id_table *outer_variables;
536 
537  const rb_iseq_t *mandatory_only_iseq;
538 
539 #if USE_RJIT || USE_YJIT
540  // Function pointer for JIT code on jit_exec()
541  rb_jit_func_t jit_entry;
542  // Number of calls on jit_exec()
543  long unsigned jit_entry_calls;
544 #endif
545 
546 #if USE_YJIT
547  // Function pointer for JIT code on jit_exec_exception()
548  rb_jit_func_t jit_exception;
549  // Number of calls on jit_exec_exception()
550  long unsigned jit_exception_calls;
551 #endif
552 
553 #if USE_RJIT
554  // RJIT stores some data on each iseq.
555  VALUE rjit_blocks;
556 #endif
557 
558 #if USE_YJIT
559  // YJIT stores some data on each iseq.
560  void *yjit_payload;
561  // Used to estimate how frequently this ISEQ gets called
562  uint64_t yjit_calls_at_interv;
563 #endif
564 };
565 
566 /* T_IMEMO/iseq */
567 /* typedef rb_iseq_t is in method.h */
568 struct rb_iseq_struct {
569  VALUE flags; /* 1 */
570  VALUE wrapper; /* 2 */
571 
572  struct rb_iseq_constant_body *body; /* 3 */
573 
574  union { /* 4, 5 words */
575  struct iseq_compile_data *compile_data; /* used at compile time */
576 
577  struct {
578  VALUE obj;
579  int index;
580  } loader;
581 
582  struct {
583  struct rb_hook_list_struct *local_hooks;
584  rb_event_flag_t global_trace_events;
585  } exec;
586  } aux;
587 };
588 
589 #define ISEQ_BODY(iseq) ((iseq)->body)
590 
591 #if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
592 #define USE_LAZY_LOAD 0
593 #endif
594 
595 #if !USE_LAZY_LOAD
596 static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
597 #endif
598 const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
599 
600 static inline const rb_iseq_t *
601 rb_iseq_check(const rb_iseq_t *iseq)
602 {
603  if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
604  rb_iseq_complete((rb_iseq_t *)iseq);
605  }
606  return iseq;
607 }
608 
609 static inline bool
610 rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
611 {
612  return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
613 }
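/* Editorial example (illustrative): because rb_iseq_attr_p() tests that all
 * bits of `attr` are set, combined attributes can be queried in one call:
 *
 *   rb_iseq_attr_p(iseq, BUILTIN_ATTR_LEAF);                        // leaf?
 *   rb_iseq_attr_p(iseq, BUILTIN_ATTR_LEAF | BUILTIN_ATTR_C_TRACE); // both set?
 */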
614 
615 static inline const rb_iseq_t *
616 def_iseq_ptr(rb_method_definition_t *def)
617 {
618 // TODO: revisit. To check the bug, enable this assertion.
619 #if VM_CHECK_MODE > 0
620  if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
621 #endif
622  return rb_iseq_check(def->body.iseq.iseqptr);
623 }
624 
625 enum ruby_special_exceptions {
626  ruby_error_reenter,
627  ruby_error_nomemory,
628  ruby_error_sysstack,
629  ruby_error_stackfatal,
630  ruby_error_stream_closed,
631  ruby_special_error_count
632 };
633 
634 #define GetVMPtr(obj, ptr) \
635  GetCoreDataFromValue((obj), rb_vm_t, (ptr))
636 
637 struct rb_vm_struct;
638 typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
639 
640 typedef struct rb_at_exit_list {
641  rb_vm_at_exit_func *func;
642  struct rb_at_exit_list *next;
643 } rb_at_exit_list;
644 
645 void *rb_objspace_alloc(void);
646 void rb_objspace_free(void *objspace);
647 void rb_objspace_call_finalizer(void);
648 
649 typedef struct rb_hook_list_struct {
650  struct rb_event_hook_struct *hooks;
651  rb_event_flag_t events;
652  unsigned int running;
653  bool need_clean;
654  bool is_local;
655 } rb_hook_list_t;
656 
657 
658 // see builtin.h for definition
659 typedef const struct rb_builtin_function *RB_BUILTIN;
660 
661 struct global_object_list {
662  VALUE *varptr;
663  struct global_object_list *next;
664 };
665 
666 typedef struct rb_vm_struct {
667  VALUE self;
668 
669  struct {
670  struct ccan_list_head set;
671  unsigned int cnt;
672  unsigned int blocking_cnt;
673 
674  struct rb_ractor_struct *main_ractor;
675  struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
676 
677  struct {
678  // monitor
679  rb_nativethread_lock_t lock;
680  struct rb_ractor_struct *lock_owner;
681  unsigned int lock_rec;
682 
683  // join at exit
684  rb_nativethread_cond_t terminate_cond;
685  bool terminate_waiting;
686 
687 #ifndef RUBY_THREAD_PTHREAD_H
688  bool barrier_waiting;
689  unsigned int barrier_cnt;
690  rb_nativethread_cond_t barrier_cond;
691 #endif
692  } sync;
693 
694  // ractor scheduling
695  struct {
696  rb_nativethread_lock_t lock;
697  struct rb_ractor_struct *lock_owner;
698  bool locked;
699 
700  rb_nativethread_cond_t cond; // GRQ
701  unsigned int snt_cnt; // count of shared NTs
702  unsigned int dnt_cnt; // count of dedicated NTs
703 
704  unsigned int running_cnt;
705 
706  unsigned int max_cpu;
707  struct ccan_list_head grq; // Global Ready Queue
708  unsigned int grq_cnt;
709 
710  // running threads
711  struct ccan_list_head running_threads;
712 
713  // threads which switch context by timeslice
714  struct ccan_list_head timeslice_threads;
715 
716  struct ccan_list_head zombie_threads;
717 
718  // true if the timeslice timer is not enabled
719  bool timeslice_wait_inf;
720 
721  // barrier
722  rb_nativethread_cond_t barrier_complete_cond;
723  rb_nativethread_cond_t barrier_release_cond;
724  bool barrier_waiting;
725  unsigned int barrier_waiting_cnt;
726  unsigned int barrier_serial;
727  } sched;
728  } ractor;
729 
730 #ifdef USE_SIGALTSTACK
731  void *main_altstack;
732 #endif
733 
734  rb_serial_t fork_gen;
735  struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */
736 
737  /* set in single-threaded processes only: */
738  volatile int ubf_async_safe;
739 
740  unsigned int running: 1;
741  unsigned int thread_abort_on_exception: 1;
742  unsigned int thread_report_on_exception: 1;
743  unsigned int thread_ignore_deadlock: 1;
744 
745  /* object management */
746  VALUE mark_object_ary;
747  struct global_object_list *global_object_list;
748  const VALUE special_exceptions[ruby_special_error_count];
749 
750  /* load */
751  VALUE top_self;
752  VALUE load_path;
753  VALUE load_path_snapshot;
754  VALUE load_path_check_cache;
755  VALUE expanded_load_path;
756  VALUE loaded_features;
757  VALUE loaded_features_snapshot;
758  VALUE loaded_features_realpaths;
759  VALUE loaded_features_realpath_map;
760  struct st_table *loaded_features_index;
761  struct st_table *loading_table;
762  // For running the init function of statically linked
763  // extensions when they are loaded
764  struct st_table *static_ext_inits;
765 
766  /* signal */
767  struct {
768  VALUE cmd[RUBY_NSIG];
769  } trap_list;
770 
771  /* postponed_job (async-signal-safe, and thread-safe) */
772  struct rb_postponed_job_queue *postponed_job_queue;
773 
774  int src_encoding_index;
775 
776  /* workqueue (thread-safe, NOT async-signal-safe) */
777  struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
778  rb_nativethread_lock_t workqueue_lock;
779 
780  VALUE orig_progname, progname;
781  VALUE coverages, me2counter;
782  int coverage_mode;
783 
784  struct {
785  struct rb_objspace *objspace;
786  struct gc_mark_func_data_struct {
787  void *data;
788  void (*mark_func)(VALUE v, void *data);
789  } *mark_func_data;
790  } gc;
791 
792  rb_at_exit_list *at_exit;
793 
794  st_table *frozen_strings;
795 
796  const struct rb_builtin_function *builtin_function_table;
797 
798  st_table *ci_table;
799  struct rb_id_table *negative_cme_table;
800  st_table *overloaded_cme_table; // cme -> overloaded_cme
801  st_table *unused_block_warning_table;
802  bool unused_block_warning_strict;
803 
804  // This id table contains a mapping from ID to ICs. It does this with ID
805  // keys and nested st_tables as values. The nested tables have ICs as keys
806  // and Qtrue as values. It is used when inline constant caches need to be
807  // invalidated or ISEQs are being freed.
808  struct rb_id_table *constant_cache;
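/* Editorial sketch of the shape described above (illustrative):
 *
 *   constant_cache: ID(:Foo) => { (IC)ic1 => Qtrue, (IC)ic2 => Qtrue, ... }
 *
 * so invalidating the constant :Foo only has to walk the inner table for
 * that ID.
 */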
809 
810 #ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
811 #define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
812 #endif
813  const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
814 
815 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
816  uint32_t clock;
817 #endif
818 
819  /* params */
820  struct { /* sizes in bytes */
821  size_t thread_vm_stack_size;
822  size_t thread_machine_stack_size;
823  size_t fiber_vm_stack_size;
824  size_t fiber_machine_stack_size;
825  } default_params;
826 
827 } rb_vm_t;
828 
829 /* default values */
830 
831 #define RUBY_VM_SIZE_ALIGN 4096
832 
833 #define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
834 #define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
835 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
836 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
837 
838 #define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
839 #define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
840 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
841 #if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
842 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
843 #else
844 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
845 #endif
846 
847 #if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
848 /* It seems sanitizers consume A LOT of machine stack */
849 #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
850 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
851 #undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
852 #define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
853 #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
854 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
855 #undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
856 #define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
857 #endif
858 
859 #ifndef VM_DEBUG_BP_CHECK
860 #define VM_DEBUG_BP_CHECK 0
861 #endif
862 
863 #ifndef VM_DEBUG_VERIFY_METHOD_CACHE
864 #define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
865 #endif
866 
867 struct rb_captured_block {
868  VALUE self;
869  const VALUE *ep;
870  union {
871  const rb_iseq_t *iseq;
872  const struct vm_ifunc *ifunc;
873  VALUE val;
874  } code;
875 };
876 
877 enum rb_block_handler_type {
878  block_handler_type_iseq,
879  block_handler_type_ifunc,
880  block_handler_type_symbol,
881  block_handler_type_proc
882 };
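/* Editorial note (illustrative): the handler types are distinguished by
 * pointer tagging, as checked by VM_BH_ISEQ_BLOCK_P()/VM_BH_IFUNC_P() below.
 * A struct rb_captured_block pointer tagged with 0x01 is an iseq block and
 * one tagged with 0x03 is an ifunc block; otherwise the handler is the
 * Symbol or Proc VALUE itself, e.g.:
 *
 *   VALUE bh = VM_BH_FROM_ISEQ_BLOCK(captured); // ((VALUE)captured) | 0x01
 */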
883 
884 enum rb_block_type {
885  block_type_iseq,
886  block_type_ifunc,
887  block_type_symbol,
888  block_type_proc
889 };
890 
891 struct rb_block {
892  union {
893  struct rb_captured_block captured;
894  VALUE symbol;
895  VALUE proc;
896  } as;
897  enum rb_block_type type;
898 };
899 
900 typedef struct rb_control_frame_struct {
901  const VALUE *pc; // cfp[0]
902  VALUE *sp; // cfp[1]
903  const rb_iseq_t *iseq; // cfp[2]
904  VALUE self; // cfp[3] / block[0]
905  const VALUE *ep; // cfp[4] / block[1]
906  const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
907  void *jit_return; // cfp[6] -- return address for JIT code
908 #if VM_DEBUG_BP_CHECK
909  VALUE *bp_check; // cfp[7]
910 #endif
911 } rb_control_frame_t;
912 
913 extern const rb_data_type_t ruby_threadptr_data_type;
914 
915 static inline struct rb_thread_struct *
916 rb_thread_ptr(VALUE thval)
917 {
918  return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
919 }
920 
921 enum rb_thread_status {
922  THREAD_RUNNABLE,
923  THREAD_STOPPED,
924  THREAD_STOPPED_FOREVER,
925  THREAD_KILLED
926 };
927 
928 #ifdef RUBY_JMP_BUF
929 typedef RUBY_JMP_BUF rb_jmpbuf_t;
930 #else
931 typedef void *rb_jmpbuf_t[5];
932 #endif
933 
934 /*
935  The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
936  long-jump to the C frame associated with `rb_vm_tag`.
937 
938  The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
939  following functions:
940  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
941  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
942 
943  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
944  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
945 */
946 #if defined(__wasm__) && !defined(__EMSCRIPTEN__)
947 /*
948  The WebAssembly target with Asyncify-based SJLJ needs
949  to capture the execution context by unwinding/rewinding
950  call frames into a jump buffer. The buffer space tends
951  to be considerably large, unlike other architectures'
952  register-based buffers.
953  Therefore, we allocate the buffer on the heap in such
954  environments.
955 */
956 typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
957 
958 #define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
959 
960 static inline void
961 rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
962 {
963  *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
964 }
965 
966 static inline void
967 rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
968 {
969  ruby_xfree(*jmpbuf);
970 }
971 #else
972 typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
973 
974 #define RB_VM_TAG_JMPBUF_GET(buf) (buf)
975 
976 static inline void
977 rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
978 {
979  // no-op
980 }
981 
982 static inline void
983 rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
984 {
985  // no-op
986 }
987 #endif
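/* Editorial sketch (illustrative; the real use-site is EC_PUSH_TAG()/
 * EC_POP_TAG() in eval_intern.h): the init/deinit pair brackets the setjmp
 * taken on the owning C frame:
 *
 * ```c
 * struct rb_vm_tag tag;
 * rb_vm_tag_jmpbuf_init(&tag.buf);
 * if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
 *     // ... protected code ...
 * }
 * rb_vm_tag_jmpbuf_deinit(&tag.buf);
 * ```
 */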
988 
989 /*
990  the members written in EC_PUSH_TAG() should be placed at
991  the beginning and the end, so that the entire region is accessible.
992 */
993 struct rb_vm_tag {
994  VALUE tag;
995  VALUE retval;
996  rb_vm_tag_jmpbuf_t buf;
997  struct rb_vm_tag *prev;
998  enum ruby_tag_type state;
999  unsigned int lock_rec;
1000 };
1001 
1002 STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1003 STATIC_ASSERT(rb_vm_tag_buf_end,
1004  offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1005  sizeof(struct rb_vm_tag));
1006 
1007 struct rb_unblock_callback {
1008  rb_unblock_function_t *func;
1009  void *arg;
1010 };
1011 
1012 struct rb_mutex_struct;
1013 
1014 typedef struct rb_fiber_struct rb_fiber_t;
1015 
1016 struct rb_waiting_list {
1017  struct rb_waiting_list *next;
1018  struct rb_thread_struct *thread;
1019  struct rb_fiber_struct *fiber;
1020 };
1021 
1022 struct rb_execution_context_struct {
1023  /* execution information */
1024  VALUE *vm_stack; /* must free, must mark */
1025  size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
1026  rb_control_frame_t *cfp;
1027 
1028  struct rb_vm_tag *tag;
1029 
1030  /* interrupt flags */
1031  rb_atomic_t interrupt_flag;
1032  rb_atomic_t interrupt_mask; /* size should match flag */
1033 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1034  uint32_t checked_clock;
1035 #endif
1036 
1037  rb_fiber_t *fiber_ptr;
1038  struct rb_thread_struct *thread_ptr;
1039 
1040  /* storage (ec (fiber) local) */
1041  struct rb_id_table *local_storage;
1042  VALUE local_storage_recursive_hash;
1043  VALUE local_storage_recursive_hash_for_trace;
1044 
1045  /* Inheritable fiber storage. */
1046  VALUE storage;
1047 
1048  /* eval env */
1049  const VALUE *root_lep;
1050  VALUE root_svar;
1051 
1052  /* trace information */
1053  struct rb_trace_arg_struct *trace_arg;
1054 
1055  /* temporary places */
1056  VALUE errinfo;
1057  VALUE passed_block_handler; /* for rb_iterate */
1058 
1059  uint8_t raised_flag; /* only 3 bits needed */
1060 
1061  /* n.b. only 7 bits needed, really: */
1062  BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1063 
1064  VALUE private_const_reference;
1065 
1066  /* for GC */
1067  struct {
1068  VALUE *stack_start;
1069  VALUE *stack_end;
1070  size_t stack_maxsize;
1071  RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
1072 
1073 #ifdef RUBY_ASAN_ENABLED
1074  void *asan_fake_stack_handle;
1075 #endif
1076  } machine;
1077 };
1078 
1079 #ifndef rb_execution_context_t
1080 typedef struct rb_execution_context_struct rb_execution_context_t;
1081 #define rb_execution_context_t rb_execution_context_t
1082 #endif
1083 
1084 // for builtin.h
1085 #define VM_CORE_H_EC_DEFINED 1
1086 
1087 // Set the vm_stack pointer in the execution context.
1088 void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1089 
1090 // Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1091 // @param ec the execution context to update.
1092 // @param stack a pointer to the stack to use.
1093 // @param size the size of the stack, as in `VALUE stack[size]`.
1094 void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1095 
1096 // Clear (set to `NULL`) the vm_stack pointer.
1097 // @param ec the execution context to update.
1098 void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
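/* Editorial sketch (illustrative, hypothetical buffer management): a stack
 * owner wires a buffer into the EC and detaches it again before freeing:
 *
 * ```c
 * size_t size = RUBY_VM_FIBER_VM_STACK_SIZE / sizeof(VALUE);
 * VALUE *stack = ALLOC_N(VALUE, size);
 * rb_ec_initialize_vm_stack(ec, stack, size); // pushes the initial frame
 * // ... execute ...
 * rb_ec_clear_vm_stack(ec);                   // ec->vm_stack = NULL
 * ruby_xfree(stack);
 * ```
 */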
1099 
1100 struct rb_ext_config {
1101  bool ractor_safe;
1102 };
1103 
1104 typedef struct rb_ractor_struct rb_ractor_t;
1105 
1106 struct rb_native_thread;
1107 
1108 typedef struct rb_thread_struct {
1109  struct ccan_list_node lt_node; // managed by a ractor
1110  VALUE self;
1111  rb_ractor_t *ractor;
1112  rb_vm_t *vm;
1113  struct rb_native_thread *nt;
1114  rb_execution_context_t *ec;
1115 
1116  struct rb_thread_sched_item sched;
1117  bool mn_schedulable;
1118  rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1119 
1120  VALUE last_status; /* $? */
1121 
1122  /* for cfunc */
1123  struct rb_calling_info *calling;
1124 
1125  /* for load(true) */
1126  VALUE top_self;
1127  VALUE top_wrapper;
1128 
1129  /* thread control */
1130 
1131  BITFIELD(enum rb_thread_status, status, 2);
1132  /* bit flags */
1133  unsigned int has_dedicated_nt : 1;
1134  unsigned int to_kill : 1;
1135  unsigned int abort_on_exception: 1;
1136  unsigned int report_on_exception: 1;
1137  unsigned int pending_interrupt_queue_checked: 1;
1138  int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1139  uint32_t running_time_us; /* 12500..800000 */
1140 
1141  void *blocking_region_buffer;
1142 
1143  VALUE thgroup;
1144  VALUE value;
1145 
1146  /* temporary place of retval on OPT_CALL_THREADED_CODE */
1147 #if OPT_CALL_THREADED_CODE
1148  VALUE retval;
1149 #endif
1150 
1151  /* async errinfo queue */
1152  VALUE pending_interrupt_queue;
1153  VALUE pending_interrupt_mask_stack;
1154 
1155  /* interrupt management */
1156  rb_nativethread_lock_t interrupt_lock;
1157  struct rb_unblock_callback unblock;
1158  VALUE locking_mutex;
1159  struct rb_mutex_struct *keeping_mutexes;
1160 
1161  struct rb_waiting_list *join_list;
1162 
1163  union {
1164  struct {
1165  VALUE proc;
1166  VALUE args;
1167  int kw_splat;
1168  } proc;
1169  struct {
1170  VALUE (*func)(void *);
1171  void *arg;
1172  } func;
1173  } invoke_arg;
1174 
1175  enum thread_invoke_type {
1176  thread_invoke_type_none = 0,
1177  thread_invoke_type_proc,
1178  thread_invoke_type_ractor_proc,
1179  thread_invoke_type_func
1180  } invoke_type;
1181 
1182  /* statistics data for profiler */
1183  VALUE stat_insn_usage;
1184 
1185  /* fiber */
1186  rb_fiber_t *root_fiber;
1187 
1188  VALUE scheduler;
1189  unsigned int blocking;
1190 
1191  /* misc */
1192  VALUE name;
1193  void **specific_storage;
1194 
1195  struct rb_ext_config ext_config;
1196 } rb_thread_t;
1197 
1198 static inline unsigned int
1199 rb_th_serial(const rb_thread_t *th)
1200 {
1201  return th ? (unsigned int)th->serial : 0;
1202 }
1203 
1204 typedef enum {
1205  VM_DEFINECLASS_TYPE_CLASS = 0x00,
1206  VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1207  VM_DEFINECLASS_TYPE_MODULE = 0x02,
1208  /* 0x03..0x06 is reserved */
1209  VM_DEFINECLASS_TYPE_MASK = 0x07
1210 } rb_vm_defineclass_type_t;
1211 
1212 #define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1213 #define VM_DEFINECLASS_FLAG_SCOPED 0x08
1214 #define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1215 #define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1216 #define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1217  ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1218 
1219 /* iseq.c */
1220 RUBY_SYMBOL_EXPORT_BEGIN
1221 
1222 /* node -> iseq */
1223 rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1224 rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1225 rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1226 rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1227 rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1228  enum rb_iseq_type, const rb_compile_option_t*,
1229  VALUE script_lines);
1230 
1231 struct iseq_link_anchor;
1232 struct rb_iseq_new_with_callback_callback_func {
1233  VALUE flags;
1234  VALUE reserved;
1235  void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1236  const void *data;
1237 };
1238 static inline struct rb_iseq_new_with_callback_callback_func *
1239 rb_iseq_new_with_callback_new_callback(
1240  void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1241 {
1242  struct rb_iseq_new_with_callback_callback_func *memo =
1243  IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1244  memo->func = func;
1245  memo->data = ptr;
1246 
1247  return memo;
1248 }
1249 rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1250  VALUE name, VALUE path, VALUE realpath, int first_lineno,
1251  const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1252 
1253 VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1254 int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1255 
1256 VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1257 
1258 RUBY_EXTERN VALUE rb_cISeq;
1259 RUBY_EXTERN VALUE rb_cRubyVM;
1260 RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1261 RUBY_EXTERN VALUE rb_block_param_proxy;
1262 RUBY_SYMBOL_EXPORT_END
1263 
1264 #define GetProcPtr(obj, ptr) \
1265  GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1266 
1267 typedef struct {
1268  const struct rb_block block;
1269  unsigned int is_from_method: 1; /* bool */
1270  unsigned int is_lambda: 1; /* bool */
1271  unsigned int is_isolated: 1; /* bool */
1272 } rb_proc_t;
1273 
1274 RUBY_SYMBOL_EXPORT_BEGIN
1275 VALUE rb_proc_isolate(VALUE self);
1276 VALUE rb_proc_isolate_bang(VALUE self);
1277 VALUE rb_proc_ractor_make_shareable(VALUE self);
1278 RUBY_SYMBOL_EXPORT_END
1279 
1280 typedef struct {
1281  VALUE flags; /* imemo header */
1282  rb_iseq_t *iseq;
1283  const VALUE *ep;
1284  const VALUE *env;
1285  unsigned int env_size;
1286 } rb_env_t;
1287 
1288 extern const rb_data_type_t ruby_binding_data_type;
1289 
1290 #define GetBindingPtr(obj, ptr) \
1291  GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1292 
1293 typedef struct {
1294  const struct rb_block block;
1295  const VALUE pathobj;
1296  int first_lineno;
1297 } rb_binding_t;
1298 
1299 /* used by compile time and send insn */
1300 
1301 enum vm_check_match_type {
1302  VM_CHECKMATCH_TYPE_WHEN = 1,
1303  VM_CHECKMATCH_TYPE_CASE = 2,
1304  VM_CHECKMATCH_TYPE_RESCUE = 3
1305 };
1306 
1307 #define VM_CHECKMATCH_TYPE_MASK 0x03
1308 #define VM_CHECKMATCH_ARRAY 0x04
1309 
1310 enum vm_opt_newarray_send_type {
1311  VM_OPT_NEWARRAY_SEND_MAX = 1,
1312  VM_OPT_NEWARRAY_SEND_MIN = 2,
1313  VM_OPT_NEWARRAY_SEND_HASH = 3,
1314  VM_OPT_NEWARRAY_SEND_PACK = 4,
1315  VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1316 };
1317 
1318 enum vm_special_object_type {
1319  VM_SPECIAL_OBJECT_VMCORE = 1,
1320  VM_SPECIAL_OBJECT_CBASE,
1321  VM_SPECIAL_OBJECT_CONST_BASE
1322 };
1323 
1324 enum vm_svar_index {
1325  VM_SVAR_LASTLINE = 0, /* $_ */
1326  VM_SVAR_BACKREF = 1, /* $~ */
1327 
1328  VM_SVAR_EXTRA_START = 2,
1329  VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1330 };
1331 
1332 /* inline cache */
1333 typedef struct iseq_inline_constant_cache *IC;
1334 typedef struct iseq_inline_iv_cache_entry *IVC;
1335 typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1336 typedef union iseq_inline_storage_entry *ISE;
1337 typedef const struct rb_callinfo *CALL_INFO;
1338 typedef const struct rb_callcache *CALL_CACHE;
1339 typedef struct rb_call_data *CALL_DATA;
1340 
1341 typedef VALUE CDHASH;
1342 
1343 #ifndef FUNC_FASTCALL
1344 #define FUNC_FASTCALL(x) x
1345 #endif
1346 
1347 typedef rb_control_frame_t *
1348  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1349 
1350 #define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1351 #define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1352 
1353 #define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1354 #define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1355 #define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
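/* Editorial example (illustrative): the low bit makes a guarded pointer look
 * like a Fixnum, so the GC will not treat the slot as an object reference:
 *
 *   VALUE specval = GC_GUARDED_PTR(prev_ep);         // prev_ep | 0x01
 *   VM_ASSERT(GC_GUARDED_PTR_P(specval));
 *   const VALUE *prev = GC_GUARDED_PTR_REF(specval); // back to prev_ep
 */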
1356 
1357 enum vm_frame_env_flags {
1358  /* Frame/Environment flag bits:
1359  * MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
1360  *
1361  * X : tag for GC marking (it appears as a Fixnum)
1362  * EEE : 4 bits Env flags
1363  * FF..: 7 bits Frame flags
1364  * MM..: 15 bits frame magic (to check frame corruption)
1365  */
1366 
1367  /* frame types */
1368  VM_FRAME_MAGIC_METHOD = 0x11110001,
1369  VM_FRAME_MAGIC_BLOCK = 0x22220001,
1370  VM_FRAME_MAGIC_CLASS = 0x33330001,
1371  VM_FRAME_MAGIC_TOP = 0x44440001,
1372  VM_FRAME_MAGIC_CFUNC = 0x55550001,
1373  VM_FRAME_MAGIC_IFUNC = 0x66660001,
1374  VM_FRAME_MAGIC_EVAL = 0x77770001,
1375  VM_FRAME_MAGIC_RESCUE = 0x78880001,
1376  VM_FRAME_MAGIC_DUMMY = 0x79990001,
1377 
1378  VM_FRAME_MAGIC_MASK = 0x7fff0001,
1379 
1380  /* frame flag */
1381  VM_FRAME_FLAG_FINISH = 0x0020,
1382  VM_FRAME_FLAG_BMETHOD = 0x0040,
1383  VM_FRAME_FLAG_CFRAME = 0x0080,
1384  VM_FRAME_FLAG_LAMBDA = 0x0100,
1385  VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1386  VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1387  VM_FRAME_FLAG_PASSED = 0x0800,
1388 
1389  /* env flag */
1390  VM_ENV_FLAG_LOCAL = 0x0002,
1391  VM_ENV_FLAG_ESCAPED = 0x0004,
1392  VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1393  VM_ENV_FLAG_ISOLATED = 0x0010,
1394 };
1395 
1396 #define VM_ENV_DATA_SIZE ( 3)
1397 
1398 #define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1399 #define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1400 #define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1401 #define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1402 
1403 #define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1404 
1405 static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1406 
1407 static inline void
1408 VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1409 {
1410  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1411  VM_ASSERT(FIXNUM_P(flags));
1412  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1413 }
1414 
1415 static inline void
1416 VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1417 {
1418  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1419  VM_ASSERT(FIXNUM_P(flags));
1420  VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1421 }
1422 
1423 static inline unsigned long
1424 VM_ENV_FLAGS(const VALUE *ep, long flag)
1425 {
1426  VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1427  VM_ASSERT(FIXNUM_P(flags));
1428  return flags & flag;
1429 }
1430 
1431 static inline unsigned long
1432 VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1433 {
1434  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1435 }
1436 
1437 static inline int
1438 VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1439 {
1440  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1441 }
1442 
1443 static inline int
1444 VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1445 {
1446  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1447 }
1448 
1449 static inline int
1450 VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1451 {
1452  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1453 }
1454 
1455 static inline int
1456 VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1457 {
1458  return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1459 }
1460 
1461 static inline int
1462 rb_obj_is_iseq(VALUE iseq)
1463 {
1464  return imemo_type_p(iseq, imemo_iseq);
1465 }
1466 
1467 #if VM_CHECK_MODE > 0
1468 #define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1469 #endif
1470 
1471 static inline int
1472 VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1473 {
1474  int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1475  VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1476  (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1477  return cframe_p;
1478 }
1479 
1480 static inline int
1481 VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1482 {
1483  return !VM_FRAME_CFRAME_P(cfp);
1484 }
1485 
1486 #define RUBYVM_CFUNC_FRAME_P(cfp) \
1487  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1488 
1489 #define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1490 #define VM_BLOCK_HANDLER_NONE 0
1491 
1492 static inline int
1493 VM_ENV_LOCAL_P(const VALUE *ep)
1494 {
1495  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1496 }
1497 
1498 static inline const VALUE *
1499 VM_ENV_PREV_EP(const VALUE *ep)
1500 {
1501  VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1502  return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1503 }
1504 
1505 static inline VALUE
1506 VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1507 {
1508  VM_ASSERT(VM_ENV_LOCAL_P(ep));
1509  return ep[VM_ENV_DATA_INDEX_SPECVAL];
1510 }
1511 
1512 #if VM_CHECK_MODE > 0
1513 int rb_vm_ep_in_heap_p(const VALUE *ep);
1514 #endif
1515 
1516 static inline int
1517 VM_ENV_ESCAPED_P(const VALUE *ep)
1518 {
1519  VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1520  return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1521 }
1522 
1523 RBIMPL_ATTR_NONNULL((1))
1524 static inline VALUE
1525 VM_ENV_ENVVAL(const VALUE *ep)
1526 {
1527  VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1528  VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1529  VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1530  return envval;
1531 }
1532 
1533 RBIMPL_ATTR_NONNULL((1))
1534 static inline const rb_env_t *
1535 VM_ENV_ENVVAL_PTR(const VALUE *ep)
1536 {
1537  return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1538 }
1539 
1540 static inline const rb_env_t *
1541 vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1542 {
1543  rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1544  env->ep = env_ep;
1545  env->env = env_body;
1546  env->env_size = env_size;
1547  env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1548  return env;
1549 }
1550 
1551 static inline void
1552 VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1553 {
1554  *((VALUE *)ptr) = v;
1555 }
1556 
1557 static inline void
1558 VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1559 {
1560  VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1561  VM_FORCE_WRITE(ptr, special_const_value);
1562 }
1563 
1564 static inline void
1565 VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1566 {
1567  VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1568  VM_FORCE_WRITE(&ep[index], v);
1569 }
1570 
1571 const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1572 const VALUE *rb_vm_proc_local_ep(VALUE proc);
1573 void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1574 void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1575 
1576 VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1577 
1578 #define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1579 #define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1580 
1581 #define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1582  ((void *)(ecfp) > (void *)(cfp))
1583 
1584 static inline const rb_control_frame_t *
1585 RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1586 {
1587  return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1588 }
1589 
1590 static inline int
1591 RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1592 {
1593  return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1594 }
1595 
1596 static inline int
1597 VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1598 {
1599  if ((block_handler & 0x03) == 0x01) {
1600 #if VM_CHECK_MODE > 0
1601  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1602  VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1603 #endif
1604  return 1;
1605  }
1606  else {
1607  return 0;
1608  }
1609 }
1610 
1611 static inline VALUE
1612 VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1613 {
1614  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1615  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1616  return block_handler;
1617 }
1618 
1619 static inline const struct rb_captured_block *
1620 VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1621 {
1622  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1623  VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1624  return captured;
1625 }
1626 
1627 static inline int
1628 VM_BH_IFUNC_P(VALUE block_handler)
1629 {
1630  if ((block_handler & 0x03) == 0x03) {
1631 #if VM_CHECK_MODE > 0
1632  struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1633  VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1634 #endif
1635  return 1;
1636  }
1637  else {
1638  return 0;
1639  }
1640 }
1641 
1642 static inline VALUE
1643 VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1644 {
1645  VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1646  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1647  return block_handler;
1648 }
1649 
1650 static inline const struct rb_captured_block *
1651 VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1652 {
1653  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1654  VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1655  return captured;
1656 }
1657 
1658 static inline const struct rb_captured_block *
1659 VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1660 {
1661  struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1662  VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1663  return captured;
1664 }
1665 
1666 static inline enum rb_block_handler_type
1667 vm_block_handler_type(VALUE block_handler)
1668 {
1669  if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1670  return block_handler_type_iseq;
1671  }
1672  else if (VM_BH_IFUNC_P(block_handler)) {
1673  return block_handler_type_ifunc;
1674  }
1675  else if (SYMBOL_P(block_handler)) {
1676  return block_handler_type_symbol;
1677  }
1678  else {
1679  VM_ASSERT(rb_obj_is_proc(block_handler));
1680  return block_handler_type_proc;
1681  }
1682 }
1683 
1684 static inline void
1685 vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1686 {
1687  VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1688  (vm_block_handler_type(block_handler), 1));
1689 }
1690 
1691 static inline enum rb_block_type
1692 vm_block_type(const struct rb_block *block)
1693 {
1694 #if VM_CHECK_MODE > 0
1695  switch (block->type) {
1696  case block_type_iseq:
1697  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1698  break;
1699  case block_type_ifunc:
1700  VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1701  break;
1702  case block_type_symbol:
1703  VM_ASSERT(SYMBOL_P(block->as.symbol));
1704  break;
1705  case block_type_proc:
1706  VM_ASSERT(rb_obj_is_proc(block->as.proc));
1707  break;
1708  }
1709 #endif
1710  return block->type;
1711 }
1712 
1713 static inline void
1714 vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1715 {
1716  struct rb_block *mb = (struct rb_block *)block;
1717  mb->type = type;
1718 }
1719 
1720 static inline const struct rb_block *
1721 vm_proc_block(VALUE procval)
1722 {
1723  VM_ASSERT(rb_obj_is_proc(procval));
1724  return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1725 }
1726 
1727 static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1728 static inline const VALUE *vm_block_ep(const struct rb_block *block);
1729 
1730 static inline const rb_iseq_t *
1731 vm_proc_iseq(VALUE procval)
1732 {
1733  return vm_block_iseq(vm_proc_block(procval));
1734 }
1735 
1736 static inline const VALUE *
1737 vm_proc_ep(VALUE procval)
1738 {
1739  return vm_block_ep(vm_proc_block(procval));
1740 }
1741 
1742 static inline const rb_iseq_t *
1743 vm_block_iseq(const struct rb_block *block)
1744 {
1745  switch (vm_block_type(block)) {
1746  case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1747  case block_type_proc: return vm_proc_iseq(block->as.proc);
1748  case block_type_ifunc:
1749  case block_type_symbol: return NULL;
1750  }
1751  VM_UNREACHABLE(vm_block_iseq);
1752  return NULL;
1753 }
1754 
1755 static inline const VALUE *
1756 vm_block_ep(const struct rb_block *block)
1757 {
1758  switch (vm_block_type(block)) {
1759  case block_type_iseq:
1760  case block_type_ifunc: return block->as.captured.ep;
1761  case block_type_proc: return vm_proc_ep(block->as.proc);
1762  case block_type_symbol: return NULL;
1763  }
1764  VM_UNREACHABLE(vm_block_ep);
1765  return NULL;
1766 }
1767 
1768 static inline VALUE
1769 vm_block_self(const struct rb_block *block)
1770 {
1771  switch (vm_block_type(block)) {
1772  case block_type_iseq:
1773  case block_type_ifunc:
1774  return block->as.captured.self;
1775  case block_type_proc:
1776  return vm_block_self(vm_proc_block(block->as.proc));
1777  case block_type_symbol:
1778  return Qundef;
1779  }
1780  VM_UNREACHABLE(vm_block_self);
1781  return Qundef;
1782 }
1783 
1784 static inline VALUE
1785 VM_BH_TO_SYMBOL(VALUE block_handler)
1786 {
1787  VM_ASSERT(SYMBOL_P(block_handler));
1788  return block_handler;
1789 }
1790 
1791 static inline VALUE
1792 VM_BH_FROM_SYMBOL(VALUE symbol)
1793 {
1794  VM_ASSERT(SYMBOL_P(symbol));
1795  return symbol;
1796 }
1797 
1798 static inline VALUE
1799 VM_BH_TO_PROC(VALUE block_handler)
1800 {
1801  VM_ASSERT(rb_obj_is_proc(block_handler));
1802  return block_handler;
1803 }
1804 
1805 static inline VALUE
1806 VM_BH_FROM_PROC(VALUE procval)
1807 {
1808  VM_ASSERT(rb_obj_is_proc(procval));
1809  return procval;
1810 }
1811 
1812 /* VM related object allocate functions */
1813 VALUE rb_thread_alloc(VALUE klass);
1814 VALUE rb_binding_alloc(VALUE klass);
1815 VALUE rb_proc_alloc(VALUE klass);
1816 VALUE rb_proc_dup(VALUE self);
1817 
1818 /* for debug */
1819 extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1820 extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1821 extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1822 
1823 #define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1824 #define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1825 bool rb_vm_bugreport(const void *, FILE *);
1826 typedef void (*ruby_sighandler_t)(int);
1827 RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1828 NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1829 
1830 /* functions about thread/vm execution */
1831 RUBY_SYMBOL_EXPORT_BEGIN
1832 VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1833 VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1834 VALUE rb_iseq_path(const rb_iseq_t *iseq);
1835 VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1836 RUBY_SYMBOL_EXPORT_END
1837 
1838 VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1839 void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1840 
1841 int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1842 void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1843 
1844 VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1845 
1846 VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1847 static inline VALUE
1848 rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1849 {
1850  return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1851 }
1852 
1853 static inline VALUE
1854 rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1855 {
1856  return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1857 }
1858 
1859 VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1860 VALUE rb_vm_env_local_variables(const rb_env_t *env);
1861 const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1862 const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1863 void rb_vm_inc_const_missing_count(void);
1864 VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1865  const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1866 void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1867 void rb_vm_pop_frame(rb_execution_context_t *ec);
1868 
1869 void rb_thread_start_timer_thread(void);
1870 void rb_thread_stop_timer_thread(void);
1871 void rb_thread_reset_timer_thread(void);
1872 void rb_thread_wakeup_timer_thread(int);
1873 
1874 static inline void
1875 rb_vm_living_threads_init(rb_vm_t *vm)
1876 {
1877  ccan_list_head_init(&vm->waiting_fds);
1878  ccan_list_head_init(&vm->workqueue);
1879  ccan_list_head_init(&vm->ractor.set);
1880  ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1881 }
1882 
1883 typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1884 rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1885 rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1886 VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1887 int rb_vm_get_sourceline(const rb_control_frame_t *);
1888 void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1889 void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1890 rb_thread_t * ruby_thread_from_native(void);
1891 int ruby_thread_set_native(rb_thread_t *th);
1892 int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1893 void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1894 void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1895 VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1896 
1897 void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1898 
1899 #define rb_vm_register_special_exception(sp, e, m) \
1900  rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1901 
1902 void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1903 
1904 void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1905 
1906 const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1907 
1908 #define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1909 
1910 #define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1911  STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1912  STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1913  const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1914  if (UNLIKELY((cfp) <= &bound[1])) { \
1915  vm_stackoverflow(); \
1916  } \
1917 } while (0)
1918 
1919 #define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1920  CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
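/*
How the check works (illustrative note, not part of the original header): the
VALUE stack grows toward higher addresses while control frames are pushed
downward from the top of the same region, so the stack is exhausted when sp,
plus a safety margin of `margin` VALUEs, would collide with the current
control frame. A hypothetical caller about to push argc + 1 VALUEs could
guard itself with:

```c
CHECK_VM_STACK_OVERFLOW(cfp, argc + 1); // calls vm_stackoverflow() on overflow
```
*/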
1921 
1922 VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1923 
1924 rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1925 
1926 /* for thread */
1927 
1928 #if RUBY_VM_THREAD_MODEL == 2
1929 
1930 RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1931 RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1932 RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1933 RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1934 RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1935 
1936 #define GET_VM() rb_current_vm()
1937 #define GET_RACTOR() rb_current_ractor()
1938 #define GET_THREAD() rb_current_thread()
1939 #define GET_EC() rb_current_execution_context(true)
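/*
Illustrative note (not part of the original header): these macros are the
canonical entry points for reaching the current VM structures from C code
running on a Ruby thread:

```c
rb_execution_context_t *ec = GET_EC(); // VM_ASSERTs non-NULL (expect_ec == true)
rb_thread_t *th = GET_THREAD();        // same as rb_ec_thread_ptr(GET_EC())
rb_vm_t *vm = GET_VM();                // the process-wide VM instance
```
*/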
1940 
1941 static inline rb_thread_t *
1942 rb_ec_thread_ptr(const rb_execution_context_t *ec)
1943 {
1944  return ec->thread_ptr;
1945 }
1946 
1947 static inline rb_ractor_t *
1948 rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1949 {
1950  const rb_thread_t *th = rb_ec_thread_ptr(ec);
1951  if (th) {
1952  VM_ASSERT(th->ractor != NULL);
1953  return th->ractor;
1954  }
1955  else {
1956  return NULL;
1957  }
1958 }
1959 
1960 static inline rb_vm_t *
1961 rb_ec_vm_ptr(const rb_execution_context_t *ec)
1962 {
1963  const rb_thread_t *th = rb_ec_thread_ptr(ec);
1964  if (th) {
1965  return th->vm;
1966  }
1967  else {
1968  return NULL;
1969  }
1970 }
1971 
1972 static inline rb_execution_context_t *
1973 rb_current_execution_context(bool expect_ec)
1974 {
1975 #ifdef RB_THREAD_LOCAL_SPECIFIER
1976  #ifdef __APPLE__
1977  rb_execution_context_t *ec = rb_current_ec();
1978  #else
1979  rb_execution_context_t *ec = ruby_current_ec;
1980  #endif
1981 
1982  /* On shared objects, `__tls_get_addr()` is used to access the TLS,
1983  * and the address of `ruby_current_ec` can be stored on a function
1984  * frame. However, that address can be misused after native-thread
1985  * migration of a coroutine:
1986  * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
1987  * 2) Context-switch and resume the coroutine on NT2.
1988  * 3) `ptr` is used on NT2, but it accesses the TLS of NT1.
1989  * This assertion checks for such misuse.
1990  *
1991  * To avoid such accidents, `GET_EC()` should be called only once per
1992  * frame. Note that inlining can introduce the problem.
1993  */
1994  VM_ASSERT(ec == rb_current_ec_noinline());
1995 #else
1996  rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
1997 #endif
1998  VM_ASSERT(!expect_ec || ec != NULL);
1999  return ec;
2000 }
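/*
A hypothetical sketch of the TLS hazard described above (not part of the
original header): with an M:N scheduler, a coroutine can resume on a different
native thread, so a cached TLS address must not be reused across a
context-switch point:

```c
rb_execution_context_t *ec = GET_EC(); // resolves &ruby_current_ec once
do_blocking_thing();                   // hypothetical; coroutine may migrate to another NT
rb_execution_context_t *ec2 = GET_EC(); // BAD if the compiler reuses the cached
                                        // &ruby_current_ec from NT1's TLS
```
*/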
2001 
2002 static inline rb_thread_t *
2003 rb_current_thread(void)
2004 {
2005  const rb_execution_context_t *ec = GET_EC();
2006  return rb_ec_thread_ptr(ec);
2007 }
2008 
2009 static inline rb_ractor_t *
2010 rb_current_ractor_raw(bool expect)
2011 {
2012  if (ruby_single_main_ractor) {
2013  return ruby_single_main_ractor;
2014  }
2015  else {
2016  const rb_execution_context_t *ec = rb_current_execution_context(expect);
2017  return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2018  }
2019 }
2020 
2021 static inline rb_ractor_t *
2022 rb_current_ractor(void)
2023 {
2024  return rb_current_ractor_raw(true);
2025 }
2026 
2027 static inline rb_vm_t *
2028 rb_current_vm(void)
2029 {
2030 #if 0 // TODO: reconsider the assertions
2031  VM_ASSERT(ruby_current_vm_ptr == NULL ||
2032  ruby_current_execution_context_ptr == NULL ||
2033  rb_ec_thread_ptr(GET_EC()) == NULL ||
2034  rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2035  rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2036 #endif
2037 
2038  return ruby_current_vm_ptr;
2039 }
2040 
2041 void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2042  unsigned int recorded_lock_rec,
2043  unsigned int current_lock_rec);
2044 
2045 static inline unsigned int
2046 rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2047 {
2048  rb_vm_t *vm = rb_ec_vm_ptr(ec);
2049 
2050  if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
2051  return 0;
2052  }
2053  else {
2054  return vm->ractor.sync.lock_rec;
2055  }
2056 }
2057 
2058 #else
2059 #error "unsupported thread model"
2060 #endif
2061 
2062 enum {
2063  TIMER_INTERRUPT_MASK = 0x01,
2064  PENDING_INTERRUPT_MASK = 0x02,
2065  POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2066  TRAP_INTERRUPT_MASK = 0x08,
2067  TERMINATE_INTERRUPT_MASK = 0x10,
2068  VM_BARRIER_INTERRUPT_MASK = 0x20,
2069 };
2070 
2071 #define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2072 #define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2073 #define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2074 #define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2075 #define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2076 #define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2077 #define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
2078  (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
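/*
Illustrative note (not part of the original header): each mask above is one
bit in ec->interrupt_flag, set with an atomic OR and filtered through
ec->interrupt_mask when tested:

```c
RUBY_VM_SET_TRAP_INTERRUPT(ec);   // interrupt_flag |= 0x08, atomically
if (RUBY_VM_INTERRUPTED(ec)) {    // an unmasked PENDING or TRAP bit is set
    // deliver the interrupt
}
```
*/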
2079 
2080 static inline bool
2081 RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2082 {
2083 #if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2084  uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2085 
2086  if (current_clock != ec->checked_clock) {
2087  ec->checked_clock = current_clock;
2088  RUBY_VM_SET_TIMER_INTERRUPT(ec);
2089  }
2090 #endif
2091  return ec->interrupt_flag & ~(ec)->interrupt_mask;
2092 }
2093 
2094 VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2095 int rb_signal_buff_size(void);
2096 int rb_signal_exec(rb_thread_t *th, int sig);
2097 void rb_threadptr_check_signal(rb_thread_t *mth);
2098 void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2099 void rb_threadptr_signal_exit(rb_thread_t *th);
2100 int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2101 void rb_threadptr_interrupt(rb_thread_t *th);
2102 void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2103 void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2104 void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2105 VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2106 void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2107 void rb_execution_context_update(rb_execution_context_t *ec);
2108 void rb_execution_context_mark(const rb_execution_context_t *ec);
2109 void rb_fiber_close(rb_fiber_t *fib);
2110 void Init_native_thread(rb_thread_t *th);
2111 int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2112 
2113 // vm_sync.h
2114 void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2115 void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2116 
2117 #define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2118 static inline void
2119 rb_vm_check_ints(rb_execution_context_t *ec)
2120 {
2121 #ifdef RUBY_ASSERT_CRITICAL_SECTION
2122  VM_ASSERT(ruby_assert_critical_section_entered == 0);
2123 #endif
2124 
2125  VM_ASSERT(ec == GET_EC());
2126 
2127  if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2128  rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2129  }
2130 }
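/*
Illustrative usage (not part of the original header): long-running C code
polls for interrupts so that signals, Thread#raise, and Thread#kill are
serviced promptly; the loop below is hypothetical and assumes ec == GET_EC():

```c
while (have_more_work()) {    // have_more_work() is hypothetical
    do_one_unit_of_work();
    RUBY_VM_CHECK_INTS(ec);   // may raise or schedule another thread
}
```
*/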
2131 
2132 /* tracer */
2133 
2134 struct rb_trace_arg_struct {
2135  rb_event_flag_t event;
2136  rb_execution_context_t *ec;
2137  const rb_control_frame_t *cfp;
2138  VALUE self;
2139  ID id;
2140  ID called_id;
2141  VALUE klass;
2142  VALUE data;
2143 
2144  int klass_solved;
2145 
2146  /* calc from cfp */
2147  int lineno;
2148  VALUE path;
2149 };
2150 
2151 void rb_hook_list_mark(rb_hook_list_t *hooks);
2152 void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
2153 void rb_hook_list_free(rb_hook_list_t *hooks);
2154 void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2155 void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2156 
2157 void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2158 
2159 #define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2160  const rb_event_flag_t flag_arg_ = (flag_); \
2161  rb_hook_list_t *hooks_arg_ = (hooks_); \
2162  if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2163  /* defer evaluating the other arguments */ \
2164  rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2165  } \
2166 } while (0)
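/*
Illustrative note (not part of the original header): the macro tests
hooks_->events before expanding to the call so that, in the common case of no
hook listening, an event site costs a single branch and the remaining
arguments are never evaluated. A hypothetical emission site, where `hooks`
and `self` are locals of the caller:

```c
EXEC_EVENT_HOOK_ORIG(ec, hooks, RUBY_EVENT_LINE, self, 0, 0, 0, Qundef, 0);
// self/id/called_id/klass/data are only evaluated if a :line hook is enabled
```
*/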
2167 
2168 static inline void
2169 rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2170  VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2171 {
2172  struct rb_trace_arg_struct trace_arg;
2173 
2174  VM_ASSERT((hooks->events & flag) != 0);
2175 
2176  trace_arg.event = flag;
2177  trace_arg.ec = ec;
2178  trace_arg.cfp = ec->cfp;
2179  trace_arg.self = self;
2180  trace_arg.id = id;
2181  trace_arg.called_id = called_id;
2182  trace_arg.klass = klass;
2183  trace_arg.data = data;
2184  trace_arg.path = Qundef;
2185  trace_arg.klass_solved = 0;
2186 
2187  rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2188 }
2189 
2190 struct rb_ractor_pub {
2191  VALUE self;
2192  uint32_t id;
2193  rb_hook_list_t hooks;
2194 };
2195 
2196 static inline rb_hook_list_t *
2197 rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2198 {
2199  struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2200  return &cr_pub->hooks;
2201 }
2202 
2203 #define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2204  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2205 
2206 #define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2207  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2208 
2209 static inline void
2210 rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2211 {
2212  EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2213  NIL_P(eval_script) ? (VALUE)iseq :
2214  rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2215 }
2216 
2217 void rb_vm_trap_exit(rb_vm_t *vm);
2218 void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2219 void rb_vm_postponed_job_free(void); /* vm_trace.c */
2220 size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2221 void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2222 
2223 RUBY_SYMBOL_EXPORT_BEGIN
2224 
2225 int rb_thread_check_trap_pending(void);
2226 
2227 /* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2228 #define RUBY_EVENT_COVERAGE_LINE 0x010000
2229 #define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2230 
2231 extern VALUE rb_get_coverages(void);
2232 extern void rb_set_coverages(VALUE, int, VALUE);
2233 extern void rb_clear_coverages(void);
2234 extern void rb_reset_coverages(void);
2235 extern void rb_resume_coverages(void);
2236 extern void rb_suspend_coverages(void);
2237 
2238 void rb_postponed_job_flush(rb_vm_t *vm);
2239 
2240 // ractor.c
2241 RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2242 RUBY_EXTERN VALUE rb_eRactorIsolationError;
2243 
2244 RUBY_SYMBOL_EXPORT_END
2245 
2246 #endif /* RUBY_VM_CORE_H */