21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG) 
   23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0) 
   48#include "ruby/internal/config.h" 
   54#include "ruby_assert.h" 
   56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX])) 
   59#define VM_ASSERT(expr, ...) \ 
   60    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__)) 
   61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable") 
   62#define RUBY_ASSERT_CRITICAL_SECTION 
   63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule() 
   65#define VM_ASSERT(...) ((void)0) 
   66#define VM_UNREACHABLE(func) UNREACHABLE 
   67#define RUBY_DEBUG_THREAD_SCHEDULE() 
   70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex)) 
   72#if defined(RUBY_ASSERT_CRITICAL_SECTION) 
   97extern int ruby_assert_critical_section_entered;
 
   98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false) 
   99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false) 
  101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() 
  102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() 
  105#if defined(__wasm__) && !defined(__EMSCRIPTEN__) 
  106# include "wasm/setjmp.h" 
  111#if defined(__linux__) || defined(__FreeBSD__) 
  112# define RB_THREAD_T_HAS_NATIVE_ID 
  116#include "ccan/list/list.h" 
  119#include "internal/array.h" 
  120#include "internal/basic_operators.h" 
  121#include "internal/namespace.h" 
  122#include "internal/sanitizers.h" 
  123#include "internal/serial.h" 
  124#include "internal/set_table.h" 
  125#include "internal/vm.h" 
  130#include "ruby_atomic.h" 
  140#ifndef VM_INSN_INFO_TABLE_IMPL 
  141# define VM_INSN_INFO_TABLE_IMPL 2 
  146# define NSIG NSIG_MAX 
  147#elif defined(_SIG_MAXSIG)       
  149# define NSIG _SIG_MAXSIG 
  150#elif defined(_SIGMAX)           
  151# define NSIG (_SIGMAX + 1) 
  155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1) 
  158#define RUBY_NSIG NSIG 
  161#  define RUBY_SIGCHLD (SIGCLD) 
  162#elif defined(SIGCHLD) 
  163#  define RUBY_SIGCHLD (SIGCHLD) 
  166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__) 
  167#  define USE_SIGALTSTACK 
  168void *rb_allocate_sigaltstack(
void);
 
  169void *rb_register_sigaltstack(
void *);
 
  170#  define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack) 
  171#  define RB_ALTSTACK_FREE(var) free(var) 
  172#  define RB_ALTSTACK(var)  var 
  174#  define RB_ALTSTACK_INIT(var, altstack) 
  175#  define RB_ALTSTACK_FREE(var) 
  176#  define RB_ALTSTACK(var) (0) 
  179#include THREAD_IMPL_H 
  180#define RUBY_VM_THREAD_MODEL 2 
  187#if defined(__GNUC__) && __GNUC__ >= 2 
  189#if OPT_TOKEN_THREADED_CODE 
  190#if OPT_DIRECT_THREADED_CODE 
  191#undef OPT_DIRECT_THREADED_CODE 
  198#if OPT_DIRECT_THREADED_CODE 
  199#undef OPT_DIRECT_THREADED_CODE 
  201#if OPT_TOKEN_THREADED_CODE 
  202#undef OPT_TOKEN_THREADED_CODE 
  207#if    OPT_CALL_THREADED_CODE 
  208#if    OPT_DIRECT_THREADED_CODE 
  209#undef OPT_DIRECT_THREADED_CODE 
  213void rb_vm_encoded_insn_data_table_init(
void);
 
  214typedef unsigned long rb_num_t;
 
  215typedef   signed long rb_snum_t;
 
  219    RUBY_TAG_RETURN     = 0x1,
 
  220    RUBY_TAG_BREAK      = 0x2,
 
  222    RUBY_TAG_RETRY      = 0x4,
 
  224    RUBY_TAG_RAISE      = 0x6,
 
  225    RUBY_TAG_THROW      = 0x7,
 
  226    RUBY_TAG_FATAL      = 0x8,
 
  230#define TAG_NONE        RUBY_TAG_NONE 
  231#define TAG_RETURN      RUBY_TAG_RETURN 
  232#define TAG_BREAK       RUBY_TAG_BREAK 
  233#define TAG_NEXT        RUBY_TAG_NEXT 
  234#define TAG_RETRY       RUBY_TAG_RETRY 
  235#define TAG_REDO        RUBY_TAG_REDO 
  236#define TAG_RAISE       RUBY_TAG_RAISE 
  237#define TAG_THROW       RUBY_TAG_THROW 
  238#define TAG_FATAL       RUBY_TAG_FATAL 
  239#define TAG_MASK        RUBY_TAG_MASK 
  241enum ruby_vm_throw_flags {
 
  242    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
 
  243    VM_THROW_STATE_MASK = 0xff
 
  258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0 
  267STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
 
  269               sizeof(
const rb_cref_t *)) <= RVALUE_SIZE);
 
  316#ifndef VM_ARGC_STACK_MAX 
  317#define VM_ARGC_STACK_MAX 128 
  320# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc) 
  325#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj) 
  327#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj) 
  329#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type)) 
  340#define PATHOBJ_PATH     0 
  341#define PATHOBJ_REALPATH 1 
  344pathobj_path(
VALUE pathobj)
 
  356pathobj_realpath(
VALUE pathobj)
 
  368typedef uintptr_t iseq_bits_t;
 
  370#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size) 
  373#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache); 
  389enum rb_builtin_attr {
 
  391    BUILTIN_ATTR_LEAF = 0x01,
 
  393    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
 
  395    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
 
  397    BUILTIN_ATTR_C_TRACE = 0x08,
 
  403    enum rb_iseq_type type;
 
  405    unsigned int iseq_size;
 
  433            unsigned int has_lead   : 1;
 
  434            unsigned int has_opt    : 1;
 
  435            unsigned int has_rest   : 1;
 
  436            unsigned int has_post   : 1;
 
  437            unsigned int has_kw     : 1;
 
  438            unsigned int has_kwrest : 1;
 
  439            unsigned int has_block  : 1;
 
  441            unsigned int ambiguous_param0 : 1; 
 
  442            unsigned int accepts_no_kwarg : 1;
 
  443            unsigned int ruby2_keywords: 1;
 
  444            unsigned int anon_rest: 1;
 
  445            unsigned int anon_kwrest: 1;
 
  446            unsigned int use_block: 1;
 
  447            unsigned int forwardable: 1;
 
  459        const VALUE *opt_table; 
 
  474        const struct rb_iseq_param_keyword {
 
  480            VALUE *default_values;
 
  489        unsigned int *positions;
 
  491#if VM_INSN_INFO_TABLE_IMPL == 2 
  492        struct succ_index_table *succ_index_table;
 
 
  496    const ID *local_table;              
 
  515        rb_snum_t flip_count;
 
  518        VALUE pc2branchindex;
 
  519        VALUE *original_iseq;
 
  522    unsigned int local_table_size;
 
  523    unsigned int ic_size;     
 
  524    unsigned int ise_size;    
 
  525    unsigned int ivc_size;    
 
  526    unsigned int icvarc_size; 
 
  527    unsigned int ci_size;
 
  528    unsigned int stack_max; 
 
  530    unsigned int builtin_attrs; 
 
  543#if USE_YJIT || USE_ZJIT 
  545    rb_jit_func_t jit_entry;
 
  547    long unsigned jit_entry_calls;
 
  549    rb_jit_func_t jit_exception;
 
  551    long unsigned jit_exception_calls;
 
  558    uint64_t yjit_calls_at_interv;
 
 
  590#define ISEQ_BODY(iseq) ((iseq)->body) 
  592#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0) 
  593#define USE_LAZY_LOAD 0 
  604    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
 
  611rb_iseq_attr_p(
const rb_iseq_t *iseq, 
enum rb_builtin_attr attr)
 
  613    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
 
  621    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug(
"def_iseq_ptr: not iseq (%d)", def->type);
 
  623    return rb_iseq_check(def->body.iseq.
iseqptr);
 
  626enum ruby_special_exceptions {
 
  630    ruby_error_stackfatal,
 
  631    ruby_error_stream_closed,
 
  632    ruby_special_error_count
 
  635#define GetVMPtr(obj, ptr) \ 
  636  GetCoreDataFromValue((obj), rb_vm_t, (ptr)) 
  642    rb_vm_at_exit_func *func;
 
 
  646void *rb_objspace_alloc(
void);
 
  647void rb_objspace_free(
void *
objspace);
 
  648void rb_objspace_call_finalizer(
void);
 
  653    unsigned int running;
 
 
  671        struct ccan_list_head set;
 
  673        unsigned int blocking_cnt;
 
  680            rb_nativethread_lock_t lock;
 
  682            unsigned int lock_rec;
 
  685            rb_nativethread_cond_t terminate_cond;
 
  686            bool terminate_waiting;
 
  688#ifndef RUBY_THREAD_PTHREAD_H 
  690            bool barrier_waiting;
 
  691            unsigned int barrier_cnt;
 
  692            rb_nativethread_cond_t barrier_complete_cond;
 
  693            rb_nativethread_cond_t barrier_release_cond;
 
  697#ifdef RUBY_THREAD_PTHREAD_H 
  700            rb_nativethread_lock_t lock;
 
  704            rb_nativethread_cond_t cond; 
 
  705            unsigned int snt_cnt; 
 
  706            unsigned int dnt_cnt; 
 
  708            unsigned int running_cnt;
 
  710            unsigned int max_cpu;
 
  711            struct ccan_list_head grq; 
 
  712            unsigned int grq_cnt;
 
  715            struct ccan_list_head running_threads;
 
  718            struct ccan_list_head timeslice_threads;
 
  720            struct ccan_list_head zombie_threads;
 
  723            bool timeslice_wait_inf;
 
  726            rb_nativethread_cond_t barrier_complete_cond;
 
  727            rb_nativethread_cond_t barrier_release_cond;
 
  728            bool barrier_waiting;
 
  729            unsigned int barrier_waiting_cnt;
 
  730            unsigned int barrier_serial;
 
  732            unsigned int barrier_lock_rec;
 
  737#ifdef USE_SIGALTSTACK 
  741    rb_serial_t fork_gen;
 
  744    volatile int ubf_async_safe;
 
  746    unsigned int running: 1;
 
  747    unsigned int thread_abort_on_exception: 1;
 
  748    unsigned int thread_report_on_exception: 1;
 
  749    unsigned int thread_ignore_deadlock: 1;
 
  752    VALUE mark_object_ary;
 
  754    const VALUE special_exceptions[ruby_special_error_count];
 
  767        VALUE cmd[RUBY_NSIG];
 
  773    int src_encoding_index;
 
  776    struct ccan_list_head workqueue; 
 
  777    rb_nativethread_lock_t workqueue_lock;
 
  779    VALUE orig_progname, progname;
 
  780    VALUE coverages, me2counter;
 
  785        struct gc_mark_func_data_struct {
 
  787            void (*mark_func)(
VALUE v, 
void *data);
 
  806    ID inserting_constant_cache_id;
 
  808#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE 
  809#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023 
  811    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; 
 
  813#if defined(USE_VM_CLOCK) && USE_VM_CLOCK 
  819        size_t thread_vm_stack_size;
 
  820        size_t thread_machine_stack_size;
 
  821        size_t fiber_vm_stack_size;
 
  822        size_t fiber_machine_stack_size;
 
 
  826extern bool ruby_vm_during_cleanup;
 
  830#define RUBY_VM_SIZE_ALIGN 4096 
  832#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE))  
  833#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE))  
  834#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE))  
  835#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE))  
  837#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE))  
  838#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE))  
  839#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE))  
  840#if defined(__powerpc64__) || defined(__ppc64__)  
  841#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE))  
  843#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE))  
  846#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) 
  848#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE 
  849#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE)) 
  850#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN 
  851#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE)) 
  852#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE 
  853#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE)) 
  854#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN 
  855#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE)) 
  858#ifndef VM_DEBUG_BP_CHECK 
  859#define VM_DEBUG_BP_CHECK 0 
  862#ifndef VM_DEBUG_VERIFY_METHOD_CACHE 
  863#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0) 
  876enum rb_block_handler_type {
 
  877    block_handler_type_iseq,
 
  878    block_handler_type_ifunc,
 
  879    block_handler_type_symbol,
 
  880    block_handler_type_proc
 
  896    enum rb_block_type 
type;
 
 
  905    const void *block_code; 
 
 
  915rb_thread_ptr(
VALUE thval)
 
  920enum rb_thread_status {
 
  923    THREAD_STOPPED_FOREVER,
 
  928typedef RUBY_JMP_BUF rb_jmpbuf_t;
 
  930typedef void *rb_jmpbuf_t[5];
 
  945#if defined(__wasm__) && !defined(__EMSCRIPTEN__) 
  955typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
 
  957#define RB_VM_TAG_JMPBUF_GET(buf) (*buf) 
  960rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
 
  962    *jmpbuf = ruby_xmalloc(
sizeof(rb_jmpbuf_t));
 
  966rb_vm_tag_jmpbuf_deinit(
const rb_vm_tag_jmpbuf_t *jmpbuf)
 
  971typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
 
  973#define RB_VM_TAG_JMPBUF_GET(buf) (buf) 
  976rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
 
  982rb_vm_tag_jmpbuf_deinit(
const rb_vm_tag_jmpbuf_t *jmpbuf)
 
  995    rb_vm_tag_jmpbuf_t buf;
 
  997    enum ruby_tag_type state;
 
  998    unsigned int lock_rec;
 
 
 1001STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(
struct rb_vm_tag, buf) > 0);
 
 1002STATIC_ASSERT(rb_vm_tag_buf_end,
 
 1003              offsetof(
struct rb_vm_tag, buf) + 
sizeof(rb_vm_tag_jmpbuf_t) <
 
 1024    size_t vm_stack_size;       
 
 1032#if defined(USE_VM_CLOCK) && USE_VM_CLOCK 
 1033    uint32_t checked_clock;
 
 1041    VALUE local_storage_recursive_hash;
 
 1042    VALUE local_storage_recursive_hash_for_trace;
 
 1048    const VALUE *root_lep;
 
 1056    VALUE passed_block_handler; 
 
 1058    uint8_t raised_flag; 
 
 1061    BITFIELD(
enum method_missing_reason, method_missing_reason, 8);
 
 1063    VALUE private_const_reference;
 
 1074        size_t stack_maxsize;
 
 1077#ifdef RUBY_ASAN_ENABLED 
 1078        void *asan_fake_stack_handle;
 
 
 1083#ifndef rb_execution_context_t 
 1085#define rb_execution_context_t rb_execution_context_t 
 1089#define VM_CORE_H_EC_DEFINED 1 
 1113    struct ccan_list_node lt_node; 
 
 1121    bool mn_schedulable;
 
 1135    BITFIELD(
enum rb_thread_status, status, 2);
 
 1137    unsigned int has_dedicated_nt : 1;
 
 1138    unsigned int to_kill : 1;
 
 1139    unsigned int abort_on_exception: 1;
 
 1140    unsigned int report_on_exception: 1;
 
 1141    unsigned int pending_interrupt_queue_checked: 1;
 
 1143    uint32_t running_time_us; 
 
 1145    void *blocking_region_buffer;
 
 1151#if OPT_CALL_THREADED_CODE 
 1156    VALUE pending_interrupt_queue;
 
 1157    VALUE pending_interrupt_mask_stack;
 
 1160    rb_nativethread_lock_t interrupt_lock;
 
 1162    VALUE locking_mutex;
 
 1164    struct ccan_list_head interrupt_exec_tasks;
 
 1175            VALUE (*func)(
void *);
 
 1180    enum thread_invoke_type {
 
 1181        thread_invoke_type_none = 0,
 
 1182        thread_invoke_type_proc,
 
 1183        thread_invoke_type_ractor_proc,
 
 1184        thread_invoke_type_func
 
 1191    unsigned int blocking;
 
 1195    void **specific_storage;
 
 
 1200static inline unsigned int 
 1203    return th ? (
unsigned int)th->serial : 0;
 
 1207    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
 
 1208    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
 
 1209    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
 
 1211    VM_DEFINECLASS_TYPE_MASK            = 0x07
 
 1212} rb_vm_defineclass_type_t;
 
 1214#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK) 
 1215#define VM_DEFINECLASS_FLAG_SCOPED         0x08 
 1216#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10 
 1217#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED) 
 1218#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \ 
 1219    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS) 
 1222RUBY_SYMBOL_EXPORT_BEGIN
 
 1231                                VALUE script_lines);
 
 1241rb_iseq_new_with_callback_new_callback(
 
 1264RUBY_SYMBOL_EXPORT_END
 
 1266#define GetProcPtr(obj, ptr) \ 
 1267  GetCoreDataFromValue((obj), rb_proc_t, (ptr)) 
 1271    unsigned int is_from_method: 1;     
 
 1272    unsigned int is_lambda: 1;          
 
 1273    unsigned int is_isolated: 1;        
 
 
 1276RUBY_SYMBOL_EXPORT_BEGIN
 
 1280RUBY_SYMBOL_EXPORT_END
 
 1287    unsigned int env_size;
 
 
 1292#define GetBindingPtr(obj, ptr) \ 
 1293  GetCoreDataFromValue((obj), rb_binding_t, (ptr)) 
 1297    const VALUE pathobj;
 
 
 1303enum vm_check_match_type {
 
 1304    VM_CHECKMATCH_TYPE_WHEN = 1,
 
 1305    VM_CHECKMATCH_TYPE_CASE = 2,
 
 1306    VM_CHECKMATCH_TYPE_RESCUE = 3
 
 1309#define VM_CHECKMATCH_TYPE_MASK   0x03 
 1310#define VM_CHECKMATCH_ARRAY       0x04 
 1312enum vm_opt_newarray_send_type {
 
 1313    VM_OPT_NEWARRAY_SEND_MAX = 1,
 
 1314    VM_OPT_NEWARRAY_SEND_MIN = 2,
 
 1315    VM_OPT_NEWARRAY_SEND_HASH = 3,
 
 1316    VM_OPT_NEWARRAY_SEND_PACK = 4,
 
 1317    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
 
 1318    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
 
 1321enum vm_special_object_type {
 
 1322    VM_SPECIAL_OBJECT_VMCORE = 1,
 
 1323    VM_SPECIAL_OBJECT_CBASE,
 
 1324    VM_SPECIAL_OBJECT_CONST_BASE
 
 1328    VM_SVAR_LASTLINE = 0,      
 
 1329    VM_SVAR_BACKREF = 1,       
 
 1331    VM_SVAR_EXTRA_START = 2,
 
 1332    VM_SVAR_FLIPFLOP_START = 2 
 
 1344typedef VALUE CDHASH;
 
 1346#ifndef FUNC_FASTCALL 
 1347#define FUNC_FASTCALL(x) x 
 1353#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag)) 
 1354#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask)) 
 1356#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01) 
 1357#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03) 
 1358#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01) 
 1360enum vm_frame_env_flags {
 
 1371    VM_FRAME_MAGIC_METHOD = 0x11110001,
 
 1372    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
 
 1373    VM_FRAME_MAGIC_CLASS  = 0x33330001,
 
 1374    VM_FRAME_MAGIC_TOP    = 0x44440001,
 
 1375    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
 
 1376    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
 
 1377    VM_FRAME_MAGIC_EVAL   = 0x77770001,
 
 1378    VM_FRAME_MAGIC_RESCUE = 0x78880001,
 
 1379    VM_FRAME_MAGIC_DUMMY  = 0x79990001,
 
 1381    VM_FRAME_MAGIC_MASK   = 0x7fff0001,
 
 1384    VM_FRAME_FLAG_FINISH    = 0x0020,
 
 1385    VM_FRAME_FLAG_BMETHOD   = 0x0040,
 
 1386    VM_FRAME_FLAG_CFRAME    = 0x0080,
 
 1387    VM_FRAME_FLAG_LAMBDA    = 0x0100,
 
 1388    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
 
 1389    VM_FRAME_FLAG_CFRAME_KW  = 0x0400,
 
 1390    VM_FRAME_FLAG_PASSED     = 0x0800,
 
 1391    VM_FRAME_FLAG_NS_REQUIRE = 0x1000,
 
 1394    VM_ENV_FLAG_LOCAL       = 0x0002,
 
 1395    VM_ENV_FLAG_ESCAPED     = 0x0004,
 
 1396    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
 
 1397    VM_ENV_FLAG_ISOLATED    = 0x0010,
 
 1400#define VM_ENV_DATA_SIZE             ( 3) 
 1402#define VM_ENV_DATA_INDEX_ME_CREF    (-2)  
 1403#define VM_ENV_DATA_INDEX_SPECVAL    (-1)  
 1404#define VM_ENV_DATA_INDEX_FLAGS      ( 0)  
 1405#define VM_ENV_DATA_INDEX_ENV        ( 1)  
 1407#define VM_ENV_INDEX_LAST_LVAR              (-VM_ENV_DATA_SIZE) 
 1409static inline void VM_FORCE_WRITE_SPECIAL_CONST(
const VALUE *ptr, 
VALUE special_const_value);
 
 1414    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
 
 1416    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
 
 1420VM_ENV_FLAGS_UNSET(
const VALUE *ep, 
VALUE flag)
 
 1422    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
 
 1424    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
 
 1427static inline unsigned long 
 1428VM_ENV_FLAGS(
const VALUE *ep, 
long flag)
 
 1430    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
 
 1432    return flags & flag;
 
 1435static inline unsigned long 
 1436VM_ENV_FLAGS_UNCHECKED(
const VALUE *ep, 
long flag)
 
 1438    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
 
 1439    return flags & flag;
 
 1442static inline unsigned long 
 1443VM_ENV_FRAME_TYPE_P(
const VALUE *ep, 
unsigned long frame_type)
 
 1445    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
 
 1448static inline unsigned long 
 1451    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
 
 1454static inline unsigned long 
 1457    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
 
 1463    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
 
 1469    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
 
 1475    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
 
 1481    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
 
 1487    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
 
 1491rb_obj_is_iseq(
VALUE iseq)
 
 1493    return imemo_type_p(iseq, imemo_iseq);
 
 1496#if VM_CHECK_MODE > 0 
 1497#define RUBY_VM_NORMAL_ISEQ_P(iseq)  rb_obj_is_iseq((VALUE)iseq) 
 1503    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
 
 1504    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
 
 1505              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
 
 1512    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
 
 1518    return !VM_FRAME_CFRAME_P(cfp);
 
 1524    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
 
 1530    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_NS_REQUIRE) != 0;
 
 1533#define RUBYVM_CFUNC_FRAME_P(cfp) \ 
 1534  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC) 
 1536#define VM_GUARDED_PREV_EP(ep)         GC_GUARDED_PTR(ep) 
 1537#define VM_BLOCK_HANDLER_NONE 0 
 1540VM_ENV_LOCAL_P(
const VALUE *ep)
 
 1542    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
 
 1546VM_ENV_LOCAL_P_UNCHECKED(
const VALUE *ep)
 
 1548    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
 
 1551static inline const VALUE *
 
 1552VM_ENV_PREV_EP_UNCHECKED(
const VALUE *ep)
 
 1554    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
 
 1557static inline const VALUE *
 
 1558VM_ENV_PREV_EP(
const VALUE *ep)
 
 1560    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
 
 1561    return VM_ENV_PREV_EP_UNCHECKED(ep);
 
 1565VM_ENV_NAMESPACED_P(
const VALUE *ep)
 
 1567    return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
 
 1571VM_ENV_BLOCK_HANDLER(
const VALUE *ep)
 
 1573    if (VM_ENV_NAMESPACED_P(ep)) {
 
 1574        VM_ASSERT(VM_ENV_LOCAL_P(ep));
 
 1575        return VM_BLOCK_HANDLER_NONE;
 
 1578    VM_ASSERT(VM_ENV_LOCAL_P(ep));
 
 1579    return ep[VM_ENV_DATA_INDEX_SPECVAL];
 
 1583VM_ENV_NAMESPACE(
const VALUE *ep)
 
 1585    VM_ASSERT(VM_ENV_NAMESPACED_P(ep));
 
 1586    VM_ASSERT(VM_ENV_LOCAL_P(ep));
 
 1587    return (
const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
 
 1591VM_ENV_NAMESPACE_UNCHECKED(
const VALUE *ep)
 
 1593    return (
const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
 
 1596#if VM_CHECK_MODE > 0 
 1597int rb_vm_ep_in_heap_p(
const VALUE *ep);
 
 1601VM_ENV_ESCAPED_P(
const VALUE *ep)
 
 1603    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
 
 1604    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
 
 1609VM_ENV_ENVVAL(const 
VALUE *ep)
 
 1611    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
 
 1612    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
 
 1613    VM_ASSERT(envval == 
Qundef || imemo_type_p(envval, imemo_env));
 
 1619VM_ENV_ENVVAL_PTR(const 
VALUE *ep)
 
 1621    return (
const rb_env_t *)VM_ENV_ENVVAL(ep);
 
 1629    env->env = env_body;
 
 1630    env->env_size = env_size;
 
 1631    env_ep[VM_ENV_DATA_INDEX_ENV] = (
VALUE)env;
 
 1638    *((
VALUE *)ptr) = v;
 
 1642VM_FORCE_WRITE_SPECIAL_CONST(
const VALUE *ptr, 
VALUE special_const_value)
 
 1645    VM_FORCE_WRITE(ptr, special_const_value);
 
 1649VM_STACK_ENV_WRITE(
const VALUE *ep, 
int index, 
VALUE v)
 
 1651    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
 
 1652    VM_FORCE_WRITE(&ep[index], v);
 
 1655const VALUE *rb_vm_ep_local_ep(
const VALUE *ep);
 
 1662#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1) 
 1663#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1) 
 1665#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \ 
 1666  ((void *)(ecfp) > (void *)(cfp)) 
 1677    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
 
 1681VM_BH_ISEQ_BLOCK_P(
VALUE block_handler)
 
 1683    if ((block_handler & 0x03) == 0x01) {
 
 1684#if VM_CHECK_MODE > 0 
 1686        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
 
 1698    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
 
 1699    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
 
 1700    return block_handler;
 
 1704VM_BH_TO_ISEQ_BLOCK(
VALUE block_handler)
 
 1707    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
 
 1712VM_BH_IFUNC_P(
VALUE block_handler)
 
 1714    if ((block_handler & 0x03) == 0x03) {
 
 1715#if VM_CHECK_MODE > 0 
 1717        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
 
 1729    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
 
 1730    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
 
 1731    return block_handler;
 
 1735VM_BH_TO_IFUNC_BLOCK(
VALUE block_handler)
 
 1738    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
 
 1743VM_BH_TO_CAPT_BLOCK(
VALUE block_handler)
 
 1746    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
 
 1750static inline enum rb_block_handler_type
 
 1751vm_block_handler_type(
VALUE block_handler)
 
 1753    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
 
 1754        return block_handler_type_iseq;
 
 1756    else if (VM_BH_IFUNC_P(block_handler)) {
 
 1757        return block_handler_type_ifunc;
 
 1759    else if (
SYMBOL_P(block_handler)) {
 
 1760        return block_handler_type_symbol;
 
 1764        return block_handler_type_proc;
 
 1769vm_block_handler_verify(MAYBE_UNUSED(
VALUE block_handler))
 
 1771    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
 
 1772              (vm_block_handler_type(block_handler), 1));
 
 1775static inline enum rb_block_type
 
 1776vm_block_type(
const struct rb_block *block)
 
 1778#if VM_CHECK_MODE > 0 
 1779    switch (block->type) {
 
 1780      case block_type_iseq:
 
 1781        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
 
 1783      case block_type_ifunc:
 
 1784        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
 
 1786      case block_type_symbol:
 
 1787        VM_ASSERT(
SYMBOL_P(block->as.symbol));
 
 1789      case block_type_proc:
 
 1798vm_block_type_set(
const struct rb_block *block, 
enum rb_block_type 
type)
 
 1804static inline const struct rb_block *
 
 1805vm_proc_block(
VALUE procval)
 
 1812static inline const VALUE *vm_block_ep(
const struct rb_block *block);
 
 1815vm_proc_iseq(
VALUE procval)
 
 1817    return vm_block_iseq(vm_proc_block(procval));
 
 1820static inline const VALUE *
 
 1821vm_proc_ep(
VALUE procval)
 
 1823    return vm_block_ep(vm_proc_block(procval));
 
 1827vm_block_iseq(
const struct rb_block *block)
 
 1829    switch (vm_block_type(block)) {
 
 1830      case block_type_iseq: 
return rb_iseq_check(block->as.captured.code.iseq);
 
 1831      case block_type_proc: 
return vm_proc_iseq(block->as.proc);
 
 1832      case block_type_ifunc:
 
 1833      case block_type_symbol: 
return NULL;
 
 1835    VM_UNREACHABLE(vm_block_iseq);
 
 1839static inline const VALUE *
 
 1840vm_block_ep(
const struct rb_block *block)
 
 1842    switch (vm_block_type(block)) {
 
 1843      case block_type_iseq:
 
 1844      case block_type_ifunc:  
return block->as.captured.ep;
 
 1845      case block_type_proc:   
return vm_proc_ep(block->as.proc);
 
 1846      case block_type_symbol: 
return NULL;
 
 1848    VM_UNREACHABLE(vm_block_ep);
 
 1853vm_block_self(
const struct rb_block *block)
 
 1855    switch (vm_block_type(block)) {
 
 1856      case block_type_iseq:
 
 1857      case block_type_ifunc:
 
 1858        return block->as.captured.self;
 
 1859      case block_type_proc:
 
 1860        return vm_block_self(vm_proc_block(block->as.proc));
 
 1861      case block_type_symbol:
 
 1864    VM_UNREACHABLE(vm_block_self);
 
 1869VM_BH_TO_SYMBOL(
VALUE block_handler)
 
 1871    VM_ASSERT(
SYMBOL_P(block_handler));
 
 1872    return block_handler;
 
 1876VM_BH_FROM_SYMBOL(
VALUE symbol)
 
 1883VM_BH_TO_PROC(
VALUE block_handler)
 
 1886    return block_handler;
 
 1890VM_BH_FROM_PROC(
VALUE procval)
 
 1907#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr) 
 1908#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr) 
 1909bool rb_vm_bugreport(
const void *, 
FILE *);
 
 1910typedef void (*ruby_sighandler_t)(int);
 
 1912NORETURN(
void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, 
int sig, const 
void *, const 
char *fmt, ...));
 
 1915RUBY_SYMBOL_EXPORT_BEGIN
 
 1920RUBY_SYMBOL_EXPORT_END
 
 1934    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
 
 1940    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
 
 1948void rb_vm_inc_const_missing_count(
void);
 
 1954void rb_thread_start_timer_thread(
void);
 
 1955void rb_thread_stop_timer_thread(
void);
 
 1956void rb_thread_reset_timer_thread(
void);
 
 1957void rb_thread_wakeup_timer_thread(
int);
 
 1960rb_vm_living_threads_init(
rb_vm_t *vm)
 
 1962    ccan_list_head_init(&vm->workqueue);
 
 1963    ccan_list_head_init(&vm->ractor.set);
 
 1964#ifdef RUBY_THREAD_PTHREAD_H 
 1965    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
 
 1969typedef int rb_backtrace_iter_func(
void *, 
VALUE, 
int, 
VALUE);
 
 1975void ruby_thread_init_stack(
rb_thread_t *th, 
void *local_in_parent_frame);
 
 1976void rb_thread_malloc_stack_set(
rb_thread_t *th, 
void *stack);
 
 1981void rb_vm_env_write(
const VALUE *ep, 
int index, 
VALUE v);
 
 1984void rb_vm_register_special_exception_str(
enum ruby_special_exceptions sp, 
VALUE exception_class, 
VALUE mesg);
 
 1986#define rb_vm_register_special_exception(sp, e, m) \ 
 1987    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m))) 
 1996#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack] 
 1998#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do {                       \ 
 1999    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE));              \ 
 2000    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \ 
 2001    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)];   \ 
 2002    if (UNLIKELY((cfp) <= &bound[1])) {                                      \ 
 2003        vm_stackoverflow();                                                  \ 
 2007#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \ 
 2008    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin)) 
 2016#if RUBY_VM_THREAD_MODEL == 2 
 2024#define GET_VM()     rb_current_vm() 
 2025#define GET_RACTOR() rb_current_ractor() 
 2026#define GET_THREAD() rb_current_thread() 
 2027#define GET_EC()     rb_current_execution_context(true) 
 2032    return ec->thread_ptr;
 
 2040        VM_ASSERT(th->ractor != NULL);
 
 2063rb_current_execution_context(
bool expect_ec)
 
 2065#ifdef RB_THREAD_LOCAL_SPECIFIER 
 2066  #ifdef RB_THREAD_CURRENT_EC_NOINLINE 
 2084    VM_ASSERT(ec == rb_current_ec_noinline());
 
 2088    VM_ASSERT(!expect_ec || ec != NULL);
 
 2093rb_current_thread(
void)
 
 2096    return rb_ec_thread_ptr(ec);
 
 2100rb_current_ractor_raw(
bool expect)
 
 2102    if (ruby_single_main_ractor) {
 
 2103        return ruby_single_main_ractor;
 
 2107        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
 
 2112rb_current_ractor(
void)
 
 2114    return rb_current_ractor_raw(
true);
 
 2121    VM_ASSERT(ruby_current_vm_ptr == NULL ||
 
 2122              ruby_current_execution_context_ptr == NULL ||
 
 2123              rb_ec_thread_ptr(GET_EC()) == NULL ||
 
 2124              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
 
 2125              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
 
 2128    return ruby_current_vm_ptr;
 
 2132                               unsigned int recorded_lock_rec,
 
 2133                               unsigned int current_lock_rec);
 
 2137NO_SANITIZE(
"thread", 
static inline bool 
 2140    VM_ASSERT(cr == GET_RACTOR());
 
 2141    return vm->ractor.sync.lock_owner == cr;
 
 2144static inline unsigned int 
 2147    rb_vm_t *vm = rb_ec_vm_ptr(ec);
 
 2149    if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
 
 2153        return vm->ractor.sync.lock_rec;
 
 2158#error "unsupported thread model" 
 2162    TIMER_INTERRUPT_MASK         = 0x01,
 
 2163    PENDING_INTERRUPT_MASK       = 0x02,
 
 2164    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
 
 2165    TRAP_INTERRUPT_MASK          = 0x08,
 
 2166    TERMINATE_INTERRUPT_MASK     = 0x10,
 
 2167    VM_BARRIER_INTERRUPT_MASK    = 0x20,
 
 2170#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK) 
 2171#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK) 
 2172#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK) 
 2173#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK) 
 2174#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK) 
 2175#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK) 
 2180    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
 
 2186#if defined(USE_VM_CLOCK) && USE_VM_CLOCK 
 2187    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
 
 2189    if (current_clock != ec->checked_clock) {
 
 2190        ec->checked_clock = current_clock;
 
 2191        RUBY_VM_SET_TIMER_INTERRUPT(ec);
 
 2194    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
 
 2198int rb_signal_buff_size(
void);
 
 2201void rb_threadptr_signal_raise(
rb_thread_t *th, 
int sig);
 
 2203int rb_threadptr_execute_interrupts(
rb_thread_t *, 
int);
 
 2205void rb_threadptr_unlock_all_locking_mutexes(
rb_thread_t *th);
 
 2206void rb_threadptr_pending_interrupt_clear(
rb_thread_t *th);
 
 2217void rb_vm_cond_wait(
rb_vm_t *vm, rb_nativethread_cond_t *cond);
 
 2218void rb_vm_cond_timedwait(
rb_vm_t *vm, rb_nativethread_cond_t *cond, 
unsigned long msec);
 
 2220#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec) 
 2224#ifdef RUBY_ASSERT_CRITICAL_SECTION 
 2225    VM_ASSERT(ruby_assert_critical_section_entered == 0);
 
 2228    VM_ASSERT(ec == rb_current_ec_noinline());
 
 2230    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
 
 2231        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
 
 2262#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \ 
 2263    const rb_event_flag_t flag_arg_ = (flag_); \ 
 2264    rb_hook_list_t *hooks_arg_ = (hooks_); \ 
 2265    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \ 
 2267        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \ 
 2277    VM_ASSERT((hooks->events & flag) != 0);
 
 2279    trace_arg.event = flag;
 
 2281    trace_arg.cfp = ec->cfp;
 
 2282    trace_arg.self = self;
 
 2284    trace_arg.called_id = called_id;
 
 2285    trace_arg.klass = klass;
 
 2286    trace_arg.data = data;
 
 2288    trace_arg.klass_solved = 0;
 
 2290    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
 
 2303    return &cr_pub->hooks;
 
 2306#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \ 
 2307  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0) 
 2309#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \ 
 2310  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1) 
 2317                    rb_ary_new_from_args(2, eval_script, (
VALUE)iseq));
 
 2320void rb_vm_trap_exit(
rb_vm_t *vm);
 
 2321void rb_vm_postponed_job_atfork(
void); 
 
 2322void rb_vm_postponed_job_free(
void); 
 
 2323size_t rb_vm_memsize_postponed_job_queue(
void); 
 
 2324void rb_vm_postponed_job_queue_init(
rb_vm_t *vm); 
 
 2326RUBY_SYMBOL_EXPORT_BEGIN
 
 2328int rb_thread_check_trap_pending(
void);
 
 2331#define RUBY_EVENT_COVERAGE_LINE                0x010000 
 2332#define RUBY_EVENT_COVERAGE_BRANCH              0x020000 
 2334extern VALUE rb_get_coverages(
void);
 
 2335extern void rb_set_coverages(
VALUE, 
int, 
VALUE);
 
 2336extern void rb_clear_coverages(
void);
 
 2337extern void rb_reset_coverages(
void);
 
 2338extern void rb_resume_coverages(
void);
 
 2339extern void rb_suspend_coverages(
void);
 
 2341void rb_postponed_job_flush(
rb_vm_t *vm);
 
 2347RUBY_SYMBOL_EXPORT_END
 
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
 
#define RUBY_ALIGNAS
Wraps (or simulates) alignas.
 
#define RUBY_EXTERN
Declaration of externally visible global variables.
 
#define RUBY_EVENT_SCRIPT_COMPILED
Encountered an eval.
 
uint32_t rb_event_flag_t
Represents event(s).
 
#define T_STRING
Old name of RUBY_T_STRING.
 
#define Qundef
Old name of RUBY_Qundef.
 
#define Qfalse
Old name of RUBY_Qfalse.
 
#define T_ARRAY
Old name of RUBY_T_ARRAY.
 
#define NIL_P
Old name of RB_NIL_P.
 
#define FIXNUM_P
Old name of RB_FIXNUM_P.
 
#define SYMBOL_P
Old name of RB_SYMBOL_P.
 
void * rb_check_typeddata(VALUE obj, const rb_data_type_t *data_type)
Identical to rb_typeddata_is_kind_of(), except it raises exceptions instead of returning false.
 
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
 
void rb_unblock_function_t(void *)
This is the type of UBFs.
 
VALUE rb_block_call_func(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))
This is the type of a function that the interpreter expect for C-backended blocks.
 
VALUE type(ANYARGS)
ANYARGS-ed function type.
 
#define RBIMPL_ATTR_NONNULL(list)
Wraps (or simulates) __attribute__((nonnull))
 
#define inline
Old Visual Studio versions do not support the inline keyword, so we need to define it to be __inline.
 
Functions related to nodes in the AST.
 
#define RARRAY_AREF(a, i)
 
#define RTYPEDDATA_DATA(v)
Convenient getter macro.
 
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
 
const ID * segments
A null-terminated list of ids, used to represent a constant's path. idNULL is used to represent the ::...
 
This is the struct that holds necessary info for a struct.
 
struct rb_iseq_constant_body::@156 param
parameter information
 
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
 
Internal header for Namespace.
 
IFUNC (Internal FUNCtion)
 
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
 
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
 
uintptr_t VALUE
Type that represents a Ruby object.
 
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.