#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
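/*
 * VM_CHECK_MODE gates the VM_ASSERT()/VM_UNREACHABLE() consistency checks
 * below; with the default argument 0 it falls back to RUBY_DEBUG. A sketch
 * of forcing the checks on in an otherwise non-debug build (assumption:
 * passed through CFLAGS):
 *
 *   cc -DVM_CHECK_MODE=1 ...
 */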
48#include "ruby/internal/config.h"
54#include "ruby_assert.h"
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr, ...) \
    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif
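/*
 * Minimal usage sketch (hypothetical call sites): VM_ASSERT() compiles to
 * ((void)0) unless VM_CHECK_MODE > 0, so its arguments must be free of side
 * effects, and VM_UNREACHABLE() marks branches the VM must never reach.
 *
 *   VM_ASSERT(th != NULL, "thread must be resolved here");
 *   default: VM_UNREACHABLE(instruction_dispatch);
 */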
#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif
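/*
 * Intended pairing (illustrative): a section that must not run interrupt
 * handlers increments the counter on entry and decrements it on exit;
 * rb_vm_check_ints() below asserts the counter is zero, catching interrupt
 * checks that sneak into such a section in debug builds.
 *
 *   RUBY_ASSERT_CRITICAL_SECTION_ENTER();
 *   ... no interrupt checkpoint may execute in here ...
 *   RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
 */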
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif
116#include "ccan/list/list.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/box.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
130#include "ruby_atomic.h"
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif

#if defined(NSIG_MAX)
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)
# define NSIG (_SIGMAX + 1)
#else
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG
#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#endif
#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var
#else
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif
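/*
 * Illustrative lifetime of an alternate signal stack (the `th->altstack`
 * field name is an assumption): it is allocated and registered once per
 * native thread so that a SIGSEGV caused by machine stack overflow can
 * still run its handler on a separate stack.
 *
 *   RB_ALTSTACK_INIT(th->altstack, rb_allocate_sigaltstack());
 *   ...
 *   RB_ALTSTACK_FREE(th->altstack);   // at thread teardown
 */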
#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2
/* enable at most one of the direct/token/call threaded-code options */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif

#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif
void rb_vm_encoded_insn_data_table_init(void);

typedef unsigned long rb_num_t;
typedef signed long rb_snum_t;
enum ruby_tag_type {
    RUBY_TAG_NONE = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK = 0x2,
    RUBY_TAG_NEXT = 0x3,
    RUBY_TAG_RETRY = 0x4,
    RUBY_TAG_REDO = 0x5,
    RUBY_TAG_RAISE = 0x6,
    RUBY_TAG_THROW = 0x7,
    RUBY_TAG_FATAL = 0x8,
    RUBY_TAG_MASK = 0xf
};

#define TAG_NONE RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK RUBY_TAG_BREAK
#define TAG_NEXT RUBY_TAG_NEXT
#define TAG_RETRY RUBY_TAG_RETRY
#define TAG_REDO RUBY_TAG_REDO
#define TAG_RAISE RUBY_TAG_RAISE
#define TAG_THROW RUBY_TAG_THROW
#define TAG_FATAL RUBY_TAG_FATAL
#define TAG_MASK RUBY_TAG_MASK
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};
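/*
 * Encoding note (derived from the masks above): a throw's state word keeps
 * the TAG_* value in its low byte and flags in the high bits, so the VM can
 * split one operand into both pieces:
 *
 *   enum ruby_tag_type tag = throw_state & VM_THROW_STATE_MASK;
 *   bool no_escape = (throw_state & VM_THROW_NO_ESCAPE_FLAG) != 0;
 */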
#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
#endif

#define VM_KW_SPECIFIED_BITS_MAX (32-1)

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
#ifndef RUBY_CORE_DATA_TYPE_CHECK
# if RUBY_DEBUG > 0
#  define RUBY_CORE_DATA_TYPE_CHECK 1
# else
#  define RUBY_CORE_DATA_TYPE_CHECK 0
# endif
#endif

#if !RUBY_CORE_DATA_TYPE_CHECK
#define GetCoreDataFromValue(obj, type, data_type, ptr) ((ptr) = (type*)RTYPEDDATA_GET_DATA(obj))
#else
#define GetCoreDataFromValue(obj, type, data_type, ptr) TypedData_Get_Struct(obj, type, data_type, ptr)
#endif
#define PATHOBJ_PATH 0
#define PATHOBJ_REALPATH 1
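/*
 * A "path object" is either a String (path == realpath) or a two-element
 * Array holding [path, realpath]; the accessors below hide which of the
 * two representations an iseq carries.
 */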
static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) return pathobj;
    VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
    return RARRAY_AREF(pathobj, PATHOBJ_PATH);
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) return pathobj;
    VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
    return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
}
typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
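/*
 * Layout note (inferred from the index arithmetic above): is_entries is a
 * single flat allocation holding every inline-cache entry of an iseq,
 * segmented by kind; ISEQ_IS_SIZE() is the total entry count, and
 * ISEQ_IS_IC_ENTRY() skips past the ise/icvarc/ivc segments to index the
 * plain constant-cache (IC) segment.
 */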
enum rb_builtin_attr {
    BUILTIN_ATTR_LEAF = 0x01,
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    BUILTIN_ATTR_C_TRACE = 0x08,
    BUILTIN_ATTR_WITHOUT_INTERRUPTS = 0x10,
};
    enum rb_iseq_type type;

    unsigned int iseq_size;
    unsigned int has_lead : 1;
    unsigned int has_opt : 1;
    unsigned int has_rest : 1;
    unsigned int has_post : 1;
    unsigned int has_kw : 1;
    unsigned int has_kwrest : 1;
    unsigned int has_block : 1;

    unsigned int ambiguous_param0 : 1;
    unsigned int accepts_no_kwarg : 1;
    unsigned int ruby2_keywords: 1;
    unsigned int anon_rest: 1;
    unsigned int anon_kwrest: 1;
    unsigned int use_block: 1;
    unsigned int forwardable: 1;
    unsigned int accepts_no_block: 1;
    const VALUE *opt_table;

    VALUE *default_values;

    unsigned int *positions;

#if VM_INSN_INFO_TABLE_IMPL == 2
    struct succ_index_table *succ_index_table;
#endif

    const ID *local_table;

    rb_snum_t flip_count;
    VALUE pc2branchindex;
    VALUE *original_iseq;

    unsigned int local_table_size;
    unsigned int ic_size;     /* number of IC caches */
    unsigned int ise_size;    /* number of ISE caches */
    unsigned int ivc_size;    /* number of IVC caches */
    unsigned int icvarc_size; /* number of ICVARC caches */
    unsigned int ci_size;
    unsigned int stack_max;   /* for stack overflow check */

    unsigned int builtin_attrs;
#if USE_YJIT || USE_ZJIT
    /* function pointer for JIT code on jit_exec() */
    rb_jit_func_t jit_entry;
    /* number of calls on jit_exec() */
    long unsigned jit_entry_calls;
    /* function pointer for JIT code on jit_exec_exception() */
    rb_jit_func_t jit_exception;
    /* number of calls on jit_exec_exception() */
    long unsigned jit_exception_calls;

    uint64_t yjit_calls_at_interv;
#endif

    unsigned int local_hooks_cnt;
#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif

#if USE_LAZY_LOAD
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
#endif

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
    return iseq;
}
static inline bool
rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
{
    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
}
static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
    return rb_iseq_check(def->body.iseq.iseqptr);
}
enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};
#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, &ruby_vm_data_type, (ptr))

    rb_vm_at_exit_func *func;
void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);
enum rb_hook_list_type {
    hook_list_type_ractor_local,
    hook_list_type_targeted_iseq,
    hook_list_type_targeted_def,
    hook_list_type_global
};

    unsigned int running;
    enum rb_hook_list_type type;
    struct ccan_list_head set;
    unsigned int blocking_cnt;

    rb_nativethread_lock_t lock;
    unsigned int lock_rec;

    rb_nativethread_cond_t terminate_cond;
    bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
    bool barrier_waiting;
    unsigned int barrier_cnt;
    rb_nativethread_cond_t barrier_complete_cond;
    rb_nativethread_cond_t barrier_release_cond;
#endif

#ifdef RUBY_THREAD_PTHREAD_H
    rb_nativethread_lock_t lock;
    rb_nativethread_cond_t cond;
    unsigned int snt_cnt;
    unsigned int dnt_cnt;

    unsigned int running_cnt;

    unsigned int max_cpu;
    struct ccan_list_head grq;
    unsigned int grq_cnt;

    struct ccan_list_head running_threads;
    struct ccan_list_head timeslice_threads;
    struct ccan_list_head zombie_threads;

    bool timeslice_wait_inf;

    rb_nativethread_cond_t barrier_complete_cond;
    rb_nativethread_cond_t barrier_release_cond;
    bool barrier_waiting;
    unsigned int barrier_waiting_cnt;
    unsigned int barrier_serial;

    unsigned int barrier_lock_rec;
#endif
#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;

    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    VALUE mark_object_ary;

    const VALUE special_exceptions[ruby_special_error_count];

    VALUE cmd[RUBY_NSIG];
    int src_encoding_index;

    struct ccan_list_head workqueue;
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    struct gc_mark_func_data_struct {
        void *data;
        void (*mark_func)(VALUE v, void *data);
    } *gc_mark_func_data;

    VALUE cc_refinement_set;

    ID inserting_constant_cache_id;
#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE];

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    size_t thread_vm_stack_size;
    size_t thread_machine_stack_size;
    size_t fiber_vm_stack_size;
    size_t fiber_machine_stack_size;
extern bool ruby_vm_during_cleanup;

#define RUBY_VM_SIZE_ALIGN 4096
#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE))

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE))
#if defined(__powerpc64__) || defined(__ppc64__)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE))
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE))
#endif
#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
/* sanitizers consume a lot of machine stack */
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif
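/*
 * For scale, assuming an 8-byte VALUE: the defaults above give each thread
 * a 1 MiB VM stack and 1 MiB machine stack, and each fiber a 128 KiB VM
 * stack and 512 KiB machine stack. The sanitizer branch raises the machine
 * stack sizes because instrumented frames consume far more space.
 */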
#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif
enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

    enum rb_block_type type;

    const void *block_code;
static inline rb_thread_t *
rb_thread_ptr(VALUE thval)
{
    return (rb_thread_t *)rb_check_typeddata(thval, &thread_data_type);
}
enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};
#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    /* no-op */
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    /* no-op */
}
#endif
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) +
              sizeof(rb_vm_tag_jmpbuf_t) < sizeof(struct rb_vm_tag));
    size_t vm_stack_size;

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_serial_t ractor_id;

    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    const VALUE *root_lep;

    VALUE passed_block_handler;

    uint8_t raised_flag;

    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    size_t stack_maxsize;

#ifdef RUBY_ASAN_ENABLED
    void *asan_fake_stack_handle;
#endif

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

#define VM_CORE_H_EC_DEFINED 1
    struct ccan_list_node lt_node;

    bool mn_schedulable;

    BITFIELD(enum rb_thread_status, status, 2);

    unsigned int main_thread : 1;
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;

    uint32_t running_time_us;

    void *blocking_region_buffer;
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    rb_nativethread_lock_t interrupt_lock;

    VALUE locking_mutex;

    struct ccan_list_head interrupt_exec_tasks;
    VALUE (*func)(void *);

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    unsigned int blocking;

    void **specific_storage;
static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return th ? (unsigned int)th->serial : 0;
}
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,

    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
RUBY_SYMBOL_EXPORT_BEGIN

    VALUE script_lines);

rb_iseq_new_with_callback_new_callback(

RUBY_SYMBOL_EXPORT_END
#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, &ruby_proc_data_type, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1;
    unsigned int is_lambda: 1;
    unsigned int is_isolated: 1;
} rb_proc_t;
RUBY_SYMBOL_EXPORT_BEGIN
RUBY_SYMBOL_EXPORT_END

    unsigned int env_size;
#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, &ruby_binding_data_type, (ptr))

    const VALUE pathobj;
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04
enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0,
    VM_SVAR_BACKREF = 1,

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2
};
typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
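/*
 * Tagging note: a frame pointer stored on the VM stack gets its low bit set
 * by GC_GUARDED_PTR(), which makes the word look like a Fixnum to the GC,
 * so conservative marking will not chase it as an object reference. An
 * illustrative round trip:
 *
 *   VALUE guarded = GC_GUARDED_PTR(prev_ep);         // tag the low bit
 *   const VALUE *ep = GC_GUARDED_PTR_REF(guarded);   // strip the tag bits
 *   VM_ASSERT(GC_GUARDED_PTR_P(guarded));
 */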
enum vm_frame_env_flags {
    /* frame types (magic values) */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flags */
    VM_FRAME_FLAG_FINISH               = 0x0020,
    VM_FRAME_FLAG_BMETHOD              = 0x0040,
    VM_FRAME_FLAG_CFRAME               = 0x0080,
    VM_FRAME_FLAG_LAMBDA               = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW            = 0x0400,
    VM_FRAME_FLAG_PASSED               = 0x0800,
    VM_FRAME_FLAG_BOX_REQUIRE          = 0x1000,

    /* env flags */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};
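/*
 * Layout note: the magic values and the flag bits share the single VALUE
 * stored at ep[VM_ENV_DATA_INDEX_FLAGS]. Every VM_FRAME_MAGIC_* constant
 * occupies the bits under VM_FRAME_MAGIC_MASK, while the VM_FRAME_FLAG_* /
 * VM_ENV_FLAG_* bits sit in positions every magic value leaves clear, so
 * both kinds can be queried with one bitwise AND (see VM_ENV_FLAGS() below).
 */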
#define VM_ENV_DATA_SIZE             ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF    (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL    (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS      ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV        ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;
}
static inline unsigned long
VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
{
    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline unsigned long
VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}
static inline bool
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

static inline int
VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
}
static inline int
VM_FRAME_BOX_REQUIRE_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE) != 0;
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline int
VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
{
    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}
static inline const VALUE *
VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
{
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return VM_ENV_PREV_EP_UNCHECKED(ep);
}
static inline int
VM_ENV_BOXED_P(const VALUE *ep)
{
    return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    if (VM_ENV_BOXED_P(ep)) {
        VM_ASSERT(VM_ENV_LOCAL_P(ep));
        return VM_BLOCK_HANDLER_NONE;
    }
    else {
        VM_ASSERT(VM_ENV_LOCAL_P(ep));
        return ep[VM_ENV_DATA_INDEX_SPECVAL];
    }
}
static inline const rb_box_t *
VM_ENV_BOX(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_BOXED_P(ep));
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const rb_box_t *
VM_ENV_BOX_UNCHECKED(const VALUE *ep)
{
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}
static inline VALUE
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return (VALUE)env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}
static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}
const VALUE *rb_vm_ep_local_ep(const VALUE *ep);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
    ((void *)(ecfp) > (void *)(cfp))

static inline bool
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
static inline bool
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        const struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return true;
    }
    else {
        return false;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return VM_TAGGED_PTR_REF(block_handler, 0x03);
}
static inline bool
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        const struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return true;
    }
    else {
        return false;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return VM_TAGGED_PTR_REF(block_handler, 0x03);
}
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return VM_TAGGED_PTR_REF(block_handler, 0x03);
}
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}
static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}
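/*
 * Handler encoding summary (as the predicates above imply): the two low
 * bits of a block handler VALUE discriminate its kind -- 01 tags a pointer
 * to a captured iseq block, 11 tags a pointer to a captured ifunc block,
 * and an untagged handler is itself either a Symbol or a Proc object.
 */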
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}
static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:   return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc:   return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}
static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)

bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

RUBY_SYMBOL_EXPORT_BEGIN
RUBY_SYMBOL_EXPORT_END
static inline VALUE
rb_vm_make_proc(rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}
void rb_vm_inc_const_missing_count(void);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);

void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do {                           \
    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE));                  \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t));     \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)];       \
    if (UNLIKELY((cfp) <= &bound[1])) {                                          \
        vm_stackoverflow();                                                      \
    }                                                                            \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
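/*
 * The VM value stack (sp, growing upward) and the control-frame array
 * (cfp, growing downward) share one allocation, so "overflow" means the
 * two regions would cross within `margin` extra slots. Illustrative check
 * before pushing a frame (operand names are assumptions):
 *
 *   CHECK_VM_STACK_OVERFLOW0(ec->cfp, ec->cfp->sp, body->stack_max);
 */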
#if RUBY_VM_THREAD_MODEL == 2

RUBY_EXTERN unsigned int ruby_vm_iseq_events_enabled;

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)
static inline rb_serial_t
rb_ec_serial(const rb_execution_context_t *ec)
{
    VM_ASSERT(ec->serial >= 1);
    return ec->serial;
}

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}
static inline rb_serial_t
rb_ec_ractor_id(const rb_execution_context_t *ec)
{
    rb_serial_t ractor_id = ec->ractor_id;
    return ractor_id;
}
static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef RB_THREAD_CURRENT_EC_NOINLINE
    rb_execution_context_t *ec = rb_current_ec_noinline();
  #else
    rb_execution_context_t *ec = ruby_current_ec;

    VM_ASSERT(ec == rb_current_ec_noinline());
  #endif
#else
    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}
static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}
static inline rb_vm_t *
rb_current_vm(void)
{
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);

    return ruby_current_vm_ptr;
}
void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);
NO_SANITIZE("thread", static inline bool
vm_locked_by_ractor_p(rb_vm_t *vm, const rb_ractor_t *cr))
{
    VM_ASSERT(cr == GET_RACTOR());
    return vm->ractor.sync.lock_owner == cr;
}
static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}
2203#error "unsupported thread model"
enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
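/*
 * Illustrative flow: another thread (or a signal context) requests work by
 * atomically OR-ing a bit into ec->interrupt_flag, e.g.
 *
 *   RUBY_VM_SET_TRAP_INTERRUPT(main_th->ec);
 *
 * and the target thread notices it at its next RUBY_VM_CHECK_INTS()
 * checkpoint, unless that bit is masked out by ec->interrupt_mask.
 */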
static inline bool
RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
{
    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
}
2231#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2232 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2234 if (current_clock != ec->checked_clock) {
2235 ec->checked_clock = current_clock;
2236 RUBY_VM_SET_TIMER_INTERRUPT(ec);
2239 return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
int rb_signal_buff_size(void);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == rb_current_ec_noinline());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
void rb_hook_list_connect_local_tracepoint(rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    VM_ASSERT((hooks->events & flag) != 0);

    rb_trace_arg_t trace_arg;
    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}
    unsigned int targeted_hooks_cnt;

static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}

static inline rb_hook_list_t *
rb_vm_global_hooks(const rb_execution_context_t *ec)
{
    return &rb_ec_vm_ptr(ec)->global_hooks;
}

static inline rb_hook_list_t *
rb_ec_hooks(const rb_execution_context_t *ec, rb_event_flag_t event)
{
    VM_ASSERT(event != 0 && ((event - 1) & event) == 0);

    if (event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK) {
        return rb_vm_global_hooks(ec);
    }
    else {
        return rb_ec_ractor_hooks(ec);
    }
}
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 1)
static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void);
size_t rb_vm_memsize_postponed_job_queue(void);
RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_END