#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)

#include "ruby/internal/config.h"

#include "ruby_assert.h"

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))

#define VM_ASSERT(expr, ...) \
    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()

#define VM_ASSERT(...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()

#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)

#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
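
/*
 * Illustrative sketch, not part of the original header: how the
 * critical-section assertions above are meant to pair up when
 * RUBY_ASSERT_CRITICAL_SECTION is defined.  `do_uninterruptible_work()` is a
 * hypothetical placeholder; the interrupt checkpoint further down
 * (RUBY_VM_CHECK_INTS) asserts that the counter is zero, so hitting a
 * checkpoint inside such a region trips an assertion under VM_CHECK_MODE.
 */
#if 0
static void
example_critical_section(void)
{
    RUBY_ASSERT_CRITICAL_SECTION_ENTER();  /* increments ruby_assert_critical_section_entered */
    do_uninterruptible_work();             /* hypothetical work that must not hit a checkpoint */
    RUBY_ASSERT_CRITICAL_SECTION_LEAVE();  /* asserts the counter is positive, then decrements it */
}
#endif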
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID

#include "ccan/list/list.h"

#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/sanitizers.h"
#include "internal/serial.h"
#include "internal/vm.h"

#include "ruby_atomic.h"

#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2

# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)
# define NSIG (_SIGMAX + 1)

# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)

#define RUBY_NSIG NSIG

# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var

# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
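
/*
 * Illustrative sketch, not part of the original header: the intended
 * lifecycle of the RB_ALTSTACK_* macros.  When USE_SIGALTSTACK is not
 * defined they expand to no-ops, so callers can use them unconditionally.
 */
#if 0
static void
example_altstack_lifecycle(void)
{
    void *altstack_var;
    RB_ALTSTACK_INIT(altstack_var, rb_allocate_sigaltstack()); /* altstack_var = rb_register_sigaltstack(...) */
    /* ... fatal-signal handlers may now run on the alternate stack ... */
    RB_ALTSTACK_FREE(altstack_var);                            /* free(altstack_var) */
}
#endif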
#include THREAD_IMPL_H

#define RUBY_VM_THREAD_MODEL 2

#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE

#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE

#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE

#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE

void rb_vm_encoded_insn_data_table_init(void);

typedef unsigned long rb_num_t;
typedef signed long rb_snum_t;

    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK = 0x2,
    RUBY_TAG_RETRY = 0x4,
    RUBY_TAG_RAISE = 0x6,
    RUBY_TAG_THROW = 0x7,
    RUBY_TAG_FATAL = 0x8,

#define TAG_NONE RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK RUBY_TAG_BREAK
#define TAG_NEXT RUBY_TAG_NEXT
#define TAG_RETRY RUBY_TAG_RETRY
#define TAG_REDO RUBY_TAG_REDO
#define TAG_RAISE RUBY_TAG_RAISE
#define TAG_THROW RUBY_TAG_THROW
#define TAG_FATAL RUBY_TAG_FATAL
#define TAG_MASK RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
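
/*
 * Illustrative note, not part of the original header: a VM throw state keeps
 * one of the RUBY_TAG_* values above in its low byte, with
 * VM_THROW_NO_ESCAPE_FLAG OR-ed in above it, e.g. for a hypothetical state:
 *
 *     int tag = state & VM_THROW_STATE_MASK;           // recovers the RUBY_TAG_* value
 *     int no_escape = state & VM_THROW_NO_ESCAPE_FLAG;
 */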
#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              sizeof(const rb_cref_t *)) <= RVALUE_SIZE);

#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)

#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)

#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)

#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))

#define PATHOBJ_PATH 0
#define PATHOBJ_REALPATH 1

pathobj_path(VALUE pathobj)

pathobj_realpath(VALUE pathobj)

typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
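
/*
 * Illustrative note, not part of the original header: as the macros above
 * spell out, an iseq's inline-storage array groups the four entry kinds
 * together -- ISEQ_IS_SIZE() is the sum of ic_size, ivc_size, ise_size and
 * icvarc_size, and ISEQ_IS_IC_ENTRY(body, idx) reaches the idx-th IC entry by
 * skipping over the ise/icvarc/ivc entries first.
 */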
enum rb_builtin_attr {
    BUILTIN_ATTR_LEAF = 0x01,
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    BUILTIN_ATTR_C_TRACE = 0x08,

    enum rb_iseq_type type;

    unsigned int iseq_size;

    unsigned int has_lead : 1;
    unsigned int has_opt : 1;
    unsigned int has_rest : 1;
    unsigned int has_post : 1;
    unsigned int has_kw : 1;
    unsigned int has_kwrest : 1;
    unsigned int has_block : 1;

    unsigned int ambiguous_param0 : 1;
    unsigned int accepts_no_kwarg : 1;
    unsigned int ruby2_keywords: 1;
    unsigned int anon_rest: 1;
    unsigned int anon_kwrest: 1;
    unsigned int use_block: 1;
    unsigned int forwardable: 1;

    const VALUE *opt_table;

    const struct rb_iseq_param_keyword {
        VALUE *default_values;

    unsigned int *positions;

#if VM_INSN_INFO_TABLE_IMPL == 2
    struct succ_index_table *succ_index_table;

    const ID *local_table;

    rb_snum_t flip_count;

    VALUE pc2branchindex;
    VALUE *original_iseq;

    unsigned int local_table_size;
    unsigned int ic_size;
    unsigned int ise_size;
    unsigned int ivc_size;
    unsigned int icvarc_size;
    unsigned int ci_size;
    unsigned int stack_max;

    unsigned int builtin_attrs;

    rb_jit_func_t jit_entry;
    long unsigned jit_entry_calls;

    rb_jit_func_t jit_exception;
    long unsigned jit_exception_calls;

    uint64_t yjit_calls_at_interv;

#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0

    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {

rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;

    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
    return rb_iseq_check(def->body.iseq.iseqptr);

enum ruby_special_exceptions {
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count

#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))
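
/*
 * Illustrative sketch, not part of the original header: the Get*Ptr macros
 * built on GetCoreDataFromValue() all follow the same pattern.  `vm_obj` is a
 * hypothetical VM wrapper object used only for this example.
 */
#if 0
static void
example_get_vm_ptr(VALUE vm_obj)
{
    rb_vm_t *vm;
    GetVMPtr(vm_obj, vm);  /* expands to vm = CoreDataFromValue(vm_obj, rb_vm_t), a cast of the wrapped pointer */
}
#endif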
    rb_vm_at_exit_func *func;

void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);

    unsigned int running;

    struct ccan_list_head set;

    unsigned int blocking_cnt;

    rb_nativethread_lock_t lock;
    unsigned int lock_rec;

    rb_nativethread_cond_t terminate_cond;
    bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
    bool barrier_waiting;
    unsigned int barrier_cnt;
    rb_nativethread_cond_t barrier_cond;

    rb_nativethread_lock_t lock;

    rb_nativethread_cond_t cond;
    unsigned int snt_cnt;
    unsigned int dnt_cnt;

    unsigned int running_cnt;

    unsigned int max_cpu;
    struct ccan_list_head grq;
    unsigned int grq_cnt;

    struct ccan_list_head running_threads;

    struct ccan_list_head timeslice_threads;

    struct ccan_list_head zombie_threads;

    bool timeslice_wait_inf;

    rb_nativethread_cond_t barrier_complete_cond;
    rb_nativethread_cond_t barrier_release_cond;
    bool barrier_waiting;
    unsigned int barrier_waiting_cnt;
    unsigned int barrier_serial;
#ifdef USE_SIGALTSTACK

    rb_serial_t fork_gen;
    struct ccan_list_head waiting_fds;

    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    VALUE mark_object_ary;

    const VALUE special_exceptions[ruby_special_error_count];

    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;

    VALUE cmd[RUBY_NSIG];

    int src_encoding_index;

    struct ccan_list_head workqueue;
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;

    struct gc_mark_func_data_struct {
        void (*mark_func)(VALUE v, void *data);

    st_table *unused_block_warning_table;

    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023

    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE];

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK

    size_t thread_vm_stack_size;
    size_t thread_machine_stack_size;
    size_t fiber_vm_stack_size;
    size_t fiber_machine_stack_size;

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE))
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE))

#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE))
#if defined(__powerpc64__) || defined(__ppc64__)
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE))
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE))

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
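
/*
 * Illustrative note, not part of the original header: the sizes above are in
 * VALUE slots, so on a 64-bit build (sizeof(VALUE) == 8) the defaults work
 * out to 1 MiB for a thread's VM stack (128 * 1024 * 8 bytes) and 128 KiB for
 * a fiber's VM stack (16 * 1024 * 8 bytes); the sanitizer overrides enlarge
 * the machine stacks considerably.
 */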
#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc

    enum rb_block_type type;

    const void *block_code;

rb_thread_ptr(VALUE thval)

enum rb_thread_status {
    THREAD_STOPPED_FOREVER,

typedef RUBY_JMP_BUF rb_jmpbuf_t;

typedef void *rb_jmpbuf_t[5];

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)

typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));

rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)

typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)

rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)

    rb_vm_tag_jmpbuf_t buf;

    enum ruby_tag_type state;
    unsigned int lock_rec;

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <

    size_t vm_stack_size;

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;

    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    const VALUE *root_lep;

    VALUE passed_block_handler;

    uint8_t raised_flag;

    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    size_t stack_maxsize;

#ifdef RUBY_ASAN_ENABLED
    void *asan_fake_stack_handle;

#ifndef rb_execution_context_t

#define rb_execution_context_t rb_execution_context_t

#define VM_CORE_H_EC_DEFINED 1

    struct ccan_list_node lt_node;

    bool mn_schedulable;

    BITFIELD(enum rb_thread_status, status, 2);

    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;

    uint32_t running_time_us;

    void *blocking_region_buffer;

#if OPT_CALL_THREADED_CODE

    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    rb_nativethread_lock_t interrupt_lock;

    VALUE locking_mutex;

    struct ccan_list_head interrupt_exec_tasks;

    VALUE (*func)(void *);

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func

    VALUE stat_insn_usage;

    unsigned int blocking;

    void **specific_storage;

static inline unsigned int
    return th ? (unsigned int)th->serial : 0;

    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
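
/*
 * Illustrative note, not part of the original header: a defineclass operand
 * keeps the class/module kind in its low three bits and the flags above them,
 * e.g. for a hypothetical operand `op`:
 *
 *     rb_vm_defineclass_type_t t = VM_DEFINECLASS_TYPE(op);  // op & 0x07
 *     int scoped    = VM_DEFINECLASS_SCOPED_P(op);           // op & 0x08
 *     int has_super = VM_DEFINECLASS_HAS_SUPERCLASS_P(op);   // op & 0x10
 */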
RUBY_SYMBOL_EXPORT_BEGIN

                                 VALUE script_lines);

rb_iseq_new_with_callback_new_callback(

RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

    unsigned int is_from_method: 1;
    unsigned int is_lambda: 1;
    unsigned int is_isolated: 1;

RUBY_SYMBOL_EXPORT_BEGIN

VALUE rb_proc_ractor_make_shareable(VALUE self);

RUBY_SYMBOL_EXPORT_END

    unsigned int env_size;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

    const VALUE pathobj;

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY 0x04

enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE

    VM_SVAR_LASTLINE = 0,
    VM_SVAR_BACKREF = 1,

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x

#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
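
/*
 * Illustrative sketch, not part of the original header: how the tagged
 * pointer helpers above combine.  `prev_ep` is a hypothetical environment
 * pointer used only for this example.
 */
#if 0
static void
example_guarded_ptr(const VALUE *prev_ep)
{
    VALUE guarded = GC_GUARDED_PTR(prev_ep);          /* sets the low tag bit */
    VM_ASSERT(GC_GUARDED_PTR_P(guarded));             /* the tag is observable */
    const VALUE *back = GC_GUARDED_PTR_REF(guarded);  /* masks the tag bits off again */
    VM_ASSERT(back == prev_ep);
}
#endif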
enum vm_frame_env_flags {
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_IFUNC = 0x66660001,
    VM_FRAME_MAGIC_EVAL = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY = 0x79990001,

    VM_FRAME_MAGIC_MASK = 0x7fff0001,

    VM_FRAME_FLAG_FINISH = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME = 0x0080,
    VM_FRAME_FLAG_LAMBDA = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED = 0x0800,

    VM_ENV_FLAG_LOCAL = 0x0002,
    VM_ENV_FLAG_ESCAPED = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED = 0x0010,

#define VM_ENV_DATA_SIZE ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2)
#define VM_ENV_DATA_INDEX_SPECVAL (-1)
#define VM_ENV_DATA_INDEX_FLAGS ( 0)
#define VM_ENV_DATA_INDEX_ENV ( 1)

#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
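
/*
 * Illustrative note, not part of the original header: relative to an
 * environment pointer `ep`, the indices above describe the fixed tail of
 * every environment:
 *
 *     ep[-2]  me/cref     (VM_ENV_DATA_INDEX_ME_CREF)
 *     ep[-1]  specval     (VM_ENV_DATA_INDEX_SPECVAL: prev ep or block handler)
 *     ep[ 0]  flags       (VM_ENV_DATA_INDEX_FLAGS)
 *     ep[ 1]  env object  (VM_ENV_DATA_INDEX_ENV: read via VM_ENV_ENVVAL() once escaped)
 *
 * VM_ENV_DATA_SIZE counts the me/cref, specval and flags slots, and the last
 * local variable sits at VM_ENV_INDEX_LAST_LVAR, i.e. ep[-3].
 */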
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);

VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;

static inline unsigned long
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);

    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;

    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;

    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;

    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;

rb_obj_is_iseq(VALUE iseq)
    return imemo_type_p(iseq, imemo_iseq);

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)

    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);

    return !VM_FRAME_CFRAME_P(cfp);

#define RUBYVM_CFUNC_FRAME_P(cfp) \
    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

VM_ENV_LOCAL_P(const VALUE *ep)
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);

VM_ENV_BLOCK_HANDLER(const VALUE *ep)
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);

VM_ENV_ESCAPED_P(const VALUE *ep)
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;

VM_ENV_ENVVAL(const VALUE *ep)
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));

VM_ENV_ENVVAL_PTR(const VALUE *ep)
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);

    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;

    *((VALUE *)ptr) = v;

VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
    VM_FORCE_WRITE(ptr, special_const_value);

VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
    ((void *)(ecfp) > (void *)(cfp))

    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));

VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));

    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;

VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));

VM_BH_IFUNC_P(VALUE block_handler)
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));

    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;

VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));

VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
        return block_handler_type_proc;

vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
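
/*
 * Illustrative note, not part of the original header: as the predicates above
 * encode it, a block handler is discriminated by its low two bits and its
 * type -- 01 tags a captured iseq block, 11 tags a captured ifunc block, a
 * Symbol is the symbol block handler, and anything else is treated as a Proc
 * (see vm_block_handler_type() above).
 */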
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
      case block_type_proc:

vm_block_type_set(const struct rb_block *block, enum rb_block_type type)

static inline const struct rb_block *
vm_proc_block(VALUE procval)

static inline const VALUE *vm_block_ep(const struct rb_block *block);

vm_proc_iseq(VALUE procval)
    return vm_block_iseq(vm_proc_block(procval));

static inline const VALUE *
vm_proc_ep(VALUE procval)
    return vm_block_ep(vm_proc_block(procval));

vm_block_iseq(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    VM_UNREACHABLE(vm_block_iseq);

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    VM_UNREACHABLE(vm_block_ep);

vm_block_self(const struct rb_block *block)
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
    VM_UNREACHABLE(vm_block_self);

VM_BH_TO_SYMBOL(VALUE block_handler)
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;

VM_BH_FROM_SYMBOL(VALUE symbol)

VM_BH_TO_PROC(VALUE block_handler)
    return block_handler;

VM_BH_FROM_PROC(VALUE procval)
#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)

bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);

NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

RUBY_SYMBOL_EXPORT_BEGIN

RUBY_SYMBOL_EXPORT_END

    return rb_vm_make_proc_lambda(ec, captured, klass, 0);

    return rb_vm_make_proc_lambda(ec, captured, klass, 1);

void rb_vm_inc_const_missing_count(void);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

rb_vm_living_threads_init(rb_vm_t *vm)
    ccan_list_head_init(&vm->waiting_fds);
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);

void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);

void rb_vm_env_write(const VALUE *ep, int index, VALUE v);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
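
/*
 * Illustrative note, not part of the original header: the overflow check
 * guards against the value stack (sp side) running into the control-frame
 * stack, which grows toward it from the other end; a caller about to push
 * `n` more VALUEs would typically write something like:
 *
 *     CHECK_VM_STACK_OVERFLOW(cfp, n);   // calls vm_stackoverflow() when the margin is exceeded
 */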
#if RUBY_VM_THREAD_MODEL == 2

#define GET_VM() rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC() rb_current_execution_context(true)

    return ec->thread_ptr;

    VM_ASSERT(th->ractor != NULL);

rb_current_execution_context(bool expect_ec)
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #if defined(__arm64__) || defined(__aarch64__)

    VM_ASSERT(ec == rb_current_ec_noinline());

    VM_ASSERT(!expect_ec || ec != NULL);

rb_current_thread(void)
    return rb_ec_thread_ptr(ec);

rb_current_ractor_raw(bool expect)
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;

        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;

rb_current_ractor(void)
    return rb_current_ractor_raw(true);

    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);

    return ruby_current_vm_ptr;

                              unsigned int recorded_lock_rec,
                              unsigned int current_lock_rec);

static inline unsigned int
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
        return vm->ractor.sync.lock_rec;

#error "unsupported thread model"

    TIMER_INTERRUPT_MASK = 0x01,
    PENDING_INTERRUPT_MASK = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK = 0x08,
    TERMINATE_INTERRUPT_MASK = 0x10,
    VM_BARRIER_INTERRUPT_MASK = 0x20,

#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
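
/*
 * Illustrative sketch, not part of the original header: the interrupt flag is
 * a bit set updated with atomic OR; a producer raises a mask bit and the
 * owning thread notices it at its next interrupt checkpoint via
 * RUBY_VM_INTERRUPTED()/RUBY_VM_INTERRUPTED_ANY().
 */
#if 0
static void
example_request_trap(rb_execution_context_t *ec)
{
    RUBY_VM_SET_TRAP_INTERRUPT(ec);  /* ATOMIC_OR(ec->interrupt_flag, TRAP_INTERRUPT_MASK) */
}
#endif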
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);

    return ec->interrupt_flag & ~(ec)->interrupt_mask;

int rb_signal_buff_size(void);

void rb_threadptr_signal_raise(rb_thread_t *th, int sig);

int rb_threadptr_execute_interrupts(rb_thread_t *, int);

void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);

void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)

#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);

    VM_ASSERT(ec == GET_EC());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);

#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;

    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;

    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;

    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);

    return &cr_pub->hooks;

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
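
/*
 * Illustrative note, not part of the original header: a trace event is fired
 * by handing the current ractor's hook list to EXEC_EVENT_HOOK_ORIG(); the
 * hook-mask test happens inline and the slow path is taken only when a hook
 * is registered for the event.  A hypothetical call site might look like:
 *
 *     EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, cfp->self, 0, 0, 0, Qundef);
 */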
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));

void rb_vm_trap_exit(rb_vm_t *vm);

void rb_vm_postponed_job_atfork(void);
void rb_vm_postponed_job_free(void);
size_t rb_vm_memsize_postponed_job_queue(void);
void rb_vm_postponed_job_queue_init(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

#define RUBY_EVENT_COVERAGE_LINE 0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

RUBY_SYMBOL_EXPORT_END