Ruby 3.5.0dev (2025-10-30 revision eed9441afc861f10d113102536d0e616f44a069f)
vm_core.h (eed9441afc861f10d113102536d0e616f44a069f)
#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if the given n is 0, fall back to RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif

/**
 * VM Debug Level
 *
 * debug level:
 *  0: no debug output
 *  1: show instruction name
 *  2: show stack frame when control stack frame is changed
 *  3: show stack status
 *  4: show register
 *  5:
 * 10: gc check
 */

#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef  VMDEBUG
#define VMDEBUG 3
#endif

#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr, ...) \
    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(/*expr, */...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif
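
/*
 * Illustrative sketch (not part of the upstream header): how the assertion
 * macros above are typically used. The function and its argument are
 * hypothetical.
 */
#if 0
static int
tag_to_kind(int tag)
{
    VM_ASSERT(tag >= 0, "tag must be non-negative");
    switch (tag) {
      case 0: return 1;
      case 1: return 2;
    }
    VM_UNREACHABLE(tag_to_kind); /* expands to rb_bug() when VM_CHECK_MODE > 0 */
    return -1;
}
#endif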

#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
/*
# Critical Section Assertions

These assertions ensure that no context switch occurs between two points in the
code. In theory, such code should already be protected by a mutex, and these
assertions verify that the mutex is actually held.

They are particularly useful when a mutex is held further up the call stack and
the code in question does not take the mutex itself: the assertions confirm
that someone else is holding it.

These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined,
which in turn is only defined when VM_CHECK_MODE is set.

## Example Usage

```c
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
// ... some code which does not invoke rb_vm_check_ints() ...
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
```

If `rb_vm_check_ints()` is called between `RUBY_ASSERT_CRITICAL_SECTION_ENTER()`
and `RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, the assertion fails.
*/
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#else
# include <setjmp.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif

#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/namespace.h"
#include "internal/sanitizers.h"
#include "internal/serial.h"
#include "internal/set_table.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"

#include "ruby/thread_native.h"

/*
 * implementation selector of get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif

#if defined(NSIG_MAX)           /* POSIX issue 8 */
# undef  NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)      /* FreeBSD */
# undef  NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)          /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)             /* 99% of everything else */
# /* take it */
#else                           /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#endif

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif

#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*****************/
/* configuration */
/*****************/

/* gcc ver. check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#endif /* OPT_CALL_THREADED_CODE */

void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef   signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE   RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK  RUBY_TAG_BREAK
#define TAG_NEXT   RUBY_TAG_NEXT
#define TAG_RETRY  RUBY_TAG_RETRY
#define TAG_REDO   RUBY_TAG_REDO
#define TAG_RAISE  RUBY_TAG_RAISE
#define TAG_THROW  RUBY_TAG_THROW
#define TAG_FATAL  RUBY_TAG_FATAL
#define TAG_MASK   RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};

/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
struct iseq_compile_data_ensure_node_stack;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;
    const rb_cref_t *ic_cref;
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);

struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry;

    /**
     * A null-terminated list of ids, used to represent a constant's path.
     * idNULL is used to represent the :: prefix, and 0 is used to denote the
     * end of the list.
     *
     * For example
     *   FOO        {rb_intern("FOO"), 0}
     *   FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
     *   ::FOO      {idNULL, rb_intern("FOO"), 0}
     *   ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
     */
    const ID *segments;
};

struct iseq_inline_iv_cache_entry {
    uint64_t value; // dest_shape_id in former half, attr_index in latter half
    ID iv_set_name;
};

struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_call_data *cd;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    bool kw_splat;
    VALUE heap_argv;
};

#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
#endif

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
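
/*
 * Sketch (illustrative only, not part of the upstream header): when a call
 * site passes more than VM_ARGC_STACK_MAX arguments, they are boxed into the
 * `heap_argv` array and `argc` alone no longer reflects the true count, so
 * CALLING_ARGC() must be used to read it back.
 */
#if 0
static int
calling_argc_example(const struct rb_calling_info *calling)
{
    /* RARRAY_LENINT(heap_argv) when the args were spilled to the heap,
     * calling->argc otherwise */
    return CALLING_ARGC(calling);
}
#endif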
321
323
324#if 1
325#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
326#else
327#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
328#endif
329#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
330
332 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
333 VALUE base_label; /* String */
334 VALUE label; /* String */
335 int first_lineno;
336 int node_id;
337 rb_code_location_t code_location;
339
340#define PATHOBJ_PATH 0
341#define PATHOBJ_REALPATH 1
342
343static inline VALUE
344pathobj_path(VALUE pathobj)
345{
346 if (RB_TYPE_P(pathobj, T_STRING)) {
347 return pathobj;
348 }
349 else {
350 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
351 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
352 }
353}
354
355static inline VALUE
356pathobj_realpath(VALUE pathobj)
357{
358 if (RB_TYPE_P(pathobj, T_STRING)) {
359 return pathobj;
360 }
361 else {
362 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
363 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
364 }
365}
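
/*
 * Sketch (illustrative only): `pathobj` stores either a frozen String when
 * path == realpath, or a frozen [path, realpath] Array; the two accessors
 * above collapse both representations.
 */
#if 0
static void
pathobj_example(VALUE pathobj)
{
    VALUE path     = pathobj_path(pathobj);     /* always a String */
    VALUE realpath = pathobj_realpath(pathobj); /* the realpath slot of the Array */
    (void)path;
    (void)realpath;
}
#endif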

/* Forward declarations */
typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
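
/*
 * Layout sketch (illustrative): is_entries packs the four inline-cache kinds
 * back to back, in the order given by the bracket comment above, so a TS_IC
 * operand index is offset past the IVC, ICVARC, and ISE segments:
 *
 *   [0, ivc_size)             TS_IVC entries
 *   [ivc_size, +icvarc_size)  TS_ICVARC entries
 *   [..., +ise_size)          TS_ISE entries
 *   [..., +ic_size)           TS_IC entries
 */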

/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};

// Attributes specified by Primitive.attr!
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    // The iseq acts like a C method in backtraces.
    BUILTIN_ATTR_C_TRACE = 0x08,
};

typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);

struct rb_iseq_constant_body {
    enum rb_iseq_type type;

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords: 1;
            unsigned int anon_rest: 1;
            unsigned int anon_kwrest: 1;
            unsigned int use_block: 1;
            unsigned int forwardable: 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct iseq_insn_info {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    enum lvar_state {
        lvar_uninitialized,
        lvar_initialized,
        lvar_reassigned,
    } *lvar_states;

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
    struct rb_call_data *call_data; // struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int ic_size;     // Number of IC caches
    unsigned int ise_size;    // Number of ISE caches
    unsigned int ivc_size;    // Number of IVC caches
    unsigned int icvarc_size; // Number of ICVARC caches
    unsigned int ci_size;
    unsigned int stack_max;   /* for stack overflow check */

    unsigned int builtin_attrs; // Union of rb_builtin_attr

    bool prism; // ISEQ was generated from the prism compiler

    union {
        iseq_bits_t * list; /* Find references for GC */
        iseq_bits_t single;
    } mark_bits;

    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_YJIT || USE_ZJIT
    // Function pointer for JIT code on jit_exec()
    rb_jit_func_t jit_entry;
    // Number of calls on jit_exec()
    long unsigned jit_entry_calls;
    // Function pointer for JIT code on jit_exec_exception()
    rb_jit_func_t jit_exception;
    // Number of calls on jit_exec_exception()
    long unsigned jit_exception_calls;
#endif

#if USE_YJIT
    // YJIT stores some data on each iseq.
    void *yjit_payload;
    // Used to estimate how frequently this ISEQ gets called
    uint64_t yjit_calls_at_interv;
#endif

#if USE_ZJIT
    // ZJIT stores some data on each iseq.
    void *zjit_payload;
#endif
};

/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags;   /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif

#if !USE_LAZY_LOAD
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
    return iseq;
}

static inline bool
rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
{
    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
}
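
/*
 * Sketch (illustrative only, not part of the upstream header): checking a
 * Primitive.attr! attribute set on an iseq.
 */
#if 0
static bool
iseq_is_leaf_example(const rb_iseq_t *iseq)
{
    /* true when the iseq makes no method calls, per BUILTIN_ATTR_LEAF above */
    return rb_iseq_attr_p(iseq, BUILTIN_ATTR_LEAF);
}
#endif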

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    // TODO: re-visit; to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}

enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    bool need_clean;
    bool is_local;
} rb_hook_list_t;

// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;

struct global_object_list {
    VALUE *varptr;
    struct global_object_list *next;
};

typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
            // win32
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
#endif
        } sync;

#ifdef RUBY_THREAD_PTHREAD_H
        // ractor scheduling
        struct {
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            bool locked;

            rb_nativethread_cond_t cond; // GRQ
            unsigned int snt_cnt; // count of shared NTs
            unsigned int dnt_cnt; // count of dedicated NTs

            unsigned int running_cnt;

            unsigned int max_cpu;
            struct ccan_list_head grq; // Global Ready Queue
            unsigned int grq_cnt;

            // running threads
            struct ccan_list_head running_threads;

            // threads which switch context by timeslice
            struct ccan_list_head timeslice_threads;

            struct ccan_list_head zombie_threads;

            // true if the timeslice timer is not enabled
            bool timeslice_wait_inf;

            // barrier
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
            bool barrier_waiting;
            unsigned int barrier_waiting_cnt;
            unsigned int barrier_serial;
            struct rb_ractor_struct *barrier_ractor;
            unsigned int barrier_lock_rec;
        } sched;
#endif
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    struct global_object_list *global_object_list;
    const VALUE special_exceptions[ruby_special_error_count];

    /* namespace */
    rb_namespace_t *root_namespace;
    rb_namespace_t *main_namespace;

    /* load */
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* postponed_job (async-signal-safe, and thread-safe) */
    struct rb_postponed_job_queue *postponed_job_queue;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    struct {
        struct rb_objspace *objspace;
        struct gc_mark_func_data_struct {
            void *data;
            void (*mark_func)(VALUE v, void *data);
        } *mark_func_data;
    } gc;

    rb_at_exit_list *at_exit;

    const struct rb_builtin_function *builtin_function_table;

    st_table *ci_table;
    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme
    set_table *unused_block_warning_table;
    set_table *cc_refinement_table;

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* size in bytes */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;
} rb_vm_t;

extern bool ruby_vm_during_cleanup;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*  128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
/* It seems sanitizers consume A LOT of machine stack */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif

struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};

typedef struct rb_control_frame_struct {
    const VALUE *pc;        // cfp[0]
    VALUE *sp;              // cfp[1]
    const rb_iseq_t *iseq;  // cfp[2]
    VALUE self;             // cfp[3] / block[0]
    const VALUE *ep;        // cfp[4] / block[1]
    const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
    void *jit_return;       // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        // cfp[7]
#endif
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}

enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  `rb_vm_tag_jmpbuf_t` represents a buffer used to
  long jump to a C frame associated with `rb_vm_tag`.

  The use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
  following functions:
  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.

  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
*/
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
/*
  A WebAssembly target with Asyncify-based SJLJ needs
  to capture the execution context by unwinding/rewinding
  call frames into a jump buffer. The buffer space tends
  to be considerably large, unlike other architectures'
  register-based buffers.
  Therefore, we allocate the buffer on the heap in such
  environments.
*/
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
#endif
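
/*
 * Lifecycle sketch of the init/deinit contract described above (illustrative
 * only; assumes an `rb_setjmp`-style macro as named in the comment):
 */
#if 0
static void
tag_jmpbuf_lifecycle_example(void)
{
    rb_vm_tag_jmpbuf_t jmpbuf;
    rb_vm_tag_jmpbuf_init(&jmpbuf);   /* heap-allocates on wasm targets, no-op elsewhere */
    if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(jmpbuf)) == 0) {
        /* ... code that may long jump back here ... */
    }
    rb_vm_tag_jmpbuf_deinit(&jmpbuf); /* must pair with _init */
}
#endif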

/*
  The members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that the entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
              sizeof(struct rb_vm_tag));

struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};

struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    struct {
        VALUE obj;
        VALUE fields_obj;
    } gen_fields_cache;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;

#ifdef RUBY_ASAN_ENABLED
        void *asan_fake_stack_handle;
#endif
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;

typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;

    struct rb_thread_sched_item sched;
    bool mn_schedulable;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;
    struct ccan_list_head interrupt_exec_tasks;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;

static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return th ? (unsigned int)th->serial : 0;
}

typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
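
/*
 * Sketch (illustrative only; `flags` is a hypothetical defineclass operand):
 * the low three bits select the type, and the remaining bits are flags.
 */
#if 0
static void
defineclass_flags_example(rb_num_t flags)
{
    if (VM_DEFINECLASS_TYPE(flags) == VM_DEFINECLASS_TYPE_CLASS &&
        VM_DEFINECLASS_HAS_SUPERCLASS_P(flags)) {
        /* a `class Foo < Bar` style definition */
    }
}
#endif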

/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new         (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top     (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*,
                                VALUE script_lines);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    struct rb_iseq_new_with_callback_callback_func *memo =
        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
    memo->func = func;
    memo->data = ptr;

    return memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc,
                                     VALUE name, VALUE path, VALUE realpath, int first_lineno,
                                     const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);

VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;

/* used at compile time and by the send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
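
/*
 * Sketch (illustrative only): the low bits tag a raw pointer stored in a
 * VALUE slot so the GC treats it as an immediate and does not chase it;
 * GC_GUARDED_PTR() sets bit 0 and GC_GUARDED_PTR_REF() masks the tag off.
 */
#if 0
static int
guarded_ptr_roundtrip(const VALUE *prev_ep)
{
    VALUE specval = GC_GUARDED_PTR(prev_ep); /* tagged; skipped by GC marking */
    return GC_GUARDED_PTR_REF(specval) == (void *)prev_ep;
}
#endif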

enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
     *
     * X   : tag for GC marking (it appears to be a Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 8 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH               = 0x0020,
    VM_FRAME_FLAG_BMETHOD              = 0x0040,
    VM_FRAME_FLAG_CFRAME               = 0x0080,
    VM_FRAME_FLAG_LAMBDA               = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW            = 0x0400,
    VM_FRAME_FLAG_PASSED               = 0x0800,
    VM_FRAME_FLAG_NS_REQUIRE           = 0x1000,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};

#define VM_ENV_DATA_SIZE          ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
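
/*
 * Layout sketch (illustrative) of the slots around an `ep`:
 *
 *   ep[-2] (VM_ENV_DATA_INDEX_ME_CREF)  method entry / cref
 *   ep[-1] (VM_ENV_DATA_INDEX_SPECVAL)  block handler, or guarded prev ep
 *   ep[ 0] (VM_ENV_DATA_INDEX_FLAGS)    frame/env flags (Fixnum-tagged)
 *   ep[ 1] (VM_ENV_DATA_INDEX_ENV)      escaped env object, once on the heap
 *
 * Local variables occupy the slots below the env data; the one adjacent to
 * it is ep[VM_ENV_INDEX_LAST_LVAR] (== ep[-3]).
 */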

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;
}

static inline unsigned long
VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
{
    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline unsigned long
VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

static inline int
VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
}

static inline int
VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_NS_REQUIRE) != 0;
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline int
VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
{
    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
{
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return VM_ENV_PREV_EP_UNCHECKED(ep);
}

static inline bool
VM_ENV_NAMESPACED_P(const VALUE *ep)
{
    return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    if (VM_ENV_NAMESPACED_P(ep)) {
        VM_ASSERT(VM_ENV_LOCAL_P(ep));
        return VM_BLOCK_HANDLER_NONE;
    }

    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

static inline const rb_namespace_t *
VM_ENV_NAMESPACE(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_NAMESPACED_P(ep));
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return (const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const rb_namespace_t *
VM_ENV_NAMESPACE_UNCHECKED(const VALUE *ep)
{
    return (const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
    ((void *)(ecfp) > (void *)(cfp))

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}

static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}
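
/*
 * Sketch (illustrative only, not part of the upstream header): consuming a
 * block handler via the type dispatch above.
 */
#if 0
static void
describe_block_handler(VALUE block_handler)
{
    switch (vm_block_handler_type(block_handler)) {
      case block_handler_type_iseq:   break; /* tagged captured block (low bits 01) */
      case block_handler_type_ifunc:  break; /* tagged captured block (low bits 11) */
      case block_handler_type_symbol: break; /* e.g. a block passed as &:to_s */
      case block_handler_type_proc:   break; /* a Proc object */
    }
}
#endif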

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}

/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_namespace_t *ns);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);

static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
rb_thread_t *ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1988
1989void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1990
1991rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
1992
1993const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1994const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);
1995
1996#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1997
1998#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1999 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
2000 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
2001 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
2002 if (UNLIKELY((cfp) <= &bound[1])) { \
2003 vm_stackoverflow(); \
2004 } \
2005} while (0)
2006
2007#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
2008 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
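
/*
Illustrative sketch (not taken from the original source): the value stack and
the control-frame stack grow toward each other within one VM stack region, so
overflow is detected when the next control frame would cross the bound located
`margin` VALUEs above `sp`. A caller about to push `n` values (a hypothetical
count) would check first:

```c
CHECK_VM_STACK_OVERFLOW(ec->cfp, n); // raises via vm_stackoverflow() on overflow
```
*/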

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int    ruby_vm_event_local_num;

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef RB_THREAD_CURRENT_EC_NOINLINE
    rb_execution_context_t * volatile ec = rb_current_ec();
  #else
    rb_execution_context_t * volatile ec = ruby_current_ec;
  #endif

    /* On shared objects, `__tls_get_addr()` is used to access the TLS, and
     * the address of `ruby_current_ec` can be stored on a function frame.
     * However, this address can be misused after the native thread migration
     * of a coroutine:
     *   1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     *   2) Context-switch and resume the coroutine on NT2.
     *   3) `ptr` is used on NT2, but it still points into the TLS of NT1.
     * This assertion catches such misuse.
     *
     * To avoid this, `GET_EC()` should be called only once per frame.
     * Note that inlining can reintroduce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
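
/*
A minimal sketch of the pattern recommended above; `consume()` is a
hypothetical helper. The point is to read the TLS value once per frame and
pass the local copy down, rather than calling GET_EC() repeatedly:

```c
static void
example(void)
{
    rb_execution_context_t *ec = GET_EC(); // one TLS access per frame
    consume(ec);                           // reuse the local copy from here on
}
```
*/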

static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}

static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}

void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

/* This is technically a data race, as the field is checked without the lock;
 * however, we only compare against a value that our own thread writes. */
NO_SANITIZE("thread", static inline bool
vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
{
    VM_ASSERT(cr == GET_RACTOR());
    return vm->ractor.sync.lock_owner == cr;
}

static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}
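
/*
A usage sketch (an assumption, not taken from this header): record the
recursion depth before a region that may longjmp, then reconcile it with
rb_ec_vm_lock_rec_release() afterwards:

```c
unsigned int recorded = rb_ec_vm_lock_rec(ec); // 0 unless this ractor holds the VM lock
// ... region that must leave the recursion depth unchanged ...
unsigned int current = rb_ec_vm_lock_rec(ec);
if (recorded != current) rb_ec_vm_lock_rec_release(ec, recorded, current);
```
*/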

#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)

static inline bool
RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
{
    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
}

static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
}
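
/*
Illustrative sketch (assumed usage): an interrupt is requested by atomically
OR-ing one of the mask bits into the target context's interrupt_flag; the
target observes it at its next check, unless the bit is blocked by its
interrupt_mask:

```c
RUBY_VM_SET_INTERRUPT(target_ec);   // request a pending interrupt
// ... later, on the target's own native thread:
if (RUBY_VM_INTERRUPTED(target_ec)) {
    /* service pending/trap interrupts */
}
```
*/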

VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == rb_current_ec_noinline());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
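
/*
A minimal sketch of the intended call pattern (the loop body is hypothetical):
long-running C code polls at safe points so that signals, timer interrupts,
and Thread#raise can be serviced:

```c
while (more_work()) {
    do_one_step();
    RUBY_VM_CHECK_INTS(ec); // may run interrupt handlers or switch threads
}
```
*/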

/* tracer */

struct rb_trace_arg_struct {
    rb_event_flag_t event;
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;

    /* calc from cfp */
    int lineno;
    VALUE path;
};

void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);

#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
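
/*
Illustrative note (assumption): because the events test happens before the
function call, the value arguments are evaluated only when a matching hook is
registered, so an expensive argument costs nothing otherwise:

```c
// `hooks` and expensive_data() are hypothetical; expensive_data() runs only
// if a RUBY_EVENT_CALL hook is registered in `hooks`.
EXEC_EVENT_HOOK_ORIG(ec, hooks, RUBY_EVENT_CALL, self, mid, mid, klass, expensive_data(), 0);
```
*/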

static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}

struct rb_ractor_pub {
    VALUE self;
    uint32_t id;
    rb_hook_list_t hooks;
};

static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)

static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}

void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
void rb_vm_postponed_job_free(void); /* vm_trace.c */
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

// ractor.c
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
RUBY_EXTERN VALUE rb_eRactorIsolationError;

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */