Ruby 4.0.0dev (2025-12-04 revision b43e66d3b37d4bd029a90dbee376e475aed79d2a)
vm_core.h (b43e66d3b37d4bd029a90dbee376e475aed79d2a)
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
20// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
58#if VM_CHECK_MODE > 0
59#define VM_ASSERT(expr, ...) \
60 RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62#define RUBY_ASSERT_CRITICAL_SECTION
63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64#else
65#define VM_ASSERT(/*expr, */...) ((void)0)
66#define VM_UNREACHABLE(func) UNREACHABLE
67#define RUBY_DEBUG_THREAD_SCHEDULE()
68#endif
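/*
 A short usage sketch (names are illustrative): the optional trailing message
 is only used when VM_CHECK_MODE > 0; otherwise VM_ASSERT() expands to
 ((void)0) and VM_UNREACHABLE() to the compiler's UNREACHABLE hint.

 ```c
 VM_ASSERT(cfp != NULL);
 VM_ASSERT(list->cnt > 0, "list must not be empty here");

 switch (type) {
   case known_type: break;
   default: VM_UNREACHABLE(my_function); // rb_bug("my_function: unreachable")
 }
 ```
*/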
69
70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
72#if defined(RUBY_ASSERT_CRITICAL_SECTION)
73/*
74# Critical Section Assertions
75
76These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
77such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.
78
79The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
80may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
81held by someone else.
82
83These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84is set.
85
86## Example Usage
87
88```c
89RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90// ... some code which does not invoke rb_vm_check_ints() ...
91RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92```
93
94If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96*/
97extern int ruby_assert_critical_section_entered;
98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100#else
101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103#endif
104
105#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106# include "wasm/setjmp.h"
107#else
108# include <setjmp.h>
109#endif
110
111#if defined(__linux__) || defined(__FreeBSD__)
112# define RB_THREAD_T_HAS_NATIVE_ID
113#endif
114
116#include "ccan/list/list.h"
117#include "id.h"
118#include "internal.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/box.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
126#include "method.h"
127#include "node.h"
128#include "ruby/ruby.h"
129#include "ruby/st.h"
130#include "ruby_atomic.h"
131#include "vm_opts.h"
132
133#include "ruby/thread_native.h"
134/*
135 * implementation selector of get_insn_info algorithm
136 * 0: linear search
137 * 1: binary search
138 * 2: succinct bitvector
139 */
140#ifndef VM_INSN_INFO_TABLE_IMPL
141# define VM_INSN_INFO_TABLE_IMPL 2
142#endif
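/*
 A hypothetical override (nothing in this header requires it): defining the
 macro before this point, e.g. from the build command line, selects a
 different lookup algorithm.

 ```c
 #define VM_INSN_INFO_TABLE_IMPL 1  // use binary search instead of the
                                    // default succinct bitvector (2)
 ```
*/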
143
144#if defined(NSIG_MAX) /* POSIX issue 8 */
145# undef NSIG
146# define NSIG NSIG_MAX
147#elif defined(_SIG_MAXSIG) /* FreeBSD */
148# undef NSIG
149# define NSIG _SIG_MAXSIG
150#elif defined(_SIGMAX) /* QNX */
151# define NSIG (_SIGMAX + 1)
152#elif defined(NSIG) /* 99% of everything else */
153# /* take it */
154#else /* Last resort */
155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156#endif
157
158#define RUBY_NSIG NSIG
159
160#if defined(SIGCLD)
161# define RUBY_SIGCHLD (SIGCLD)
162#elif defined(SIGCHLD)
163# define RUBY_SIGCHLD (SIGCHLD)
164#endif
165
166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167# define USE_SIGALTSTACK
168void *rb_allocate_sigaltstack(void);
169void *rb_register_sigaltstack(void *);
170# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171# define RB_ALTSTACK_FREE(var) free(var)
172# define RB_ALTSTACK(var) var
173#else /* noop */
174# define RB_ALTSTACK_INIT(var, altstack)
175# define RB_ALTSTACK_FREE(var)
176# define RB_ALTSTACK(var) (0)
177#endif
178
179#include THREAD_IMPL_H
180#define RUBY_VM_THREAD_MODEL 2
181
182/*****************/
183/* configuration */
184/*****************/
185
186/* gcc ver. check */
187#if defined(__GNUC__) && __GNUC__ >= 2
188
189#if OPT_TOKEN_THREADED_CODE
190#if OPT_DIRECT_THREADED_CODE
191#undef OPT_DIRECT_THREADED_CODE
192#endif
193#endif
194
195#else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
197/* disable threaded code options */
198#if OPT_DIRECT_THREADED_CODE
199#undef OPT_DIRECT_THREADED_CODE
200#endif
201#if OPT_TOKEN_THREADED_CODE
202#undef OPT_TOKEN_THREADED_CODE
203#endif
204#endif
205
206/* call threaded code */
207#if OPT_CALL_THREADED_CODE
208#if OPT_DIRECT_THREADED_CODE
209#undef OPT_DIRECT_THREADED_CODE
210#endif /* OPT_DIRECT_THREADED_CODE */
211#endif /* OPT_CALL_THREADED_CODE */
212
213void rb_vm_encoded_insn_data_table_init(void);
214typedef unsigned long rb_num_t;
215typedef signed long rb_snum_t;
216
217enum ruby_tag_type {
218 RUBY_TAG_NONE = 0x0,
219 RUBY_TAG_RETURN = 0x1,
220 RUBY_TAG_BREAK = 0x2,
221 RUBY_TAG_NEXT = 0x3,
222 RUBY_TAG_RETRY = 0x4,
223 RUBY_TAG_REDO = 0x5,
224 RUBY_TAG_RAISE = 0x6,
225 RUBY_TAG_THROW = 0x7,
226 RUBY_TAG_FATAL = 0x8,
227 RUBY_TAG_MASK = 0xf
228};
229
230#define TAG_NONE RUBY_TAG_NONE
231#define TAG_RETURN RUBY_TAG_RETURN
232#define TAG_BREAK RUBY_TAG_BREAK
233#define TAG_NEXT RUBY_TAG_NEXT
234#define TAG_RETRY RUBY_TAG_RETRY
235#define TAG_REDO RUBY_TAG_REDO
236#define TAG_RAISE RUBY_TAG_RAISE
237#define TAG_THROW RUBY_TAG_THROW
238#define TAG_FATAL RUBY_TAG_FATAL
239#define TAG_MASK RUBY_TAG_MASK
240
241enum ruby_vm_throw_flags {
242 VM_THROW_NO_ESCAPE_FLAG = 0x8000,
243 VM_THROW_STATE_MASK = 0xff
244};
245
246/* forward declarations */
247struct rb_thread_struct;
249
250/* iseq data type */
252
254 rb_serial_t raw;
255 VALUE data[2];
256};
257
258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
260// imemo_constcache
262 VALUE flags;
263
264 VALUE value;
265 const rb_cref_t *ic_cref;
266};
267STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270
287
289 uint64_t value; // dest_shape_id in former half, attr_index in latter half
290 ID iv_set_name;
291};
292
296
298 struct {
299 struct rb_thread_struct *running_thread;
300 VALUE value;
301 } once;
302 struct iseq_inline_constant_cache ic_cache;
303 struct iseq_inline_iv_cache_entry iv_cache;
304};
305
307 const struct rb_call_data *cd;
308 const struct rb_callcache *cc;
309 VALUE block_handler;
310 VALUE recv;
311 int argc;
312 bool kw_splat;
313 VALUE heap_argv;
314};
315
316#ifndef VM_ARGC_STACK_MAX
317#define VM_ARGC_STACK_MAX 128
318#endif
319
320#define VM_KW_SPECIFIED_BITS_MAX (32-1) /* TODO: 32 -> Fixnum's max bits */
321
322# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
323
325
326#if 1
327#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
328#else
329#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
330#endif
331#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
332
334 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
335 VALUE base_label; /* String */
336 VALUE label; /* String */
337 int first_lineno;
338 int node_id;
339 rb_code_location_t code_location;
341
342#define PATHOBJ_PATH 0
343#define PATHOBJ_REALPATH 1
344
345static inline VALUE
346pathobj_path(VALUE pathobj)
347{
348 if (RB_TYPE_P(pathobj, T_STRING)) {
349 return pathobj;
350 }
351 else {
352 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
353 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
354 }
355}
356
357static inline VALUE
358pathobj_realpath(VALUE pathobj)
359{
360 if (RB_TYPE_P(pathobj, T_STRING)) {
361 return pathobj;
362 }
363 else {
364 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
365 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
366 }
367}
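/*
 A small sketch of the two pathobj forms the accessors above handle
 (values are illustrative only):

 ```c
 // pathobj == "foo.rb"                  -> path "foo.rb", realpath "foo.rb"
 // pathobj == ["foo.rb", "/abs/foo.rb"] -> path "foo.rb", realpath "/abs/foo.rb"
 ```
*/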
368
369/* Forward declarations */
370typedef uintptr_t iseq_bits_t;
371
372#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
373
374/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
375#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
376
377/* instruction sequence type */
378enum rb_iseq_type {
379 ISEQ_TYPE_TOP,
380 ISEQ_TYPE_METHOD,
381 ISEQ_TYPE_BLOCK,
382 ISEQ_TYPE_CLASS,
383 ISEQ_TYPE_RESCUE,
384 ISEQ_TYPE_ENSURE,
385 ISEQ_TYPE_EVAL,
386 ISEQ_TYPE_MAIN,
387 ISEQ_TYPE_PLAIN
388};
389
390// Attributes specified by Primitive.attr!
391enum rb_builtin_attr {
392 // The iseq does not call methods.
393 BUILTIN_ATTR_LEAF = 0x01,
 394 // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
395 BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
 396 // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
397 BUILTIN_ATTR_INLINE_BLOCK = 0x04,
398 // The iseq acts like a C method in backtraces.
399 BUILTIN_ATTR_C_TRACE = 0x08,
400};
401
402typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
403typedef VALUE (*rb_zjit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *, rb_jit_func_t);
404
406 enum rb_iseq_type type;
407
408 unsigned int iseq_size;
409 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
410
435 struct {
436 unsigned int has_lead : 1;
437 unsigned int has_opt : 1;
438 unsigned int has_rest : 1;
439 unsigned int has_post : 1;
440 unsigned int has_kw : 1;
441 unsigned int has_kwrest : 1;
442 unsigned int has_block : 1;
443
444 unsigned int ambiguous_param0 : 1; /* {|a|} */
445 unsigned int accepts_no_kwarg : 1;
446 unsigned int ruby2_keywords: 1;
447 unsigned int anon_rest: 1;
448 unsigned int anon_kwrest: 1;
449 unsigned int use_block: 1;
450 unsigned int forwardable: 1;
451 } flags;
452
453 unsigned int size;
454
455 int lead_num;
456 int opt_num;
457 int rest_start;
458 int post_start;
459 int post_num;
460 int block_start;
461
462 const VALUE *opt_table; /* (opt_num + 1) entries. */
463 /* opt_num and opt_table:
464 *
465 * def foo o1=e1, o2=e2, ..., oN=eN
466 * #=>
467 * # prologue code
468 * A1: e1
469 * A2: e2
470 * ...
471 * AN: eN
472 * AL: body
473 * opt_num = N
474 * opt_table = [A1, A2, ..., AN, AL]
475 */
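    /* A concrete instance of the layout above (labels A1/A2/AL are illustrative):
     *
     *   def foo(a = 1, b = 2); end
     *   #=> opt_num   = 2
     *   #   opt_table = [A1, A2, AL]  # A1: code for `a = 1`, A2: code for `b = 2`,
     *   #                             # AL: start of the body
     *   # Roughly, `foo()` starts executing at A1, `foo(10)` at A2, and
     *   # `foo(10, 20)` at AL.
     */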
476
478 int num;
479 int required_num;
480 int bits_start;
481 int rest_start;
482 const ID *table;
483 VALUE *default_values;
484 } *keyword;
485 } param;
486
487 rb_iseq_location_t location;
488
489 /* insn info, must be freed */
491 const struct iseq_insn_info_entry *body;
492 unsigned int *positions;
493 unsigned int size;
494#if VM_INSN_INFO_TABLE_IMPL == 2
495 struct succ_index_table *succ_index_table;
496#endif
497 } insns_info;
498
499 const ID *local_table; /* must free */
500
501 enum lvar_state {
502 lvar_uninitialized,
503 lvar_initialized,
504 lvar_reassigned,
505 } *lvar_states;
506
507 /* catch table */
508 struct iseq_catch_table *catch_table;
509
510 /* for child iseq */
511 const struct rb_iseq_struct *parent_iseq;
512 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
513
514 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
515 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
516
517 struct {
518 rb_snum_t flip_count;
519 VALUE script_lines;
520 VALUE coverage;
521 VALUE pc2branchindex;
522 VALUE *original_iseq;
523 } variable;
524
525 unsigned int local_table_size;
526 unsigned int ic_size; // Number of IC caches
527 unsigned int ise_size; // Number of ISE caches
528 unsigned int ivc_size; // Number of IVC caches
529 unsigned int icvarc_size; // Number of ICVARC caches
530 unsigned int ci_size;
531 unsigned int stack_max; /* for stack overflow check */
532
533 unsigned int builtin_attrs; // Union of rb_builtin_attr
534
535 bool prism; // ISEQ was generated from prism compiler
536
537 union {
538 iseq_bits_t * list; /* Find references for GC */
539 iseq_bits_t single;
540 } mark_bits;
541
542 struct rb_id_table *outer_variables;
543
544 const rb_iseq_t *mandatory_only_iseq;
545
546#if USE_YJIT || USE_ZJIT
547 // Function pointer for JIT code on jit_exec()
548 rb_jit_func_t jit_entry;
549 // Number of calls on jit_exec()
550 long unsigned jit_entry_calls;
551 // Function pointer for JIT code on jit_exec_exception()
552 rb_jit_func_t jit_exception;
553 // Number of calls on jit_exec_exception()
554 long unsigned jit_exception_calls;
555#endif
556
557#if USE_YJIT
558 // YJIT stores some data on each iseq.
559 void *yjit_payload;
560 // Used to estimate how frequently this ISEQ gets called
561 uint64_t yjit_calls_at_interv;
562#endif
563
564#if USE_ZJIT
565 // ZJIT stores some data on each iseq.
566 void *zjit_payload;
567#endif
568};
569
570/* T_IMEMO/iseq */
571/* typedef rb_iseq_t is in method.h */
573 VALUE flags; /* 1 */
574 VALUE wrapper; /* 2 */
575
576 struct rb_iseq_constant_body *body; /* 3 */
577
578 union { /* 4, 5 words */
579 struct iseq_compile_data *compile_data; /* used at compile time */
580
581 struct {
582 VALUE obj;
583 int index;
584 } loader;
585
586 struct {
587 struct rb_hook_list_struct *local_hooks;
588 rb_event_flag_t global_trace_events;
589 } exec;
590 } aux;
591};
592
593#define ISEQ_BODY(iseq) ((iseq)->body)
594
595#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
596#define USE_LAZY_LOAD 0
597#endif
598
599#if !USE_LAZY_LOAD
600static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
601#endif
602const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
603
604static inline const rb_iseq_t *
605rb_iseq_check(const rb_iseq_t *iseq)
606{
607 if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
608 rb_iseq_complete((rb_iseq_t *)iseq);
609 }
610 return iseq;
611}
612
613static inline bool
614rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
615{
616 return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
617}
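/*
 A minimal usage sketch: rb_iseq_attr_p() checks that *all* bits of `attr`
 are set in builtin_attrs, so it also works with a combined mask.

 ```c
 if (rb_iseq_attr_p(iseq, BUILTIN_ATTR_LEAF)) {
     // the iseq was marked by Primitive.attr! as not calling any methods
 }
 ```
*/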
618
619static inline const rb_iseq_t *
620def_iseq_ptr(rb_method_definition_t *def)
621{
622// TODO: revisit. To check for the bug, enable this assertion.
623#if VM_CHECK_MODE > 0
624 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
625#endif
626 return rb_iseq_check(def->body.iseq.iseqptr);
627}
628
629enum ruby_special_exceptions {
630 ruby_error_reenter,
631 ruby_error_nomemory,
632 ruby_error_sysstack,
633 ruby_error_stackfatal,
634 ruby_error_stream_closed,
635 ruby_special_error_count
636};
637
638#define GetVMPtr(obj, ptr) \
639 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
640
641struct rb_vm_struct;
642typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
643
644typedef struct rb_at_exit_list {
645 rb_vm_at_exit_func *func;
646 struct rb_at_exit_list *next;
648
649void *rb_objspace_alloc(void);
650void rb_objspace_free(void *objspace);
651void rb_objspace_call_finalizer(void);
652
653typedef struct rb_hook_list_struct {
654 struct rb_event_hook_struct *hooks;
655 rb_event_flag_t events;
656 unsigned int running;
657 bool need_clean;
658 bool is_local;
660
661
662// see builtin.h for definition
663typedef const struct rb_builtin_function *RB_BUILTIN;
664
666 VALUE *varptr;
667 struct global_object_list *next;
668};
669
670typedef struct rb_vm_struct {
671 VALUE self;
672
673 struct {
674 struct ccan_list_head set;
675 unsigned int cnt;
676 unsigned int blocking_cnt;
677
678 struct rb_ractor_struct *main_ractor;
679 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
680
681 struct {
682 // monitor
683 rb_nativethread_lock_t lock;
684 struct rb_ractor_struct *lock_owner;
685 unsigned int lock_rec;
686
687 // join at exit
688 rb_nativethread_cond_t terminate_cond;
689 bool terminate_waiting;
690
691#ifndef RUBY_THREAD_PTHREAD_H
692 // win32
693 bool barrier_waiting;
694 unsigned int barrier_cnt;
695 rb_nativethread_cond_t barrier_complete_cond;
696 rb_nativethread_cond_t barrier_release_cond;
697#endif
698 } sync;
699
700#ifdef RUBY_THREAD_PTHREAD_H
701 // ractor scheduling
702 struct {
703 rb_nativethread_lock_t lock;
704 struct rb_ractor_struct *lock_owner;
705 bool locked;
706
707 rb_nativethread_cond_t cond; // GRQ
708 unsigned int snt_cnt; // count of shared NTs
709 unsigned int dnt_cnt; // count of dedicated NTs
710
711 unsigned int running_cnt;
712
713 unsigned int max_cpu;
 714 struct ccan_list_head grq; // Global Ready Queue (GRQ)
715 unsigned int grq_cnt;
716
717 // running threads
718 struct ccan_list_head running_threads;
719
720 // threads which switch context by timeslice
721 struct ccan_list_head timeslice_threads;
722
723 struct ccan_list_head zombie_threads;
724
 725 // true if the timeslice timer is not enabled
726 bool timeslice_wait_inf;
727
728 // barrier
729 rb_nativethread_cond_t barrier_complete_cond;
730 rb_nativethread_cond_t barrier_release_cond;
731 bool barrier_waiting;
732 unsigned int barrier_waiting_cnt;
733 unsigned int barrier_serial;
734 struct rb_ractor_struct *barrier_ractor;
735 unsigned int barrier_lock_rec;
736 } sched;
737#endif
738 } ractor;
739
740#ifdef USE_SIGALTSTACK
741 void *main_altstack;
742#endif
743
744 rb_serial_t fork_gen;
745
746 /* set in single-threaded processes only: */
747 volatile int ubf_async_safe;
748
749 unsigned int running: 1;
750 unsigned int thread_abort_on_exception: 1;
751 unsigned int thread_report_on_exception: 1;
752 unsigned int thread_ignore_deadlock: 1;
753
754 /* object management */
755 VALUE mark_object_ary;
757 const VALUE special_exceptions[ruby_special_error_count];
758
759 /* Ruby Box */
760 rb_box_t *root_box;
761 rb_box_t *main_box;
762
763 /* load */
764 // For running the init function of statically linked
765 // extensions when they are loaded
766 struct st_table *static_ext_inits;
767
768 /* signal */
769 struct {
770 VALUE cmd[RUBY_NSIG];
771 } trap_list;
772
773 /* postponed_job (async-signal-safe, and thread-safe) */
774 struct rb_postponed_job_queue *postponed_job_queue;
775
776 int src_encoding_index;
777
778 /* workqueue (thread-safe, NOT async-signal-safe) */
779 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
780 rb_nativethread_lock_t workqueue_lock;
781
782 VALUE orig_progname, progname;
783 VALUE coverages, me2counter;
784 int coverage_mode;
785
786 struct {
787 struct rb_objspace *objspace;
788 struct gc_mark_func_data_struct {
789 void *data;
790 void (*mark_func)(VALUE v, void *data);
791 } *mark_func_data;
792 } gc;
793
794 rb_at_exit_list *at_exit;
795
796 const struct rb_builtin_function *builtin_function_table;
797
798 st_table *ci_table;
799 struct rb_id_table *negative_cme_table;
800 st_table *overloaded_cme_table; // cme -> overloaded_cme
801 set_table *unused_block_warning_table;
802 set_table *cc_refinement_table;
803
804 // This id table contains a mapping from ID to ICs. It does this with ID
805 // keys and nested st_tables as values. The nested tables have ICs as keys
806 // and Qtrue as values. It is used when inline constant caches need to be
807 // invalidated or ISEQs are being freed.
808 struct rb_id_table *constant_cache;
809 ID inserting_constant_cache_id;
810
811#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
812#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
813#endif
814 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
815
816#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
817 uint32_t clock;
818#endif
819
820 /* params */
 821 struct { /* sizes in bytes */
822 size_t thread_vm_stack_size;
823 size_t thread_machine_stack_size;
824 size_t fiber_vm_stack_size;
825 size_t fiber_machine_stack_size;
826 } default_params;
827} rb_vm_t;
828
829extern bool ruby_vm_during_cleanup;
830
831/* default values */
832
833#define RUBY_VM_SIZE_ALIGN 4096
834
835#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
836#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
837#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
838#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
839
840#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
841#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
842#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
843#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
844#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
845#else
846#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
847#endif
848
849#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
850/* It seems sanitizers consume A LOT of machine stack space */
851#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
852#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
853#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
854#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
855#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
856#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
857#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
858#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
859#endif
860
861#ifndef VM_DEBUG_BP_CHECK
862#define VM_DEBUG_BP_CHECK 0
863#endif
864
865#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
866#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
867#endif
868
870 VALUE self;
871 const VALUE *ep;
872 union {
873 const rb_iseq_t *iseq;
874 const struct vm_ifunc *ifunc;
875 VALUE val;
876 } code;
877};
878
879enum rb_block_handler_type {
880 block_handler_type_iseq,
881 block_handler_type_ifunc,
882 block_handler_type_symbol,
883 block_handler_type_proc
884};
885
886enum rb_block_type {
887 block_type_iseq,
888 block_type_ifunc,
889 block_type_symbol,
890 block_type_proc
891};
892
893struct rb_block {
894 union {
895 struct rb_captured_block captured;
896 VALUE symbol;
897 VALUE proc;
898 } as;
899 enum rb_block_type type;
900};
901
903 const VALUE *pc; // cfp[0]
904 VALUE *sp; // cfp[1]
905 const rb_iseq_t *iseq; // cfp[2]
906 VALUE self; // cfp[3] / block[0]
907 const VALUE *ep; // cfp[4] / block[1]
908 const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
909 void *jit_return; // cfp[6] -- return address for JIT code
910#if VM_DEBUG_BP_CHECK
911 VALUE *bp_check; // cfp[7]
912#endif
914
915extern const rb_data_type_t ruby_threadptr_data_type;
916
917static inline struct rb_thread_struct *
918rb_thread_ptr(VALUE thval)
919{
920 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
921}
922
923enum rb_thread_status {
924 THREAD_RUNNABLE,
925 THREAD_STOPPED,
926 THREAD_STOPPED_FOREVER,
927 THREAD_KILLED
928};
929
930#ifdef RUBY_JMP_BUF
931typedef RUBY_JMP_BUF rb_jmpbuf_t;
932#else
933typedef void *rb_jmpbuf_t[5];
934#endif
935
936/*
 937 The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
938 long jump to a C frame associated with `rb_vm_tag`.
939
 940 The use site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
941 following functions:
942 - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
943 - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
944
945 `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
946 `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
947*/
948#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
949/*
 950 The WebAssembly target with Asyncify-based SJLJ needs
 951 to capture the execution context by unwinding/rewinding
 952 call frames into a jump buffer. That buffer tends to be
 953 considerably larger than other architectures'
 954 register-based buffers.
 955 Therefore, we allocate the buffer on the heap in such
 956 environments.
957*/
958typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
959
960#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
961
962static inline void
963rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
964{
965 *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
966}
967
968static inline void
969rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
970{
971 ruby_xfree(*jmpbuf);
972}
973#else
974typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
975
976#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
977
978static inline void
979rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
980{
981 // no-op
982}
983
984static inline void
985rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
986{
987 // no-op
988}
989#endif
990
991/*
992 the members which are written in EC_PUSH_TAG() should be placed at
 993 the beginning and the end, so that the entire region is accessible.
994*/
995struct rb_vm_tag {
996 VALUE tag;
997 VALUE retval;
998 rb_vm_tag_jmpbuf_t buf;
999 struct rb_vm_tag *prev;
1000 enum ruby_tag_type state;
1001 unsigned int lock_rec;
1002};
1003
1004STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1005STATIC_ASSERT(rb_vm_tag_buf_end,
1006 offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1007 sizeof(struct rb_vm_tag));
1008
1011 void *arg;
1012};
1013
1014struct rb_mutex_struct;
1015
1016typedef struct rb_fiber_struct rb_fiber_t;
1017
1019 struct rb_waiting_list *next;
1020 struct rb_thread_struct *thread;
1021 struct rb_fiber_struct *fiber;
1022};
1023
1025 /* execution information */
1026 VALUE *vm_stack; /* must free, must mark */
 1027 size_t vm_stack_size; /* size in VALUE words (byte size / sizeof(VALUE)) */
1028 rb_control_frame_t *cfp;
1029
1030 struct rb_vm_tag *tag;
1031
1032 /* interrupt flags */
1033 rb_atomic_t interrupt_flag;
1034 rb_atomic_t interrupt_mask; /* size should match flag */
1035#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1036 uint32_t checked_clock;
1037#endif
1038
1039 rb_fiber_t *fiber_ptr;
1040 struct rb_thread_struct *thread_ptr;
1041
1042 /* storage (ec (fiber) local) */
1043 struct rb_id_table *local_storage;
1044 VALUE local_storage_recursive_hash;
1045 VALUE local_storage_recursive_hash_for_trace;
1046
1047 /* Inheritable fiber storage. */
1048 VALUE storage;
1049
1050 /* eval env */
1051 const VALUE *root_lep;
1052 VALUE root_svar;
1053
1054 /* trace information */
1055 struct rb_trace_arg_struct *trace_arg;
1056
1057 /* temporary places */
1058 VALUE errinfo;
1059 VALUE passed_block_handler; /* for rb_iterate */
1060
1061 uint8_t raised_flag; /* only 3 bits needed */
1062
1063 /* n.b. only 7 bits needed, really: */
1064 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1065
1066 VALUE private_const_reference;
1067
1068 struct {
1069 VALUE obj;
1070 VALUE fields_obj;
1071 } gen_fields_cache;
1072
1073 /* for GC */
1074 struct {
1075 VALUE *stack_start;
1076 VALUE *stack_end;
1077 size_t stack_maxsize;
1079
1080#ifdef RUBY_ASAN_ENABLED
1081 void *asan_fake_stack_handle;
1082#endif
1083 } machine;
1084};
1085
1086#ifndef rb_execution_context_t
1088#define rb_execution_context_t rb_execution_context_t
1089#endif
1090
1091// for builtin.h
1092#define VM_CORE_H_EC_DEFINED 1
1093
1094// Set the vm_stack pointer in the execution context.
1095void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1096
1097// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1098// @param ec the execution context to update.
1099// @param stack a pointer to the stack to use.
1100// @param size the size of the stack, as in `VALUE stack[size]`.
1101void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1102
1103// Clear (set to `NULL`) the vm_stack pointer.
1104// @param ec the execution context to update.
1105void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1106
1107// Close an execution context and free related resources that are no longer needed.
1108// @param ec the execution context to close.
1109void rb_ec_close(rb_execution_context_t *ec);
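/*
 A minimal sketch of how the three calls above fit together (the allocation
 here is hypothetical; real callers live in the thread/fiber setup code):

 ```c
 size_t size = RUBY_VM_FIBER_VM_STACK_SIZE / sizeof(VALUE);
 VALUE *stack = ruby_xmalloc(size * sizeof(VALUE));

 rb_ec_initialize_vm_stack(ec, stack, size); // set vm_stack and push the initial frame
 // ... run code on this execution context ...
 rb_ec_clear_vm_stack(ec);                   // vm_stack is set back to NULL; free `stack` separately
 ```
*/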
1110
1112 bool ractor_safe;
1113};
1114
1115typedef struct rb_ractor_struct rb_ractor_t;
1116
1117struct rb_native_thread;
1118
1119typedef struct rb_thread_struct {
1120 struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
1121 VALUE self;
1122 rb_ractor_t *ractor;
1123 rb_vm_t *vm;
1124 struct rb_native_thread *nt;
1126
1127 struct rb_thread_sched_item sched;
1128 bool mn_schedulable;
1129 rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1130
1131 VALUE last_status; /* $? */
1132
1133 /* for cfunc */
1134 struct rb_calling_info *calling;
1135
1136 /* for load(true) */
1137 VALUE top_self;
1138 VALUE top_wrapper;
1139
1140 /* thread control */
1141
1142 BITFIELD(enum rb_thread_status, status, 2);
1143 /* bit flags */
1144 unsigned int has_dedicated_nt : 1;
1145 unsigned int to_kill : 1;
1146 unsigned int abort_on_exception: 1;
1147 unsigned int report_on_exception: 1;
1148 unsigned int pending_interrupt_queue_checked: 1;
1149 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1150 uint32_t running_time_us; /* 12500..800000 */
1151
1152 void *blocking_region_buffer;
1153
1154 VALUE thgroup;
1155 VALUE value;
1156
1157 /* temporary place of retval on OPT_CALL_THREADED_CODE */
1158#if OPT_CALL_THREADED_CODE
1159 VALUE retval;
1160#endif
1161
1162 /* async errinfo queue */
1163 VALUE pending_interrupt_queue;
1164 VALUE pending_interrupt_mask_stack;
1165
1166 /* interrupt management */
1167 rb_nativethread_lock_t interrupt_lock;
1168 struct rb_unblock_callback unblock;
1169 VALUE locking_mutex;
1170 struct rb_mutex_struct *keeping_mutexes;
1171 struct ccan_list_head interrupt_exec_tasks;
1172
1173 struct rb_waiting_list *join_list;
1174
1175 union {
1176 struct {
1177 VALUE proc;
1178 VALUE args;
1179 int kw_splat;
1180 } proc;
1181 struct {
1182 VALUE (*func)(void *);
1183 void *arg;
1184 } func;
1185 } invoke_arg;
1186
1187 enum thread_invoke_type {
1188 thread_invoke_type_none = 0,
1189 thread_invoke_type_proc,
1190 thread_invoke_type_ractor_proc,
1191 thread_invoke_type_func
1192 } invoke_type;
1193
1194 /* fiber */
1195 rb_fiber_t *root_fiber;
1196
1197 VALUE scheduler;
1198 unsigned int blocking;
1199
1200 /* misc */
1201 VALUE name;
1202 void **specific_storage;
1203
1204 struct rb_ext_config ext_config;
1205} rb_thread_t;
1206
1207static inline unsigned int
1208rb_th_serial(const rb_thread_t *th)
1209{
1210 return th ? (unsigned int)th->serial : 0;
1211}
1212
1213typedef enum {
1214 VM_DEFINECLASS_TYPE_CLASS = 0x00,
1215 VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1216 VM_DEFINECLASS_TYPE_MODULE = 0x02,
1217 /* 0x03..0x06 is reserved */
1218 VM_DEFINECLASS_TYPE_MASK = 0x07
1219} rb_vm_defineclass_type_t;
1220
1221#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1222#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1223#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1224#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1225#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1226 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1227
1228/* iseq.c */
1229RUBY_SYMBOL_EXPORT_BEGIN
1230
1231/* node -> iseq */
1232rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1233rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1234rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1235rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1236rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1237 enum rb_iseq_type, const rb_compile_option_t*,
1238 VALUE script_lines);
1239
1240struct iseq_link_anchor;
1242 VALUE flags;
1243 VALUE reserved;
1244 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1245 const void *data;
1246};
1247static inline struct rb_iseq_new_with_callback_callback_func *
1248rb_iseq_new_with_callback_new_callback(
1249 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1250{
1252 IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1253 memo->func = func;
1254 memo->data = ptr;
1255
1256 return memo;
1257}
1258rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1259 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1260 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1261
1262VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1263int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1264
1265VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1266
1267RUBY_EXTERN VALUE rb_cISeq;
1268RUBY_EXTERN VALUE rb_cRubyVM;
1269RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1270RUBY_EXTERN VALUE rb_block_param_proxy;
1271RUBY_SYMBOL_EXPORT_END
1272
1273#define GetProcPtr(obj, ptr) \
1274 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1275
1276typedef struct {
1277 const struct rb_block block;
1278 unsigned int is_from_method: 1; /* bool */
1279 unsigned int is_lambda: 1; /* bool */
1280 unsigned int is_isolated: 1; /* bool */
1281} rb_proc_t;
1282
1283RUBY_SYMBOL_EXPORT_BEGIN
1284VALUE rb_proc_isolate(VALUE self);
1285VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
1286VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
1287RUBY_SYMBOL_EXPORT_END
1288
1289typedef struct {
1290 VALUE flags; /* imemo header */
1291 rb_iseq_t *iseq;
1292 const VALUE *ep;
1293 const VALUE *env;
1294 unsigned int env_size;
1295} rb_env_t;
1296
1297extern const rb_data_type_t ruby_binding_data_type;
1298
1299#define GetBindingPtr(obj, ptr) \
1300 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1301
1302typedef struct {
1303 const struct rb_block block;
1304 const VALUE pathobj;
1305 int first_lineno;
1306} rb_binding_t;
1307
1308/* used by compile time and send insn */
1309
1310enum vm_check_match_type {
1311 VM_CHECKMATCH_TYPE_WHEN = 1,
1312 VM_CHECKMATCH_TYPE_CASE = 2,
1313 VM_CHECKMATCH_TYPE_RESCUE = 3
1314};
1315
1316#define VM_CHECKMATCH_TYPE_MASK 0x03
1317#define VM_CHECKMATCH_ARRAY 0x04
1318
1319enum vm_opt_newarray_send_type {
1320 VM_OPT_NEWARRAY_SEND_MAX = 1,
1321 VM_OPT_NEWARRAY_SEND_MIN = 2,
1322 VM_OPT_NEWARRAY_SEND_HASH = 3,
1323 VM_OPT_NEWARRAY_SEND_PACK = 4,
1324 VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1325 VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
1326};
1327
1328enum vm_special_object_type {
1329 VM_SPECIAL_OBJECT_VMCORE = 1,
1330 VM_SPECIAL_OBJECT_CBASE,
1331 VM_SPECIAL_OBJECT_CONST_BASE
1332};
1333
1334enum vm_svar_index {
1335 VM_SVAR_LASTLINE = 0, /* $_ */
1336 VM_SVAR_BACKREF = 1, /* $~ */
1337
1338 VM_SVAR_EXTRA_START = 2,
1339 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1340};
1341
1342/* inline cache */
1343typedef struct iseq_inline_constant_cache *IC;
1344typedef struct iseq_inline_iv_cache_entry *IVC;
1345typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1346typedef union iseq_inline_storage_entry *ISE;
1347typedef const struct rb_callinfo *CALL_INFO;
1348typedef const struct rb_callcache *CALL_CACHE;
1349typedef struct rb_call_data *CALL_DATA;
1350
1351typedef VALUE CDHASH;
1352
1353#ifndef FUNC_FASTCALL
1354#define FUNC_FASTCALL(x) x
1355#endif
1356
1357typedef rb_control_frame_t *
1358 (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1359
1360#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1361#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1362
1363#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1364#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1365#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
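/*
 A small sketch of the guarded-pointer round trip (the 0x01 tag makes the
 stored word look like a Fixnum, so the GC does not follow it as an object
 reference); `prev_ep` is a hypothetical environment pointer:

 ```c
 VALUE specval = GC_GUARDED_PTR(prev_ep);            // (VALUE)prev_ep | 0x01
 VM_ASSERT(GC_GUARDED_PTR_P(specval));
 VM_ASSERT(GC_GUARDED_PTR_REF(specval) == (void *)prev_ep);
 ```
*/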
1366
1367enum vm_frame_env_flags {
1368 /* Frame/Environment flag bits:
1369 * MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
1370 *
 1371 * X   : tag for GC marking (the bit makes the word look like a Fixnum)
 1372 * EEEE: 4 bits of Env flags
 1373 * FF..: 8 bits of Frame flags
 1374 * MM..: 15 bits of frame magic (to check frame corruption)
1375 */
1376
1377 /* frame types */
1378 VM_FRAME_MAGIC_METHOD = 0x11110001,
1379 VM_FRAME_MAGIC_BLOCK = 0x22220001,
1380 VM_FRAME_MAGIC_CLASS = 0x33330001,
1381 VM_FRAME_MAGIC_TOP = 0x44440001,
1382 VM_FRAME_MAGIC_CFUNC = 0x55550001,
1383 VM_FRAME_MAGIC_IFUNC = 0x66660001,
1384 VM_FRAME_MAGIC_EVAL = 0x77770001,
1385 VM_FRAME_MAGIC_RESCUE = 0x78880001,
1386 VM_FRAME_MAGIC_DUMMY = 0x79990001,
1387
1388 VM_FRAME_MAGIC_MASK = 0x7fff0001,
1389
1390 /* frame flag */
1391 VM_FRAME_FLAG_FINISH = 0x0020,
1392 VM_FRAME_FLAG_BMETHOD = 0x0040,
1393 VM_FRAME_FLAG_CFRAME = 0x0080,
1394 VM_FRAME_FLAG_LAMBDA = 0x0100,
1395 VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1396 VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1397 VM_FRAME_FLAG_PASSED = 0x0800,
1398 VM_FRAME_FLAG_BOX_REQUIRE = 0x1000,
1399
1400 /* env flag */
1401 VM_ENV_FLAG_LOCAL = 0x0002,
1402 VM_ENV_FLAG_ESCAPED = 0x0004,
1403 VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1404 VM_ENV_FLAG_ISOLATED = 0x0010,
1405};
1406
1407#define VM_ENV_DATA_SIZE ( 3)
1408
1409#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1410#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1411#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1412#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1413
1414#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
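/*
 A short sketch of how the layout documented above is read (essentially what
 the VM_ENV_FLAGS()/VM_FRAME_TYPE() helpers below do):

 ```c
 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];

 if ((flags & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_METHOD) {
     // a method frame
 }
 if (flags & VM_FRAME_FLAG_LAMBDA) {
     // the frame was created for a lambda
 }
 if (flags & VM_ENV_FLAG_LOCAL) {
     // ep has no previous ep; its specval slot holds the block handler
 }
 ```
*/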
1415
1416static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1417
1418static inline void
1419VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1420{
1421 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1422 VM_ASSERT(FIXNUM_P(flags));
1423 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1424}
1425
1426static inline void
1427VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1428{
1429 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1430 VM_ASSERT(FIXNUM_P(flags));
1431 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1432}
1433
1434static inline unsigned long
1435VM_ENV_FLAGS(const VALUE *ep, long flag)
1436{
1437 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1438 VM_ASSERT(FIXNUM_P(flags));
1439 return flags & flag;
1440}
1441
1442static inline unsigned long
1443VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
1444{
1445 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1446 return flags & flag;
1447}
1448
1449static inline unsigned long
1450VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
1451{
1452 return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
1453}
1454
1455static inline unsigned long
1456VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1457{
1458 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1459}
1460
1461static inline unsigned long
1462VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
1463{
1464 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
1465}
1466
1467static inline int
1468VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1469{
1470 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1471}
1472
1473static inline int
1474VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1475{
1476 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1477}
1478
1479static inline int
1480VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1481{
1482 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1483}
1484
1485static inline int
1486VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
1487{
1488 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1489}
1490
1491static inline int
1492VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1493{
1494 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1495}
1496
1497static inline int
1498rb_obj_is_iseq(VALUE iseq)
1499{
1500 return imemo_type_p(iseq, imemo_iseq);
1501}
1502
1503#if VM_CHECK_MODE > 0
1504#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1505#endif
1506
1507static inline int
1508VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1509{
1510 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1511 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1512 (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1513 return cframe_p;
1514}
1515
1516static inline int
1517VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
1518{
1519 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1520}
1521
1522static inline int
1523VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1524{
1525 return !VM_FRAME_CFRAME_P(cfp);
1526}
1527
1528static inline int
1529VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
1530{
1531 return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
1532}
1533
1534static inline int
1535VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
1536{
1537 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE) != 0;
1538}
1539
1540#define RUBYVM_CFUNC_FRAME_P(cfp) \
1541 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1542
1543#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1544#define VM_BLOCK_HANDLER_NONE 0
1545
1546static inline int
1547VM_ENV_LOCAL_P(const VALUE *ep)
1548{
1549 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1550}
1551
1552static inline int
1553VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
1554{
1555 return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1556}
1557
1558static inline const VALUE *
1559VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
1560{
1561 return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1562}
1563
1564static inline const VALUE *
1565VM_ENV_PREV_EP(const VALUE *ep)
1566{
1567 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1568 return VM_ENV_PREV_EP_UNCHECKED(ep);
1569}
1570
1571static inline bool
1572VM_ENV_BOXED_P(const VALUE *ep)
1573{
1574 return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
1575}
1576
1577static inline VALUE
1578VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1579{
1580 if (VM_ENV_BOXED_P(ep)) {
1581 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1582 return VM_BLOCK_HANDLER_NONE;
1583 }
1584
1585 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1586 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1587}
1588
1589static inline const rb_box_t *
1590VM_ENV_BOX(const VALUE *ep)
1591{
1592 VM_ASSERT(VM_ENV_BOXED_P(ep));
1593 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1594 return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1595}
1596
1597static inline const rb_box_t *
1598VM_ENV_BOX_UNCHECKED(const VALUE *ep)
1599{
1600 return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1601}
1602
1603#if VM_CHECK_MODE > 0
1604int rb_vm_ep_in_heap_p(const VALUE *ep);
1605#endif
1606
1607static inline int
1608VM_ENV_ESCAPED_P(const VALUE *ep)
1609{
1610 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1611 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1612}
1613
1615static inline VALUE
1616VM_ENV_ENVVAL(const VALUE *ep)
1617{
1618 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1619 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1620 VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1621 return envval;
1622}
1623
1625static inline const rb_env_t *
1626VM_ENV_ENVVAL_PTR(const VALUE *ep)
1627{
1628 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1629}
1630
1631static inline const rb_env_t *
1632vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1633{
1634 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1635 env->ep = env_ep;
1636 env->env = env_body;
1637 env->env_size = env_size;
1638 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1639 return env;
1640}
1641
1642static inline void
1643VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1644{
1645 *((VALUE *)ptr) = v;
1646}
1647
1648static inline void
1649VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1650{
1651 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1652 VM_FORCE_WRITE(ptr, special_const_value);
1653}
1654
1655static inline void
1656VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1657{
1658 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1659 VM_FORCE_WRITE(&ep[index], v);
1660}
1661
1662const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1663const VALUE *rb_vm_proc_local_ep(VALUE proc);
1664void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1665void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1666
1667VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1668
1669#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1670#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1671
1672#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1673 ((void *)(ecfp) > (void *)(cfp))
1674
1675static inline const rb_control_frame_t *
1676RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1677{
1678 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1679}
1680
1681static inline int
1682RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1683{
1684 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1685}
1686
1687static inline int
1688VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1689{
1690 if ((block_handler & 0x03) == 0x01) {
1691#if VM_CHECK_MODE > 0
1692 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1693 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1694#endif
1695 return 1;
1696 }
1697 else {
1698 return 0;
1699 }
1700}
1701
1702static inline VALUE
1703VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1704{
1705 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1706 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1707 return block_handler;
1708}
1709
1710static inline const struct rb_captured_block *
1711VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1712{
1713 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1714 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1715 return captured;
1716}
1717
1718static inline int
1719VM_BH_IFUNC_P(VALUE block_handler)
1720{
1721 if ((block_handler & 0x03) == 0x03) {
1722#if VM_CHECK_MODE > 0
1723 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1724 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1725#endif
1726 return 1;
1727 }
1728 else {
1729 return 0;
1730 }
1731}
1732
1733static inline VALUE
1734VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1735{
1736 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1737 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1738 return block_handler;
1739}
1740
1741static inline const struct rb_captured_block *
1742VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1743{
1744 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1745 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1746 return captured;
1747}
1748
1749static inline const struct rb_captured_block *
1750VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1751{
1752 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1753 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1754 return captured;
1755}
1756
1757static inline enum rb_block_handler_type
1758vm_block_handler_type(VALUE block_handler)
1759{
1760 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1761 return block_handler_type_iseq;
1762 }
1763 else if (VM_BH_IFUNC_P(block_handler)) {
1764 return block_handler_type_ifunc;
1765 }
1766 else if (SYMBOL_P(block_handler)) {
1767 return block_handler_type_symbol;
1768 }
1769 else {
1770 VM_ASSERT(rb_obj_is_proc(block_handler));
1771 return block_handler_type_proc;
1772 }
1773}
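/*
 Summary of the block handler encoding dispatched above (derived from the
 predicates; the tag lives in the low two bits of the VALUE):

 ```c
 // (bh & 0x03) == 0x01 -> tagged pointer to struct rb_captured_block (iseq block)
 // (bh & 0x03) == 0x03 -> tagged pointer to struct rb_captured_block (ifunc block)
 // SYMBOL_P(bh)        -> a Symbol passed as a block (e.g. &:to_s)
 // otherwise           -> a Proc object (rb_obj_is_proc)
 ```
*/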
1774
1775static inline void
1776vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1777{
1778 VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1779 (vm_block_handler_type(block_handler), 1));
1780}
1781
1782static inline enum rb_block_type
1783vm_block_type(const struct rb_block *block)
1784{
1785#if VM_CHECK_MODE > 0
1786 switch (block->type) {
1787 case block_type_iseq:
1788 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1789 break;
1790 case block_type_ifunc:
1791 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1792 break;
1793 case block_type_symbol:
1794 VM_ASSERT(SYMBOL_P(block->as.symbol));
1795 break;
1796 case block_type_proc:
1797 VM_ASSERT(rb_obj_is_proc(block->as.proc));
1798 break;
1799 }
1800#endif
1801 return block->type;
1802}
1803
1804static inline void
1805vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1806{
1807 struct rb_block *mb = (struct rb_block *)block;
1808 mb->type = type;
1809}
1810
1811static inline const struct rb_block *
1812vm_proc_block(VALUE procval)
1813{
1814 VM_ASSERT(rb_obj_is_proc(procval));
1815 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1816}
1817
1818static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1819static inline const VALUE *vm_block_ep(const struct rb_block *block);
1820
1821static inline const rb_iseq_t *
1822vm_proc_iseq(VALUE procval)
1823{
1824 return vm_block_iseq(vm_proc_block(procval));
1825}
1826
1827static inline const VALUE *
1828vm_proc_ep(VALUE procval)
1829{
1830 return vm_block_ep(vm_proc_block(procval));
1831}
1832
1833static inline const rb_iseq_t *
1834vm_block_iseq(const struct rb_block *block)
1835{
1836 switch (vm_block_type(block)) {
1837 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1838 case block_type_proc: return vm_proc_iseq(block->as.proc);
1839 case block_type_ifunc:
1840 case block_type_symbol: return NULL;
1841 }
1842 VM_UNREACHABLE(vm_block_iseq);
1843 return NULL;
1844}
1845
1846static inline const VALUE *
1847vm_block_ep(const struct rb_block *block)
1848{
1849 switch (vm_block_type(block)) {
1850 case block_type_iseq:
1851 case block_type_ifunc: return block->as.captured.ep;
1852 case block_type_proc: return vm_proc_ep(block->as.proc);
1853 case block_type_symbol: return NULL;
1854 }
1855 VM_UNREACHABLE(vm_block_ep);
1856 return NULL;
1857}
1858
1859static inline VALUE
1860vm_block_self(const struct rb_block *block)
1861{
1862 switch (vm_block_type(block)) {
1863 case block_type_iseq:
1864 case block_type_ifunc:
1865 return block->as.captured.self;
1866 case block_type_proc:
1867 return vm_block_self(vm_proc_block(block->as.proc));
1868 case block_type_symbol:
1869 return Qundef;
1870 }
1871 VM_UNREACHABLE(vm_block_self);
1872 return Qundef;
1873}
1874
1875static inline VALUE
1876VM_BH_TO_SYMBOL(VALUE block_handler)
1877{
1878 VM_ASSERT(SYMBOL_P(block_handler));
1879 return block_handler;
1880}
1881
1882static inline VALUE
1883VM_BH_FROM_SYMBOL(VALUE symbol)
1884{
1885 VM_ASSERT(SYMBOL_P(symbol));
1886 return symbol;
1887}
1888
1889static inline VALUE
1890VM_BH_TO_PROC(VALUE block_handler)
1891{
1892 VM_ASSERT(rb_obj_is_proc(block_handler));
1893 return block_handler;
1894}
1895
1896static inline VALUE
1897VM_BH_FROM_PROC(VALUE procval)
1898{
1899 VM_ASSERT(rb_obj_is_proc(procval));
1900 return procval;
1901}
1902
1903/* VM related object allocate functions */
1904VALUE rb_thread_alloc(VALUE klass);
1905VALUE rb_binding_alloc(VALUE klass);
1906VALUE rb_proc_alloc(VALUE klass);
1907VALUE rb_proc_dup(VALUE self);
1908
1909/* for debug */
1910extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1911extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1912extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1913
1914#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1915#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1916bool rb_vm_bugreport(const void *, FILE *);
1917typedef void (*ruby_sighandler_t)(int);
1918RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1919NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1920
1921/* functions about thread/vm execution */
1922RUBY_SYMBOL_EXPORT_BEGIN
1923VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box);
1924VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1925VALUE rb_iseq_path(const rb_iseq_t *iseq);
1926VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1927RUBY_SYMBOL_EXPORT_END
1928
1929VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1930void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1931
1932int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1933void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1934
1935VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1936
1937VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1938static inline VALUE
1939rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1940{
1941 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1942}
1943
1944static inline VALUE
1945rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1946{
1947 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1948}
1949
1950VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1951VALUE rb_vm_env_local_variables(const rb_env_t *env);
1952VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
1953const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1954const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1955void rb_vm_inc_const_missing_count(void);
1956VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1957 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1958void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1959void rb_vm_pop_frame(rb_execution_context_t *ec);
1960
1961void rb_thread_start_timer_thread(void);
1962void rb_thread_stop_timer_thread(void);
1963void rb_thread_reset_timer_thread(void);
1964void rb_thread_wakeup_timer_thread(int);
1965
1966static inline void
1967rb_vm_living_threads_init(rb_vm_t *vm)
1968{
1969 ccan_list_head_init(&vm->workqueue);
1970 ccan_list_head_init(&vm->ractor.set);
1971#ifdef RUBY_THREAD_PTHREAD_H
1972 ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1973#endif
1974}
1975
1976typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1977rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1978rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1979VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1980int rb_vm_get_sourceline(const rb_control_frame_t *);
1981void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1982void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1983void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
1984rb_thread_t * ruby_thread_from_native(void);
1985int ruby_thread_set_native(rb_thread_t *th);
1986int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1987void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1988void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1989VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1990
1991void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1992
1993#define rb_vm_register_special_exception(sp, e, m) \
1994 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
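/*
 * ## Example Usage (illustrative sketch; the actual registrations live in vm.c and may differ)
 *
 * ```c
 * rb_vm_register_special_exception(ruby_error_sysstack, rb_eSysStackError,
 *                                  "stack level too deep");
 * ```
 *
 * The message must be a string literal: rb_strlen_lit() computes its length at
 * compile time and rb_usascii_str_new_static() keeps a pointer to the static data.
 */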
1995
1996void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1997
1998rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
1999
2000const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
2001const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);
2002
2003#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
2004
2005#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
2006 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
2007 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
2008 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
2009 if (UNLIKELY((cfp) <= &bound[1])) { \
2010 vm_stackoverflow(); \
2011 } \
2012} while (0)
2013
2014#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
2015 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
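/*
 * A hedged reading of the check above: the value stack (sp) grows upward while
 * control frames (cfp) are pushed downward within the same region, so the macro
 * calls vm_stackoverflow() when fewer than `margin` VALUE slots (plus roughly one
 * control frame of slack) remain between them.
 *
 * ## Example Usage (illustrative sketch; `argc` is a hypothetical local)
 *
 * ```c
 * // reserve room for the receiver plus argc arguments before pushing them
 * CHECK_VM_STACK_OVERFLOW(ec->cfp, argc + 1);
 * ```
 */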
2016
2017VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
2018
2019rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
2020
2021/* for thread */
2022
2023#if RUBY_VM_THREAD_MODEL == 2
2024
2025RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
2026RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
2027RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
2028RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
2029RUBY_EXTERN unsigned int ruby_vm_event_local_num;
2030
2031#define GET_VM() rb_current_vm()
2032#define GET_RACTOR() rb_current_ractor()
2033#define GET_THREAD() rb_current_thread()
2034#define GET_EC() rb_current_execution_context(true)
2035
2036static inline rb_thread_t *
2037rb_ec_thread_ptr(const rb_execution_context_t *ec)
2038{
2039 return ec->thread_ptr;
2040}
2041
2042static inline rb_ractor_t *
2043rb_ec_ractor_ptr(const rb_execution_context_t *ec)
2044{
2045 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2046 if (th) {
2047 VM_ASSERT(th->ractor != NULL);
2048 return th->ractor;
2049 }
2050 else {
2051 return NULL;
2052 }
2053}
2054
2055static inline rb_vm_t *
2056rb_ec_vm_ptr(const rb_execution_context_t *ec)
2057{
2058 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2059 if (th) {
2060 return th->vm;
2061 }
2062 else {
2063 return NULL;
2064 }
2065}
2066
2067NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
2068
2069static inline rb_execution_context_t *
2070rb_current_execution_context(bool expect_ec)
2071{
2072#ifdef RB_THREAD_LOCAL_SPECIFIER
2073 #ifdef RB_THREAD_CURRENT_EC_NOINLINE
2074 rb_execution_context_t * volatile ec = rb_current_ec();
2075 #else
2076 rb_execution_context_t * volatile ec = ruby_current_ec;
2077 #endif
2078
2079 /* In shared objects, `__tls_get_addr()` is used to access TLS, and the
2080  * address of `ruby_current_ec` may be cached in a function frame. That
2081  * cached address is wrong once a coroutine has migrated to another
2082  * native thread:
2083  * 1) Take `ptr = &ruby_current_ec` on NT1 and store it in the frame.
2084  * 2) Context switch, then resume the coroutine on NT2.
2085  * 3) `ptr` is used on NT2, but it still points into NT1's TLS.
2086  * This assertion detects such misuse.
2087  *
2088  * To avoid accidents, `GET_EC()` should be called only once per frame
2089  * (see the usage sketch after this function); inlining can reintroduce the problem.
2090  */
2091 VM_ASSERT(ec == rb_current_ec_noinline());
2092#else
2093 rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
2094#endif
2095 VM_ASSERT(!expect_ec || ec != NULL);
2096 return ec;
2097}
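/*
 * ## Example Usage (illustrative sketch; the helpers are hypothetical)
 *
 * Fetch the execution context once per C frame and pass it down, rather than
 * re-reading TLS after a point where the fiber may have been resumed on a
 * different native thread.
 *
 * ```c
 * static VALUE
 * do_work(VALUE obj)
 * {
 *     rb_execution_context_t *ec = GET_EC(); // once per frame
 *     may_switch_fibers(ec, obj);            // hypothetical helper
 *     return needs_ec(ec, obj);              // reuse the same ec
 * }
 * ```
 */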
2098
2099static inline rb_thread_t *
2100rb_current_thread(void)
2101{
2102 const rb_execution_context_t *ec = GET_EC();
2103 return rb_ec_thread_ptr(ec);
2104}
2105
2106static inline rb_ractor_t *
2107rb_current_ractor_raw(bool expect)
2108{
2109 if (ruby_single_main_ractor) {
2110 return ruby_single_main_ractor;
2111 }
2112 else {
2113 const rb_execution_context_t *ec = rb_current_execution_context(expect);
2114 return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2115 }
2116}
2117
2118static inline rb_ractor_t *
2119rb_current_ractor(void)
2120{
2121 return rb_current_ractor_raw(true);
2122}
2123
2124static inline rb_vm_t *
2125rb_current_vm(void)
2126{
2127#if 0 // TODO: reconsider the assertions
2128 VM_ASSERT(ruby_current_vm_ptr == NULL ||
2129 ruby_current_execution_context_ptr == NULL ||
2130 rb_ec_thread_ptr(GET_EC()) == NULL ||
2131 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2132 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2133#endif
2134
2135 return ruby_current_vm_ptr;
2136}
2137
2138void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2139 unsigned int recorded_lock_rec,
2140 unsigned int current_lock_rec);
2141
2142/* This is technically a data race, since the field is read without holding the
2143 * lock; however, we only compare it against a value that our own thread writes. */
2144NO_SANITIZE("thread", static inline bool
2145vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
2146{
2147 VM_ASSERT(cr == GET_RACTOR());
2148 return vm->ractor.sync.lock_owner == cr;
2149}
2150
2151static inline unsigned int
2152rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2153{
2154 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2155
2156 if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
2157 return 0;
2158 }
2159 else {
2160 return vm->ractor.sync.lock_rec;
2161 }
2162}
2163
2164#else
2165#error "unsupported thread model"
2166#endif
2167
2168enum {
2169 TIMER_INTERRUPT_MASK = 0x01,
2170 PENDING_INTERRUPT_MASK = 0x02,
2171 POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2172 TRAP_INTERRUPT_MASK = 0x08,
2173 TERMINATE_INTERRUPT_MASK = 0x10,
2174 VM_BARRIER_INTERRUPT_MASK = 0x20,
2175};
2176
2177#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2178#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2179#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2180#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2181#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2182#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
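/*
 * Hedged sketch of how these setters are used: a producer (timer thread, signal
 * handling, another ractor, a postponed job registration) atomically ORs a bit
 * into the target EC's interrupt_flag; the owning thread notices it at its next
 * interrupt check (RUBY_VM_INTERRUPTED(), RUBY_VM_INTERRUPTED_ANY() and
 * rb_vm_check_ints() below).
 *
 * ```c
 * // illustrative only; whether the target also needs an explicit wakeup
 * // depends on what it is currently blocked on
 * RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(target_ec);
 * ```
 */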
2183
2184static inline bool
2185RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
2186{
2187 return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
2188}
2189
2190static inline bool
2191RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2192{
2193#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2194 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2195
2196 if (current_clock != ec->checked_clock) {
2197 ec->checked_clock = current_clock;
2198 RUBY_VM_SET_TIMER_INTERRUPT(ec);
2199 }
2200#endif
2201 return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
2202}
2203
2204VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2205int rb_signal_buff_size(void);
2206int rb_signal_exec(rb_thread_t *th, int sig);
2207void rb_threadptr_check_signal(rb_thread_t *mth);
2208void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2209void rb_threadptr_signal_exit(rb_thread_t *th);
2210int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2211void rb_threadptr_interrupt(rb_thread_t *th);
2212void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2213void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2214void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2215VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2216void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2217void rb_execution_context_update(rb_execution_context_t *ec);
2218void rb_execution_context_mark(const rb_execution_context_t *ec);
2219void rb_fiber_close(rb_fiber_t *fib);
2220void Init_native_thread(rb_thread_t *th);
2221int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2222
2223// vm_sync.h
2224void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2225void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2226
2227#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2228static inline void
2229rb_vm_check_ints(rb_execution_context_t *ec)
2230{
2231#ifdef RUBY_ASSERT_CRITICAL_SECTION
2232 VM_ASSERT(ruby_assert_critical_section_entered == 0);
2233#endif
2234
2235 VM_ASSERT(ec == rb_current_ec_noinline());
2236
2237 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2238 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2239 }
2240}
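/*
 * ## Example Usage (illustrative sketch; the helpers are hypothetical)
 *
 * Long-running C loops should call RUBY_VM_CHECK_INTS() periodically so that
 * timer interrupts, signals and Thread#raise/#kill are serviced.
 *
 * ```c
 * while (has_more_work(state)) {   // hypothetical work loop
 *     do_one_unit_of_work(state);  // hypothetical helper
 *     RUBY_VM_CHECK_INTS(ec);      // service any pending interrupts
 * }
 * ```
 */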
2241
2242/* tracer */
2243
2244struct rb_trace_arg_struct {
2245 rb_event_flag_t event;
2246 rb_execution_context_t *ec;
2247 const rb_control_frame_t *cfp;
2248 VALUE self;
2249 ID id;
2250 ID called_id;
2251 VALUE klass;
2252 VALUE data;
2253
2254 int klass_solved;
2255
2256 /* calc from cfp */
2257 int lineno;
2258 VALUE path;
2259};
2260
2261void rb_hook_list_mark(rb_hook_list_t *hooks);
2262void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
2263void rb_hook_list_free(rb_hook_list_t *hooks);
2264void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2265void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2266
2267void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2268
2269#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2270 const rb_event_flag_t flag_arg_ = (flag_); \
2271 rb_hook_list_t *hooks_arg_ = (hooks_); \
2272 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2273 /* defer evaluating the other arguments */ \
2274 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2275 } \
2276} while (0)
2277
2278static inline void
2279rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2280 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2281{
2282 struct rb_trace_arg_struct trace_arg;
2283
2284 VM_ASSERT((hooks->events & flag) != 0);
2285
2286 trace_arg.event = flag;
2287 trace_arg.ec = ec;
2288 trace_arg.cfp = ec->cfp;
2289 trace_arg.self = self;
2290 trace_arg.id = id;
2291 trace_arg.called_id = called_id;
2292 trace_arg.klass = klass;
2293 trace_arg.data = data;
2294 trace_arg.path = Qundef;
2295 trace_arg.klass_solved = 0;
2296
2297 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2298}
2299
2300struct rb_ractor_pub {
2301 VALUE self;
2302 uint32_t id;
2303 rb_hook_list_t hooks;
2304};
2305
2306static inline rb_hook_list_t *
2307rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2308{
2309 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2310 return &cr_pub->hooks;
2311}
2312
2313#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2314 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2315
2316#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2317 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2318
2319static inline void
2320rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2321{
2322 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2323 NIL_P(eval_script) ? (VALUE)iseq :
2324 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2325}
2326
2327void rb_vm_trap_exit(rb_vm_t *vm);
2328void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2329void rb_vm_postponed_job_free(void); /* vm_trace.c */
2330size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2331void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2332
2333RUBY_SYMBOL_EXPORT_BEGIN
2334
2335int rb_thread_check_trap_pending(void);
2336
2337/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2338#define RUBY_EVENT_COVERAGE_LINE 0x010000
2339#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2340
2341extern VALUE rb_get_coverages(void);
2342extern void rb_set_coverages(VALUE, int, VALUE);
2343extern void rb_clear_coverages(void);
2344extern void rb_reset_coverages(void);
2345extern void rb_resume_coverages(void);
2346extern void rb_suspend_coverages(void);
2347
2348void rb_postponed_job_flush(rb_vm_t *vm);
2349
2350// ractor.c
2351RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2352RUBY_EXTERN VALUE rb_eRactorIsolationError;
2353
2354RUBY_SYMBOL_EXPORT_END
2355
2356#endif /* RUBY_VM_CORE_H */