Ruby 3.5.0dev (2025-08-30 revision 01a57bd6cde82ad58f938d075f569d57048d8a60)
vm_core.h (01a57bd6cde82ad58f938d075f569d57048d8a60)
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
// respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
58#if VM_CHECK_MODE > 0
59#define VM_ASSERT(expr, ...) \
60 RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62#define RUBY_ASSERT_CRITICAL_SECTION
63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64#else
65#define VM_ASSERT(/*expr, */...) ((void)0)
66#define VM_UNREACHABLE(func) UNREACHABLE
67#define RUBY_DEBUG_THREAD_SCHEDULE()
68#endif
69
70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
72#if defined(RUBY_ASSERT_CRITICAL_SECTION)
73/*
74# Critical Section Assertions
75
76These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
77such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.
78
79The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
80may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
81held by someone else.
82
83These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84is set.
85
86## Example Usage
87
88```c
89RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90// ... some code which does not invoke rb_vm_check_ints() ...
91RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92```
93
94If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96*/
97extern int ruby_assert_critical_section_entered;
98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100#else
101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103#endif
104
105#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106# include "wasm/setjmp.h"
107#else
108# include <setjmp.h>
109#endif
110
111#if defined(__linux__) || defined(__FreeBSD__)
112# define RB_THREAD_T_HAS_NATIVE_ID
113#endif
114
116#include "ccan/list/list.h"
117#include "id.h"
118#include "internal.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/namespace.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
126#include "method.h"
127#include "node.h"
128#include "ruby/ruby.h"
129#include "ruby/st.h"
130#include "ruby_atomic.h"
131#include "vm_opts.h"
132
133#include "ruby/thread_native.h"
134/*
135 * implementation selector of get_insn_info algorithm
136 * 0: linear search
137 * 1: binary search
138 * 2: succinct bitvector
139 */
140#ifndef VM_INSN_INFO_TABLE_IMPL
141# define VM_INSN_INFO_TABLE_IMPL 2
142#endif
143
144#if defined(NSIG_MAX) /* POSIX issue 8 */
145# undef NSIG
146# define NSIG NSIG_MAX
147#elif defined(_SIG_MAXSIG) /* FreeBSD */
148# undef NSIG
149# define NSIG _SIG_MAXSIG
150#elif defined(_SIGMAX) /* QNX */
151# define NSIG (_SIGMAX + 1)
152#elif defined(NSIG) /* 99% of everything else */
153# /* take it */
154#else /* Last resort */
155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156#endif
157
158#define RUBY_NSIG NSIG
159
160#if defined(SIGCLD)
161# define RUBY_SIGCHLD (SIGCLD)
162#elif defined(SIGCHLD)
163# define RUBY_SIGCHLD (SIGCHLD)
164#endif
165
166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167# define USE_SIGALTSTACK
168void *rb_allocate_sigaltstack(void);
169void *rb_register_sigaltstack(void *);
170# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171# define RB_ALTSTACK_FREE(var) free(var)
172# define RB_ALTSTACK(var) var
173#else /* noop */
174# define RB_ALTSTACK_INIT(var, altstack)
175# define RB_ALTSTACK_FREE(var)
176# define RB_ALTSTACK(var) (0)
177#endif
178
179#include THREAD_IMPL_H
180#define RUBY_VM_THREAD_MODEL 2
181
182/*****************/
183/* configuration */
184/*****************/
185
186/* gcc ver. check */
187#if defined(__GNUC__) && __GNUC__ >= 2
188
189#if OPT_TOKEN_THREADED_CODE
190#if OPT_DIRECT_THREADED_CODE
191#undef OPT_DIRECT_THREADED_CODE
192#endif
193#endif
194
195#else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
197/* disable threaded code options */
198#if OPT_DIRECT_THREADED_CODE
199#undef OPT_DIRECT_THREADED_CODE
200#endif
201#if OPT_TOKEN_THREADED_CODE
202#undef OPT_TOKEN_THREADED_CODE
203#endif
204#endif
205
206/* call threaded code */
207#if OPT_CALL_THREADED_CODE
208#if OPT_DIRECT_THREADED_CODE
209#undef OPT_DIRECT_THREADED_CODE
210#endif /* OPT_DIRECT_THREADED_CODE */
211#endif /* OPT_CALL_THREADED_CODE */
212
213void rb_vm_encoded_insn_data_table_init(void);
214typedef unsigned long rb_num_t;
215typedef signed long rb_snum_t;
216
/* Tag kinds for the setjmp/longjmp based non-local control flow.
 * A value of this enum is stored as the `state` of `struct rb_vm_tag`
 * (written by EC_PUSH_TAG, per the comment on that struct) and encodes
 * which Ruby-level jump is in flight. */
enum ruby_tag_type {
    RUBY_TAG_NONE = 0x0,    /* no jump pending */
    RUBY_TAG_RETURN = 0x1,  /* `return` escaping a frame */
    RUBY_TAG_BREAK = 0x2,   /* `break` */
    RUBY_TAG_NEXT = 0x3,    /* `next` */
    RUBY_TAG_RETRY = 0x4,   /* `retry` */
    RUBY_TAG_REDO = 0x5,    /* `redo` */
    RUBY_TAG_RAISE = 0x6,   /* exception raised */
    RUBY_TAG_THROW = 0x7,   /* Kernel#throw */
    RUBY_TAG_FATAL = 0x8,   /* unrescuable fatal error */
    RUBY_TAG_MASK = 0xf     /* mask covering all tag values above */
};
229
230#define TAG_NONE RUBY_TAG_NONE
231#define TAG_RETURN RUBY_TAG_RETURN
232#define TAG_BREAK RUBY_TAG_BREAK
233#define TAG_NEXT RUBY_TAG_NEXT
234#define TAG_RETRY RUBY_TAG_RETRY
235#define TAG_REDO RUBY_TAG_REDO
236#define TAG_RAISE RUBY_TAG_RAISE
237#define TAG_THROW RUBY_TAG_THROW
238#define TAG_FATAL RUBY_TAG_FATAL
239#define TAG_MASK RUBY_TAG_MASK
240
/* Flags packed into the state operand of a VM-level throw.
 * NOTE(review): low bits presumably carry the ruby_tag_type state
 * (masked by VM_THROW_STATE_MASK) and 0x8000 marks a throw that does
 * not escape the current frame — confirm against vm_throw in
 * vm_insnhelper.c. */
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};
245
246/* forward declarations */
247struct rb_thread_struct;
249
250/* iseq data type */
252
254 rb_serial_t raw;
255 VALUE data[2];
256};
257
258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
260// imemo_constcache
262 VALUE flags;
263
264 VALUE value; // v0
265 VALUE _unused1; // v1
266 VALUE _unused2; // v2
267 const rb_cref_t *ic_cref; // v3
268};
269STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
270 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
271 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
272
289
291 uint64_t value; // dest_shape_id in former half, attr_index in latter half
292 ID iv_set_name;
293};
294
298
300 struct {
301 struct rb_thread_struct *running_thread;
302 VALUE value;
303 } once;
304 struct iseq_inline_constant_cache ic_cache;
305 struct iseq_inline_iv_cache_entry iv_cache;
306};
307
309 const struct rb_call_data *cd;
310 const struct rb_callcache *cc;
311 VALUE block_handler;
312 VALUE recv;
313 int argc;
314 bool kw_splat;
315 VALUE heap_argv;
316 const rb_namespace_t *proc_ns;
317};
318
319#ifndef VM_ARGC_STACK_MAX
320#define VM_ARGC_STACK_MAX 128
321#endif
322
323# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
324
326
327#if 1
328#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
329#else
330#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
331#endif
332#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
333
335 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
336 VALUE base_label; /* String */
337 VALUE label; /* String */
338 int first_lineno;
339 int node_id;
340 rb_code_location_t code_location;
342
343#define PATHOBJ_PATH 0
344#define PATHOBJ_REALPATH 1
345
346static inline VALUE
347pathobj_path(VALUE pathobj)
348{
349 if (RB_TYPE_P(pathobj, T_STRING)) {
350 return pathobj;
351 }
352 else {
353 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
354 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
355 }
356}
357
358static inline VALUE
359pathobj_realpath(VALUE pathobj)
360{
361 if (RB_TYPE_P(pathobj, T_STRING)) {
362 return pathobj;
363 }
364 else {
365 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
366 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
367 }
368}
369
370/* Forward declarations */
371typedef uintptr_t iseq_bits_t;
372
373#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
374
375/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
376#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
377
378/* instruction sequence type */
/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,     /* top-level scope of a file */
    ISEQ_TYPE_METHOD,  /* method body */
    ISEQ_TYPE_BLOCK,   /* block body */
    ISEQ_TYPE_CLASS,   /* class/module body */
    ISEQ_TYPE_RESCUE,  /* rescue clause */
    ISEQ_TYPE_ENSURE,  /* ensure clause */
    ISEQ_TYPE_EVAL,    /* eval'ed code */
    ISEQ_TYPE_MAIN,    /* top-level of the main script */
    ISEQ_TYPE_PLAIN    /* none of the above */
};
390
391// Attributes specified by Primitive.attr!
/* Bit flags; the union of an iseq's attributes is stored in
 * rb_iseq_constant_body::builtin_attrs and tested via rb_iseq_attr_p(). */
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq only contains single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    // The iseq acts like a C method in backtraces.
    BUILTIN_ATTR_C_TRACE = 0x08,
};
402
403typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
404
406 enum rb_iseq_type type;
407
408 unsigned int iseq_size;
409 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
410
434 struct {
435 struct {
436 unsigned int has_lead : 1;
437 unsigned int has_opt : 1;
438 unsigned int has_rest : 1;
439 unsigned int has_post : 1;
440 unsigned int has_kw : 1;
441 unsigned int has_kwrest : 1;
442 unsigned int has_block : 1;
443
444 unsigned int ambiguous_param0 : 1; /* {|a|} */
445 unsigned int accepts_no_kwarg : 1;
446 unsigned int ruby2_keywords: 1;
447 unsigned int anon_rest: 1;
448 unsigned int anon_kwrest: 1;
449 unsigned int use_block: 1;
450 unsigned int forwardable: 1;
451 } flags;
452
453 unsigned int size;
454
455 int lead_num;
456 int opt_num;
457 int rest_start;
458 int post_start;
459 int post_num;
460 int block_start;
461
462 const VALUE *opt_table; /* (opt_num + 1) entries. */
463 /* opt_num and opt_table:
464 *
465 * def foo o1=e1, o2=e2, ..., oN=eN
466 * #=>
467 * # prologue code
468 * A1: e1
469 * A2: e2
470 * ...
471 * AN: eN
472 * AL: body
473 * opt_num = N
474 * opt_table = [A1, A2, ..., AN, AL]
475 */
476
477 const struct rb_iseq_param_keyword {
478 int num;
479 int required_num;
480 int bits_start;
481 int rest_start;
482 const ID *table;
483 VALUE *default_values;
484 } *keyword;
486
487 rb_iseq_location_t location;
488
489 /* insn info, must be freed */
491 const struct iseq_insn_info_entry *body;
492 unsigned int *positions;
493 unsigned int size;
494#if VM_INSN_INFO_TABLE_IMPL == 2
495 struct succ_index_table *succ_index_table;
496#endif
497 } insns_info;
498
499 const ID *local_table; /* must free */
500
501 /* catch table */
502 struct iseq_catch_table *catch_table;
503
504 /* for child iseq */
505 const struct rb_iseq_struct *parent_iseq;
506 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
507
508 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
509 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
510
511 struct {
512 rb_snum_t flip_count;
513 VALUE script_lines;
514 VALUE coverage;
515 VALUE pc2branchindex;
516 VALUE *original_iseq;
517 } variable;
518
519 unsigned int local_table_size;
520 unsigned int ic_size; // Number of IC caches
521 unsigned int ise_size; // Number of ISE caches
522 unsigned int ivc_size; // Number of IVC caches
523 unsigned int icvarc_size; // Number of ICVARC caches
524 unsigned int ci_size;
525 unsigned int stack_max; /* for stack overflow check */
526
527 unsigned int builtin_attrs; // Union of rb_builtin_attr
528
529 bool prism; // ISEQ was generated from prism compiler
530
531 union {
532 iseq_bits_t * list; /* Find references for GC */
533 iseq_bits_t single;
534 } mark_bits;
535
536 struct rb_id_table *outer_variables;
537
538 const rb_iseq_t *mandatory_only_iseq;
539
540#if USE_YJIT || USE_ZJIT
541 // Function pointer for JIT code on jit_exec()
542 rb_jit_func_t jit_entry;
543 // Number of calls on jit_exec()
544 long unsigned jit_entry_calls;
545 // Function pointer for JIT code on jit_exec_exception()
546 rb_jit_func_t jit_exception;
547 // Number of calls on jit_exec_exception()
548 long unsigned jit_exception_calls;
549#endif
550
551#if USE_YJIT
552 // YJIT stores some data on each iseq.
553 void *yjit_payload;
554 // Used to estimate how frequently this ISEQ gets called
555 uint64_t yjit_calls_at_interv;
556#endif
557
558#if USE_ZJIT
559 // ZJIT stores some data on each iseq.
560 void *zjit_payload;
561#endif
562};
563
564/* T_IMEMO/iseq */
565/* typedef rb_iseq_t is in method.h */
567 VALUE flags; /* 1 */
568 VALUE wrapper; /* 2 */
569
570 struct rb_iseq_constant_body *body; /* 3 */
571
572 union { /* 4, 5 words */
573 struct iseq_compile_data *compile_data; /* used at compile time */
574
575 struct {
576 VALUE obj;
577 int index;
578 } loader;
579
580 struct {
581 struct rb_hook_list_struct *local_hooks;
582 rb_event_flag_t global_trace_events;
583 } exec;
584 } aux;
585};
586
587#define ISEQ_BODY(iseq) ((iseq)->body)
588
589#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
590#define USE_LAZY_LOAD 0
591#endif
592
#if !USE_LAZY_LOAD
/* Dead stub for builds without lazy ISeq loading: rb_iseq_check() only
 * reaches rb_iseq_complete() when USE_LAZY_LOAD is non-zero, so this
 * body is never executed. */
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
597
598static inline const rb_iseq_t *
599rb_iseq_check(const rb_iseq_t *iseq)
600{
601 if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
602 rb_iseq_complete((rb_iseq_t *)iseq);
603 }
604 return iseq;
605}
606
607static inline bool
608rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
609{
610 return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
611}
612
/* Return the ISeq backing an ISeq-defined method, completing lazy
 * loading via rb_iseq_check().  With VM_CHECK_MODE enabled, aborts when
 * the definition is not VM_METHOD_TYPE_ISEQ. */
static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
//TODO: re-visit. to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}
622
/* Indices into rb_vm_t::special_exceptions — pre-built exception
 * objects for error conditions (e.g. out of memory, stack overflow)
 * where allocating a fresh exception may not be possible. */
enum ruby_special_exceptions {
    ruby_error_reenter,       /* thread switch while raising */
    ruby_error_nomemory,      /* NoMemoryError */
    ruby_error_sysstack,      /* SystemStackError */
    ruby_error_stackfatal,    /* fatal machine-stack overflow */
    ruby_error_stream_closed, /* stream closed in another thread */
    ruby_special_error_count  /* number of entries; not an index */
};
631
632#define GetVMPtr(obj, ptr) \
633 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
634
635struct rb_vm_struct;
636typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
637
638typedef struct rb_at_exit_list {
639 rb_vm_at_exit_func *func;
640 struct rb_at_exit_list *next;
642
643void *rb_objspace_alloc(void);
644void rb_objspace_free(void *objspace);
645void rb_objspace_call_finalizer(void);
646
647typedef struct rb_hook_list_struct {
648 struct rb_event_hook_struct *hooks;
649 rb_event_flag_t events;
650 unsigned int running;
651 bool need_clean;
652 bool is_local;
654
655
656// see builtin.h for definition
657typedef const struct rb_builtin_function *RB_BUILTIN;
658
660 VALUE *varptr;
661 struct global_object_list *next;
662};
663
664typedef struct rb_vm_struct {
665 VALUE self;
666
667 struct {
668 struct ccan_list_head set;
669 unsigned int cnt;
670 unsigned int blocking_cnt;
671
672 struct rb_ractor_struct *main_ractor;
673 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
674
675 struct {
676 // monitor
677 rb_nativethread_lock_t lock;
678 struct rb_ractor_struct *lock_owner;
679 unsigned int lock_rec;
680
681 // join at exit
682 rb_nativethread_cond_t terminate_cond;
683 bool terminate_waiting;
684
685#ifndef RUBY_THREAD_PTHREAD_H
686 // win32
687 bool barrier_waiting;
688 unsigned int barrier_cnt;
689 rb_nativethread_cond_t barrier_complete_cond;
690 rb_nativethread_cond_t barrier_release_cond;
691#endif
692 } sync;
693
694#ifdef RUBY_THREAD_PTHREAD_H
695 // ractor scheduling
696 struct {
697 rb_nativethread_lock_t lock;
698 struct rb_ractor_struct *lock_owner;
699 bool locked;
700
701 rb_nativethread_cond_t cond; // GRQ
702 unsigned int snt_cnt; // count of shared NTs
703 unsigned int dnt_cnt; // count of dedicated NTs
704
705 unsigned int running_cnt;
706
707 unsigned int max_cpu;
        struct ccan_list_head grq; // Global Ready Queue
709 unsigned int grq_cnt;
710
711 // running threads
712 struct ccan_list_head running_threads;
713
714 // threads which switch context by timeslice
715 struct ccan_list_head timeslice_threads;
716
717 struct ccan_list_head zombie_threads;
718
        // true if timeslice timer is not enabled
720 bool timeslice_wait_inf;
721
722 // barrier
723 rb_nativethread_cond_t barrier_complete_cond;
724 rb_nativethread_cond_t barrier_release_cond;
725 bool barrier_waiting;
726 unsigned int barrier_waiting_cnt;
727 unsigned int barrier_serial;
728 struct rb_ractor_struct *barrier_ractor;
729 unsigned int barrier_lock_rec;
730 } sched;
731#endif
732 } ractor;
733
734#ifdef USE_SIGALTSTACK
735 void *main_altstack;
736#endif
737
738 rb_serial_t fork_gen;
739
740 /* set in single-threaded processes only: */
741 volatile int ubf_async_safe;
742
743 unsigned int running: 1;
744 unsigned int thread_abort_on_exception: 1;
745 unsigned int thread_report_on_exception: 1;
746 unsigned int thread_ignore_deadlock: 1;
747
748 /* object management */
749 VALUE mark_object_ary;
751 const VALUE special_exceptions[ruby_special_error_count];
752
753 /* namespace */
754 rb_namespace_t *main_namespace;
755
756 /* load */
757 VALUE top_self;
758 VALUE load_path;
759 VALUE load_path_snapshot;
760 VALUE load_path_check_cache;
761 VALUE expanded_load_path;
762 VALUE loaded_features;
763 VALUE loaded_features_snapshot;
764 VALUE loaded_features_realpaths;
765 VALUE loaded_features_realpath_map;
766 struct st_table *loaded_features_index;
767 struct st_table *loading_table;
768 // For running the init function of statically linked
769 // extensions when they are loaded
770 struct st_table *static_ext_inits;
771
772 /* signal */
773 struct {
774 VALUE cmd[RUBY_NSIG];
775 } trap_list;
776
777 /* postponed_job (async-signal-safe, and thread-safe) */
778 struct rb_postponed_job_queue *postponed_job_queue;
779
780 int src_encoding_index;
781
782 /* workqueue (thread-safe, NOT async-signal-safe) */
783 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
784 rb_nativethread_lock_t workqueue_lock;
785
786 VALUE orig_progname, progname;
787 VALUE coverages, me2counter;
788 int coverage_mode;
789
790 struct {
791 struct rb_objspace *objspace;
792 struct gc_mark_func_data_struct {
793 void *data;
794 void (*mark_func)(VALUE v, void *data);
795 } *mark_func_data;
796 } gc;
797
798 rb_at_exit_list *at_exit;
799
800 const struct rb_builtin_function *builtin_function_table;
801
802 st_table *ci_table;
803 struct rb_id_table *negative_cme_table;
804 st_table *overloaded_cme_table; // cme -> overloaded_cme
805 set_table *unused_block_warning_table;
806 set_table *cc_refinement_table;
807
808 // This id table contains a mapping from ID to ICs. It does this with ID
809 // keys and nested st_tables as values. The nested tables have ICs as keys
810 // and Qtrue as values. It is used when inline constant caches need to be
811 // invalidated or ISEQs are being freed.
812 struct rb_id_table *constant_cache;
813 ID inserting_constant_cache_id;
814
815#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
816#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
817#endif
818 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
819
820#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
821 uint32_t clock;
822#endif
823
824 /* params */
825 struct { /* size in byte */
826 size_t thread_vm_stack_size;
827 size_t thread_machine_stack_size;
828 size_t fiber_vm_stack_size;
829 size_t fiber_machine_stack_size;
830 } default_params;
831
832 // TODO: a single require_stack can't support multi-threaded require trees
833 VALUE require_stack;
834} rb_vm_t;
835
836/* default values */
837
838#define RUBY_VM_SIZE_ALIGN 4096
839
840#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
841#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
842#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
843#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
844
845#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
846#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
847#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
848#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
849#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
850#else
851#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
852#endif
853
854#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
855/* It seems sanitizers consume A LOT of machine stacks */
856#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
857#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
858#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
859#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
860#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
861#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
862#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
863#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
864#endif
865
866#ifndef VM_DEBUG_BP_CHECK
867#define VM_DEBUG_BP_CHECK 0
868#endif
869
870#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
871#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
872#endif
873
875 VALUE self;
876 const VALUE *ep;
877 union {
878 const rb_iseq_t *iseq;
879 const struct vm_ifunc *ifunc;
880 VALUE val;
881 } code;
882};
883
/* Kind of a "block handler" — the tagged VALUE that represents the
 * block passed to a frame. */
enum rb_block_handler_type {
    block_handler_type_iseq,   /* captured ISeq block */
    block_handler_type_ifunc,  /* captured C-function (ifunc) block */
    block_handler_type_symbol, /* Symbol passed as a block (&:sym) */
    block_handler_type_proc    /* Proc object */
};
890
/* Discriminator for struct rb_block::as (parallels
 * enum rb_block_handler_type above). */
enum rb_block_type {
    block_type_iseq,   /* as.captured holds an ISeq block */
    block_type_ifunc,  /* as.captured holds an ifunc block */
    block_type_symbol, /* as.symbol */
    block_type_proc    /* as.proc */
};
897
/* A block value: either a captured frame (ISeq or ifunc), a Symbol, or
 * a Proc, discriminated by `type`. */
struct rb_block {
    union {
        struct rb_captured_block captured; /* block_type_iseq / block_type_ifunc */
        VALUE symbol;                      /* block_type_symbol */
        VALUE proc;                        /* block_type_proc */
    } as;
    enum rb_block_type type; /* selects the active member of `as` */
};
906
908 const VALUE *pc; // cfp[0]
909 VALUE *sp; // cfp[1]
910 const rb_iseq_t *iseq; // cfp[2]
911 VALUE self; // cfp[3] / block[0]
912 const VALUE *ep; // cfp[4] / block[1]
913 const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
914 void *jit_return; // cfp[6] -- return address for JIT code
915#if VM_DEBUG_BP_CHECK
916 VALUE *bp_check; // cfp[7]
917#endif
919
920extern const rb_data_type_t ruby_threadptr_data_type;
921
922static inline struct rb_thread_struct *
923rb_thread_ptr(VALUE thval)
924{
925 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
926}
927
/* Coarse Ruby-level thread state (stored in rb_thread_t::status). */
enum rb_thread_status {
    THREAD_RUNNABLE,        /* running or ready to run */
    THREAD_STOPPED,         /* blocked, may be woken (e.g. sleep with timeout) */
    THREAD_STOPPED_FOREVER, /* blocked with no timeout */
    THREAD_KILLED           /* terminated */
};
934
935#ifdef RUBY_JMP_BUF
936typedef RUBY_JMP_BUF rb_jmpbuf_t;
937#else
938typedef void *rb_jmpbuf_t[5];
939#endif
940
941/*
942 `rb_vm_tag_jmpbuf_t` type represents a buffer used to
943 long jump to a C frame associated with `rb_vm_tag`.
944
945 Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
946 following functions:
947 - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
948 - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
949
950 `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
951 `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
952*/
953#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
954/*
955 WebAssembly target with Asyncify-based SJLJ needs
956 to capture the execution context by unwind/rewind-ing
957 call frames into a jump buffer. The buffer space tends
958 to be considerably large unlike other architectures'
959 register-based buffers.
  Therefore, we allocate the buffer on the heap on such
961 environments.
962*/
963typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
964
965#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
966
/* wasm: heap-allocate the (large, Asyncify-based) jump buffer; see the
 * comment above for why it cannot live inline in rb_vm_tag. */
static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}
972
/* wasm: release the heap-allocated jump buffer created by
 * rb_vm_tag_jmpbuf_init(). */
static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
978#else
979typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
980
981#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
982
/* Non-wasm: the jump buffer is embedded directly in rb_vm_tag, so
 * there is nothing to allocate. */
static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
988
/* Non-wasm: embedded buffer needs no teardown. */
static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
994#endif
995
996/*
997 the members which are written in EC_PUSH_TAG() should be placed at
998 the beginning and the end, so that entire region is accessible.
999*/
1001 VALUE tag;
1002 VALUE retval;
1003 rb_vm_tag_jmpbuf_t buf;
1004 struct rb_vm_tag *prev;
1005 enum ruby_tag_type state;
1006 unsigned int lock_rec;
1007};
1008
1009STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1010STATIC_ASSERT(rb_vm_tag_buf_end,
1011 offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1012 sizeof(struct rb_vm_tag));
1013
1016 void *arg;
1017};
1018
1019struct rb_mutex_struct;
1020
1021typedef struct rb_fiber_struct rb_fiber_t;
1022
1024 struct rb_waiting_list *next;
1025 struct rb_thread_struct *thread;
1026 struct rb_fiber_struct *fiber;
1027};
1028
1030 /* execution information */
1031 VALUE *vm_stack; /* must free, must mark */
1032 size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1033 rb_control_frame_t *cfp;
1034
1035 struct rb_vm_tag *tag;
1036
1037 /* interrupt flags */
1038 rb_atomic_t interrupt_flag;
1039 rb_atomic_t interrupt_mask; /* size should match flag */
1040#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1041 uint32_t checked_clock;
1042#endif
1043
1044 rb_fiber_t *fiber_ptr;
1045 struct rb_thread_struct *thread_ptr;
1046
1047 /* storage (ec (fiber) local) */
1048 struct rb_id_table *local_storage;
1049 VALUE local_storage_recursive_hash;
1050 VALUE local_storage_recursive_hash_for_trace;
1051
1052 /* Inheritable fiber storage. */
1053 VALUE storage;
1054
1055 /* eval env */
1056 const VALUE *root_lep;
1057 VALUE root_svar;
1058
1059 /* trace information */
1060 struct rb_trace_arg_struct *trace_arg;
1061
1062 /* temporary places */
1063 VALUE errinfo;
1064 VALUE passed_block_handler; /* for rb_iterate */
1065
1066 uint8_t raised_flag; /* only 3 bits needed */
1067
1068 /* n.b. only 7 bits needed, really: */
1069 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1070
1071 VALUE private_const_reference;
1072
1073 struct {
1074 VALUE obj;
1075 VALUE fields_obj;
1076 } gen_fields_cache;
1077
1078 /* for GC */
1079 struct {
1080 VALUE *stack_start;
1081 VALUE *stack_end;
1082 size_t stack_maxsize;
1084
1085#ifdef RUBY_ASAN_ENABLED
1086 void *asan_fake_stack_handle;
1087#endif
1088 } machine;
1089};
1090
1091#ifndef rb_execution_context_t
1093#define rb_execution_context_t rb_execution_context_t
1094#endif
1095
1096// for builtin.h
1097#define VM_CORE_H_EC_DEFINED 1
1098
1099// Set the vm_stack pointer in the execution context.
1100void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1101
1102// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1103// @param ec the execution context to update.
1104// @param stack a pointer to the stack to use.
1105// @param size the size of the stack, as in `VALUE stack[size]`.
1106void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1107
1108// Clear (set to `NULL`) the vm_stack pointer.
1109// @param ec the execution context to update.
1110void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1111
1113 bool ractor_safe;
1114};
1115
1116typedef struct rb_ractor_struct rb_ractor_t;
1117
1118struct rb_native_thread;
1119
1120typedef struct rb_thread_struct {
1121 struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
1122 VALUE self;
1123 rb_ractor_t *ractor;
1124 rb_vm_t *vm;
1125 struct rb_native_thread *nt;
1127
1128 struct rb_thread_sched_item sched;
1129 bool mn_schedulable;
1130 rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1131
1132 VALUE last_status; /* $? */
1133
1134 /* for cfunc */
1135 struct rb_calling_info *calling;
1136
1137 /* for load(true) */
1138 VALUE top_self;
1139 VALUE top_wrapper;
1140 /* for namespace */
1141 VALUE namespaces; // Stack of namespaces
1142 rb_namespace_t *ns; // The current one
1143
1144 /* thread control */
1145
1146 BITFIELD(enum rb_thread_status, status, 2);
1147 /* bit flags */
1148 unsigned int has_dedicated_nt : 1;
1149 unsigned int to_kill : 1;
1150 unsigned int abort_on_exception: 1;
1151 unsigned int report_on_exception: 1;
1152 unsigned int pending_interrupt_queue_checked: 1;
1153 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1154 uint32_t running_time_us; /* 12500..800000 */
1155
1156 void *blocking_region_buffer;
1157
1158 VALUE thgroup;
1159 VALUE value;
1160
1161 /* temporary place of retval on OPT_CALL_THREADED_CODE */
1162#if OPT_CALL_THREADED_CODE
1163 VALUE retval;
1164#endif
1165
1166 /* async errinfo queue */
1167 VALUE pending_interrupt_queue;
1168 VALUE pending_interrupt_mask_stack;
1169
1170 /* interrupt management */
1171 rb_nativethread_lock_t interrupt_lock;
1172 struct rb_unblock_callback unblock;
1173 VALUE locking_mutex;
1174 struct rb_mutex_struct *keeping_mutexes;
1175 struct ccan_list_head interrupt_exec_tasks;
1176
1177 struct rb_waiting_list *join_list;
1178
1179 union {
1180 struct {
1181 VALUE proc;
1182 VALUE args;
1183 int kw_splat;
1184 } proc;
1185 struct {
1186 VALUE (*func)(void *);
1187 void *arg;
1188 } func;
1189 } invoke_arg;
1190
1191 enum thread_invoke_type {
1192 thread_invoke_type_none = 0,
1193 thread_invoke_type_proc,
1194 thread_invoke_type_ractor_proc,
1195 thread_invoke_type_func
1196 } invoke_type;
1197
1198 /* fiber */
1199 rb_fiber_t *root_fiber;
1200
1201 VALUE scheduler;
1202 unsigned int blocking;
1203
1204 /* misc */
1205 VALUE name;
1206 void **specific_storage;
1207
1208 struct rb_ext_config ext_config;
1209} rb_thread_t;
1210
1211static inline unsigned int
1212rb_th_serial(const rb_thread_t *th)
1213{
1214 return th ? (unsigned int)th->serial : 0;
1215}
1216
/* What kind of scope a defineclass operation creates; held in the low
 * three bits (VM_DEFINECLASS_TYPE_MASK) alongside the
 * VM_DEFINECLASS_FLAG_* bits defined below.
 * NOTE(review): presumably the flags operand of the `defineclass`
 * instruction — confirm against insns.def. */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;
1224
1225#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1226#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1227#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1228#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1229#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1230 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1231
1232/* iseq.c */
1233RUBY_SYMBOL_EXPORT_BEGIN
1234
1235/* node -> iseq */
1236rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1237rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1238rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1239rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1240rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1241 enum rb_iseq_type, const rb_compile_option_t*,
1242 VALUE script_lines);
1243
1244struct iseq_link_anchor;
1246 VALUE flags;
1247 VALUE reserved;
1248 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1249 const void *data;
1250};
1251static inline struct rb_iseq_new_with_callback_callback_func *
1252rb_iseq_new_with_callback_new_callback(
1253 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1254{
1256 IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1257 memo->func = func;
1258 memo->data = ptr;
1259
1260 return memo;
1261}
1262rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1263 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1264 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1265
1266VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1267int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1268
1269VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1270
1271RUBY_EXTERN VALUE rb_cISeq;
1272RUBY_EXTERN VALUE rb_cRubyVM;
1273RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1274RUBY_EXTERN VALUE rb_block_param_proxy;
1275RUBY_SYMBOL_EXPORT_END
1276
1277#define GetProcPtr(obj, ptr) \
1278 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1279
/* Internal representation of a Proc object; retrieved from a VALUE with
 * GetProcPtr(). */
typedef struct {
    const struct rb_block block;      /* captured block (iseq/ifunc/symbol/proc) */
    const rb_namespace_t *ns;
    unsigned int is_from_method: 1;   /* bool */
    unsigned int is_lambda: 1;        /* bool */
    unsigned int is_isolated: 1;      /* bool */
} rb_proc_t;
1287
1288RUBY_SYMBOL_EXPORT_BEGIN
1289VALUE rb_proc_isolate(VALUE self);
1290VALUE rb_proc_isolate_bang(VALUE self);
1291VALUE rb_proc_ractor_make_shareable(VALUE self);
1292RUBY_SYMBOL_EXPORT_END
1293
/* Heap-escaped environment object (imemo_env); created by vm_env_new()
 * and reachable from ep[VM_ENV_DATA_INDEX_ENV]. */
typedef struct {
    VALUE flags;                  /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;              /* environment pointer for this env */
    const VALUE *env;             /* body of the environment (locals) */
    unsigned int env_size;        /* number of VALUEs in `env` */
} rb_env_t;
1301
1302extern const rb_data_type_t ruby_binding_data_type;
1303
1304#define GetBindingPtr(obj, ptr) \
1305 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1306
/* Internal representation of a Binding object; retrieved from a VALUE
 * with GetBindingPtr(). */
typedef struct {
    const struct rb_block block;  /* block whose environment the binding captures */
    const VALUE pathobj;          /* source path object (see rb_iseq_pathobj_new) */
    int first_lineno;
} rb_binding_t;
1312
1313/* used by compile time and send insn */
1314
/* Match mode for the checkmatch instruction. */
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
/* Flag bit combined with the type: the pattern operand is an array. */
#define VM_CHECKMATCH_ARRAY 0x04
1323
/* Which Array method an optimized newarray+send sequence targets. */
enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};
1332
/* Special objects the VM can push (frozen core, cref bases). */
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

/* Indices into a frame's special-variable (svar) storage. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1, /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
1346
/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

/* case/when dispatch hash (opt_case_dispatch operand). */
typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

/* Signature of a VM instruction implemented as a C function. */
typedef rb_control_frame_t *
    (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

/* Tagged pointers: stash small tag bits in the low bits of an aligned
 * pointer stored as a VALUE. */
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

/* GC-guarded pointer: bit 0 set so the value looks like a Fixnum and is
 * skipped by GC marking (see the vm_frame_env_flags bit-layout comment). */
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1371
/* Flags stored in ep[VM_ENV_DATA_INDEX_FLAGS]; read/written via the
 * VM_ENV_FLAGS* accessors below. */
enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     * MMMM MMMM MMMM MMMM __FF FFFF FFFE EEEX (LSB)
     *
     * X : tag for GC marking (It seems as Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 9 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_IFUNC = 0x66660001,
    VM_FRAME_MAGIC_EVAL = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY = 0x79990001,

    /* magic bits plus the Fixnum tag bit; used by VM_FRAME_TYPE() */
    VM_FRAME_MAGIC_MASK = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME = 0x0080,
    VM_FRAME_FLAG_LAMBDA = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED = 0x0800,
    VM_FRAME_FLAG_NS_SWITCH = 0x1000,
    VM_FRAME_FLAG_LOAD_ISEQ = 0x2000,

    /* env flag */
    VM_ENV_FLAG_LOCAL = 0x0002,
    VM_ENV_FLAG_ESCAPED = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED = 0x0010,
};
1412
/* Number of control words at the top of every environment. */
#define VM_ENV_DATA_SIZE ( 3)

/* Offsets of the control words relative to the ep. */
#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1423
/* Set `flag` bits in ep[FLAGS]. The flags word is Fixnum-tagged (asserted),
 * so it is written via VM_FORCE_WRITE_SPECIAL_CONST. */
static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

/* Clear `flag` bits in ep[FLAGS]. */
static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

/* Return the bits of ep[FLAGS] selected by `flag` (0 if none set). */
static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}
1447
/* Frame magic (one of VM_FRAME_MAGIC_*) of the given control frame. */
static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

/* Nonzero iff the frame was created with lambda semantics. */
static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

/* Nonzero iff the CFRAME_KW flag is set on the frame. */
static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

/* Nonzero iff the FINISH flag is set on the frame. */
static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

/* Nonzero iff the frame belongs to a method defined from a block (bmethod). */
static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}
1477
/* True iff `iseq` is an iseq imemo object. */
static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif
1487
/* Nonzero iff the frame is a C frame. The assertion checks the invariant
 * that exactly one of "has a normal iseq" / "is a C frame" holds, except
 * for DUMMY frames. */
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

/* Nonzero iff the frame executes Ruby (iseq) code, i.e. not a C frame. */
static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}
1502
/* Nonzero iff the frame switched namespaces (VM_FRAME_FLAG_NS_SWITCH). */
static inline int
VM_FRAME_NS_SWITCH_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_NS_SWITCH) != 0;
}

/* True iff the frame's magic marks it as a cfunc frame. */
#define RUBYVM_CFUNC_FRAME_P(cfp) \
    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

/* Previous-ep links are stored GC-guarded (low bit tagged). */
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
1514
1515static inline int
1516VM_ENV_LOCAL_P(const VALUE *ep)
1517{
1518 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1519}
1520
/* For a non-local env, ep[SPECVAL] holds the GC-guarded previous ep. */
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

/* For a local env, ep[SPECVAL] holds the block handler instead. */
static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}
1534
1535#if VM_CHECK_MODE > 0
1536int rb_vm_ep_in_heap_p(const VALUE *ep);
1537#endif
1538
/* Nonzero iff the env has escaped to the heap; asserted against the
 * actual location of `ep` when VM_CHECK_MODE is on. */
static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

/* The env object (imemo_env or Qundef) of an escaped environment. */
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

/* Same as VM_ENV_ENVVAL() but returns it as an rb_env_t pointer. */
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}
1562
/* Allocate an imemo_env wrapping the given ep/body, and store the new
 * object back into env_ep[VM_ENV_DATA_INDEX_ENV]. */
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}
1573
/* Write through a const pointer (casts away const; no GC write barrier). */
static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

/* Like VM_FORCE_WRITE but asserts the value is a special constant,
 * which never needs a write barrier. */
static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

/* Write into a stack env slot; only valid while the env does not
 * require write barriers (asserted). */
static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}
1593
1594const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1595const VALUE *rb_vm_proc_local_ep(VALUE proc);
1596void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1597void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1598
1599VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1600
1601#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1602#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1603
1604#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1605 ((void *)(ecfp) > (void *)(cfp))
1606
/* One-past-the-end sentinel of the control frame area (frames grow
 * downward from the top of the VM stack; see RUBY_VM_NEXT_CONTROL_FRAME). */
static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

/* Nonzero iff `cfp` has moved past the end of the frame area. */
static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
1618
/* A block handler with low bits 0b01 is a tagged pointer to a captured
 * iseq block. */
static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

/* Tag a captured iseq block as a block handler (low bits 0b01). */
static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

/* Strip the tag from an iseq block handler (asserted). */
static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1649
1650static inline int
1651VM_BH_IFUNC_P(VALUE block_handler)
1652{
1653 if ((block_handler & 0x03) == 0x03) {
1654#if VM_CHECK_MODE > 0
1655 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1656 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1657#endif
1658 return 1;
1659 }
1660 else {
1661 return 0;
1662 }
1663}
1664
/* Tag a captured ifunc block as a block handler (low bits 0b11). */
static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

/* Strip the tag from an ifunc block handler (asserted). */
static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

/* Strip the tag from either kind of captured-block handler (asserted). */
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1688
/* Classify a block handler: tagged iseq block, tagged ifunc block,
 * Symbol, or (by elimination, asserted) Proc. */
static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

/* Debug check: the handler is either "none" or a classifiable value.
 * No-op when VM_CHECK_MODE is off. */
static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}
1713
/* Accessor for block->type; under VM_CHECK_MODE it also validates that
 * the union member matches the declared type. */
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

/* Set block->type, casting away the const of the embedding struct. */
static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}
1742
/* Block embedded in a Proc object (asserted to be a Proc). */
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

/* iseq of the block captured by a Proc (NULL for ifunc/symbol blocks). */
static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

/* ep of the block captured by a Proc (NULL for symbol blocks). */
static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}
1764
/* iseq of a block; follows proc blocks recursively, NULL for
 * ifunc/symbol blocks. */
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

/* ep of a block; follows proc blocks recursively, NULL for symbol blocks. */
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

/* self of a block; follows proc blocks recursively, Qundef for symbols. */
static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
1806
/* Symbol and Proc block handlers are stored untagged; these converters
 * are identity functions that only assert the expected type. */
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
1834
1835/* VM related object allocate functions */
1836VALUE rb_thread_alloc(VALUE klass);
1837VALUE rb_binding_alloc(VALUE klass);
1838VALUE rb_proc_alloc(VALUE klass);
1839VALUE rb_proc_dup(VALUE self);
1840
1841/* for debug */
1842extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1843extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1844extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1845
1846#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1847#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1848bool rb_vm_bugreport(const void *, FILE *);
1849typedef void (*ruby_sighandler_t)(int);
1850RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1851NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1852
1853/* functions about thread/vm execution */
1854RUBY_SYMBOL_EXPORT_BEGIN
1855VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1856VALUE rb_iseq_eval_with_refinement(const rb_iseq_t *iseq, VALUE mod);
1857VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1858VALUE rb_iseq_path(const rb_iseq_t *iseq);
1859VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1860RUBY_SYMBOL_EXPORT_END
1861
1862VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1863void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1864
1865int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1866void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1867
1868VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1869
1870VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
/* Create a Proc (is_lambda = 0) from a captured block. */
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

/* Create a lambda (is_lambda = 1) from a captured block. */
static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}
1882
1883VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1884VALUE rb_vm_env_local_variables(const rb_env_t *env);
1885VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
1886const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1887const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1888void rb_vm_inc_const_missing_count(void);
1889VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1890 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1891void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1892void rb_vm_pop_frame(rb_execution_context_t *ec);
1893
1894void rb_thread_start_timer_thread(void);
1895void rb_thread_stop_timer_thread(void);
1896void rb_thread_reset_timer_thread(void);
1897void rb_thread_wakeup_timer_thread(int);
1898
/* Initialize the VM's thread/ractor bookkeeping lists. */
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
    /* zombie list exists only on the pthread threading implementation */
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}
1908
1909typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1910rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1911rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1912VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1913int rb_vm_get_sourceline(const rb_control_frame_t *);
1914void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1915void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1916rb_thread_t * ruby_thread_from_native(void);
1917int ruby_thread_set_native(rb_thread_t *th);
1918int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1919void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1920void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1921VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1922
1923void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1924
1925#define rb_vm_register_special_exception(sp, e, m) \
1926 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1927
1928void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1929
1930rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
1931
1932const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1933
1934#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1935
1936#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1937 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1938 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1939 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1940 if (UNLIKELY((cfp) <= &bound[1])) { \
1941 vm_stackoverflow(); \
1942 } \
1943} while (0)
1944
1945#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1946 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
1947
1948VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1949
1950rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1951
1952/* for thread */
1953
1954#if RUBY_VM_THREAD_MODEL == 2
1955
1956RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1957RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1958RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1959RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1960RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1961
1962#define GET_VM() rb_current_vm()
1963#define GET_RACTOR() rb_current_ractor()
1964#define GET_THREAD() rb_current_thread()
1965#define GET_EC() rb_current_execution_context(true)
1966
/* Thread that owns the execution context. */
static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}
1972
1973static inline rb_ractor_t *
1974rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1975{
1976 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1977 if (th) {
1978 VM_ASSERT(th->ractor != NULL);
1979 return th->ractor;
1980 }
1981 else {
1982 return NULL;
1983 }
1984}
1985
1986static inline rb_vm_t *
1987rb_ec_vm_ptr(const rb_execution_context_t *ec)
1988{
1989 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1990 if (th) {
1991 return th->vm;
1992 }
1993 else {
1994 return NULL;
1995 }
1996}
1997
1998NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
1999
/* Current execution context from thread-local storage. When `expect_ec`
 * is true, asserts a non-NULL result. */
static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #if defined(__arm64__) || defined(__aarch64__)
    rb_execution_context_t * volatile ec = rb_current_ec();
  #else
    rb_execution_context_t * volatile ec = ruby_current_ec;
  #endif

    /* On the shared objects, `__tls_get_addr()` is used to access the TLS
     * and the address of the `ruby_current_ec` can be stored on a function
     * frame. However, this address can be mis-used after native thread
     * migration of a coroutine.
     *   1) Get `ptr =&ruby_current_ec` op NT1 and store it on the frame.
     *   2) Context switch and resume it on the NT2.
     *   3) `ptr` is used on NT2 but it accesses to the TLS on NT1.
     * This assertion checks such misusage.
     *
     * To avoid accidents, `GET_EC()` should be called once on the frame.
     * Note that inlining can produce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
2029
/* Thread of the current execution context (GET_THREAD). */
static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

/* Current ractor. Fast path: when only the main ractor exists, skip the
 * TLS lookup entirely. With `expect` false, may return NULL instead of
 * asserting a current EC. */
static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}

/* Current ractor, asserted non-NULL (GET_RACTOR). */
static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}
2054
/* The process-wide VM pointer (GET_VM). */
static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}
2068
2069void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2070 unsigned int recorded_lock_rec,
2071 unsigned int current_lock_rec);
2072
2073/* This technically is a data race, as it's checked without the lock, however we
2074 * check against a value only our own thread will write. */
/* True iff ractor `cr` currently holds the VM lock. Only meaningful when
 * called with the caller's own ractor (asserted). */
NO_SANITIZE("thread", static inline bool
vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
{
    VM_ASSERT(cr == GET_RACTOR());
    return vm->ractor.sync.lock_owner == cr;
}
2081
2082static inline unsigned int
2083rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2084{
2085 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2086
2087 if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
2088 return 0;
2089 }
2090 else {
2091 return vm->ractor.sync.lock_rec;
2092 }
2093}
2094
2095#else
2096#error "unsupported thread model"
2097#endif
2098
/* Bits of ec->interrupt_flag; set atomically by the macros below. */
enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2114
/* True iff an unmasked pending or trap interrupt is set on `ec`. */
static inline bool
RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
{
    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
}
2120
/* True iff any unmasked interrupt bit is set on `ec`. With USE_VM_CLOCK,
 * also converts a VM clock tick into a timer interrupt as a side effect. */
static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
}
2134
2135VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2136int rb_signal_buff_size(void);
2137int rb_signal_exec(rb_thread_t *th, int sig);
2138void rb_threadptr_check_signal(rb_thread_t *mth);
2139void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2140void rb_threadptr_signal_exit(rb_thread_t *th);
2141int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2142void rb_threadptr_interrupt(rb_thread_t *th);
2143void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2144void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2145void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2146VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2147void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2148void rb_execution_context_update(rb_execution_context_t *ec);
2149void rb_execution_context_mark(const rb_execution_context_t *ec);
2150void rb_fiber_close(rb_fiber_t *fib);
2151void Init_native_thread(rb_thread_t *th);
2152int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2153
2154// vm_sync.h
2155void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2156void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2157
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
/* Execute pending interrupts for `ec` if any are raised. Must be called
 * on the current EC (asserted) and never inside a declared critical
 * section. */
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == rb_current_ec_noinline());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
2172
2173/* tracer */
2174
2176 rb_event_flag_t event;
2178 const rb_control_frame_t *cfp;
2179 VALUE self;
2180 ID id;
2181 ID called_id;
2182 VALUE klass;
2183 VALUE data;
2184
2185 int klass_solved;
2186
2187 /* calc from cfp */
2188 int lineno;
2189 VALUE path;
2190};
2191
2192void rb_hook_list_mark(rb_hook_list_t *hooks);
2193void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
2194void rb_hook_list_free(rb_hook_list_t *hooks);
2195void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2196void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2197
2198void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2199
2200#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2201 const rb_event_flag_t flag_arg_ = (flag_); \
2202 rb_hook_list_t *hooks_arg_ = (hooks_); \
2203 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2204 /* defer evaluating the other arguments */ \
2205 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2206 } \
2207} while (0)
2208
/* Build a trace-arg on the stack and run the matching hooks. Callers
 * (the EXEC_EVENT_HOOK* macros) have already checked that `flag` is in
 * hooks->events (asserted here). */
static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;      /* computed lazily from cfp */
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}
2230
2232 VALUE self;
2233 uint32_t id;
2234 rb_hook_list_t hooks;
2235};
2236
/* Event-hook list of the EC's ractor, via the public ractor prefix. */
static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}
2243
2244#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2245 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2246
2247#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2248 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2249
/* Fire the :script_compiled event. Hook data is the iseq alone, or
 * [eval_script, iseq] when an eval source string is available. */
static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
2257
2258void rb_vm_trap_exit(rb_vm_t *vm);
2259void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2260void rb_vm_postponed_job_free(void); /* vm_trace.c */
2261size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2262void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2263
2264RUBY_SYMBOL_EXPORT_BEGIN
2265
2266int rb_thread_check_trap_pending(void);
2267
2268/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2269#define RUBY_EVENT_COVERAGE_LINE 0x010000
2270#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2271
2272extern VALUE rb_get_coverages(void);
2273extern void rb_set_coverages(VALUE, int, VALUE);
2274extern void rb_clear_coverages(void);
2275extern void rb_reset_coverages(void);
2276extern void rb_resume_coverages(void);
2277extern void rb_suspend_coverages(void);
2278
2279void rb_postponed_job_flush(rb_vm_t *vm);
2280
2281// ractor.c
2282RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2283RUBY_EXTERN VALUE rb_eRactorIsolationError;
2284
2285RUBY_SYMBOL_EXPORT_END
2286
2287#endif /* RUBY_VM_CORE_H */
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ALIGNAS
Wraps (or simulates) alignas.
Definition stdalign.h:27
#define RUBY_EXTERN
Declaration of externally visible global variables.
Definition dllexport.h:45
#define RUBY_EVENT_SCRIPT_COMPILED
Encountered an eval.
Definition event.h:60
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define RBIMPL_ATTR_FORMAT(x, y, z)
Wraps (or simulates) __attribute__((format))
Definition format.h:29
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define Qundef
Old name of RUBY_Qundef.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define NIL_P
Old name of RB_NIL_P.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void * rb_check_typeddata(VALUE obj, const rb_data_type_t *data_type)
Identical to rb_typeddata_is_kind_of(), except it raises exceptions instead of returning false.
Definition error.c:1397
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:120
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
VALUE rb_block_call_func(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))
This is the type of a function that the interpreter expect for C-backended blocks.
Definition iterator.h:83
VALUE type(ANYARGS)
ANYARGS-ed function type.
#define RBIMPL_ATTR_NONNULL(list)
Wraps (or simulates) __attribute__((nonnull))
Definition nonnull.h:30
#define inline
Old Visual Studio versions do not support the inline keyword, so we need to define it to be __inline.
Definition defines.h:91
Functions related to nodes in the AST.
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RTYPEDDATA_DATA(v)
Convenient getter macro.
Definition rtypeddata.h:103
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
Defines old _.
C99 shim for <stdbool.h>
Definition vm_core.h:261
const ID * segments
A null-terminated list of ids, used to represent a constant's path idNULL is used to represent the ::...
Definition vm_core.h:287
Definition vm_core.h:295
Definition vm_core.h:290
Definition iseq.h:251
Definition method.h:63
CREF (Class REFerence)
Definition method.h:45
Definition class.h:72
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:204
struct rb_iseq_constant_body::@154 param
parameter information
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:136
Internal header for Namespace.
Definition namespace.h:14
Definition st.h:79
IFUNC (Internal FUNCtion)
Definition imemo.h:85
Definition vm_core.h:253
Definition vm_core.h:299
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376