Ruby 3.5.0dev (2025-05-16 revision 04f538c1441e65def90d5b4224010e7d4f4ffab3)
vm_core.h (04f538c1441e65def90d5b4224010e7d4f4ffab3)
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
20// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
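/* Illustrative sketch (not part of the original logic above): VM_CHECK_MODE
 * is a compile-time knob, so it is normally injected when building, e.g.
 *
 *   ./configure cppflags=-DVM_CHECK_MODE=1
 *
 * Leaving it at the default 0 defers to RUBY_DEBUG via N_OR_RUBY_DEBUG(0),
 * so debug builds enable the VM assertions automatically.
 */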
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
58#if VM_CHECK_MODE > 0
59#define VM_ASSERT(expr, ...) \
60 RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62#define RUBY_ASSERT_CRITICAL_SECTION
63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64#else
65#define VM_ASSERT(/*expr, */...) ((void)0)
66#define VM_UNREACHABLE(func) UNREACHABLE
67#define RUBY_DEBUG_THREAD_SCHEDULE()
68#endif
69
70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
72#if defined(RUBY_ASSERT_CRITICAL_SECTION)
73/*
74# Critical Section Assertions
75
76These assertions ensure that context switching does not occur between two points in the code. In theory, such code
77should already be protected by a mutex, so the assertions verify that the mutex is actually held.
78
79They are especially useful when a mutex is held further up the call stack and the code in question does not hold the
80mutex directly. In that case, the critical section assertions confirm that the mutex is held by someone else further
81up the stack.
82
83These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which in turn is only defined when
84VM_CHECK_MODE is set.
85
86## Example Usage
87
88```c
89RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90// ... some code which does not invoke rb_vm_check_ints() ...
91RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92```
93
94If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96*/
97extern int ruby_assert_critical_section_entered;
98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100#else
101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103#endif
104
105#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106# include "wasm/setjmp.h"
107#else
108# include <setjmp.h>
109#endif
110
111#if defined(__linux__) || defined(__FreeBSD__)
112# define RB_THREAD_T_HAS_NATIVE_ID
113#endif
114
116#include "ccan/list/list.h"
117#include "id.h"
118#include "internal.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/namespace.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
126#include "method.h"
127#include "node.h"
128#include "ruby/ruby.h"
129#include "ruby/st.h"
130#include "ruby_atomic.h"
131#include "vm_opts.h"
132
133#include "ruby/thread_native.h"
134/*
135 * implementation selector of get_insn_info algorithm
136 * 0: linear search
137 * 1: binary search
138 * 2: succinct bitvector
139 */
140#ifndef VM_INSN_INFO_TABLE_IMPL
141# define VM_INSN_INFO_TABLE_IMPL 2
142#endif
143
144#if defined(NSIG_MAX) /* POSIX issue 8 */
145# undef NSIG
146# define NSIG NSIG_MAX
147#elif defined(_SIG_MAXSIG) /* FreeBSD */
148# undef NSIG
149# define NSIG _SIG_MAXSIG
150#elif defined(_SIGMAX) /* QNX */
151# define NSIG (_SIGMAX + 1)
152#elif defined(NSIG) /* 99% of everything else */
153# /* take it */
154#else /* Last resort */
155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156#endif
157
158#define RUBY_NSIG NSIG
159
160#if defined(SIGCLD)
161# define RUBY_SIGCHLD (SIGCLD)
162#elif defined(SIGCHLD)
163# define RUBY_SIGCHLD (SIGCHLD)
164#endif
165
166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167# define USE_SIGALTSTACK
168void *rb_allocate_sigaltstack(void);
169void *rb_register_sigaltstack(void *);
170# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171# define RB_ALTSTACK_FREE(var) free(var)
172# define RB_ALTSTACK(var) var
173#else /* noop */
174# define RB_ALTSTACK_INIT(var, altstack)
175# define RB_ALTSTACK_FREE(var)
176# define RB_ALTSTACK(var) (0)
177#endif
178
179#include THREAD_IMPL_H
180#define RUBY_VM_THREAD_MODEL 2
181
182/*****************/
183/* configuration */
184/*****************/
185
186/* gcc ver. check */
187#if defined(__GNUC__) && __GNUC__ >= 2
188
189#if OPT_TOKEN_THREADED_CODE
190#if OPT_DIRECT_THREADED_CODE
191#undef OPT_DIRECT_THREADED_CODE
192#endif
193#endif
194
195#else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
197/* disable threaded code options */
198#if OPT_DIRECT_THREADED_CODE
199#undef OPT_DIRECT_THREADED_CODE
200#endif
201#if OPT_TOKEN_THREADED_CODE
202#undef OPT_TOKEN_THREADED_CODE
203#endif
204#endif
205
206/* call threaded code */
207#if OPT_CALL_THREADED_CODE
208#if OPT_DIRECT_THREADED_CODE
209#undef OPT_DIRECT_THREADED_CODE
210#endif /* OPT_DIRECT_THREADED_CODE */
211#endif /* OPT_CALL_THREADED_CODE */
212
213void rb_vm_encoded_insn_data_table_init(void);
214typedef unsigned long rb_num_t;
215typedef signed long rb_snum_t;
216
217enum ruby_tag_type {
218 RUBY_TAG_NONE = 0x0,
219 RUBY_TAG_RETURN = 0x1,
220 RUBY_TAG_BREAK = 0x2,
221 RUBY_TAG_NEXT = 0x3,
222 RUBY_TAG_RETRY = 0x4,
223 RUBY_TAG_REDO = 0x5,
224 RUBY_TAG_RAISE = 0x6,
225 RUBY_TAG_THROW = 0x7,
226 RUBY_TAG_FATAL = 0x8,
227 RUBY_TAG_MASK = 0xf
228};
229
230#define TAG_NONE RUBY_TAG_NONE
231#define TAG_RETURN RUBY_TAG_RETURN
232#define TAG_BREAK RUBY_TAG_BREAK
233#define TAG_NEXT RUBY_TAG_NEXT
234#define TAG_RETRY RUBY_TAG_RETRY
235#define TAG_REDO RUBY_TAG_REDO
236#define TAG_RAISE RUBY_TAG_RAISE
237#define TAG_THROW RUBY_TAG_THROW
238#define TAG_FATAL RUBY_TAG_FATAL
239#define TAG_MASK RUBY_TAG_MASK
240
241enum ruby_vm_throw_flags {
242 VM_THROW_NO_ESCAPE_FLAG = 0x8000,
243 VM_THROW_STATE_MASK = 0xff
244};
245
246/* forward declarations */
247struct rb_thread_struct;
248struct rb_control_frame_struct;
249
250/* iseq data type */
251struct iseq_compile_data_ensure_node_stack;
252
253union ic_serial_entry {
254 rb_serial_t raw;
255 VALUE data[2];
256};
257
258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
260// imemo_constcache
261struct iseq_inline_constant_cache_entry {
262 VALUE flags;
263
264 VALUE value; // v0
265 VALUE _unused1; // v1
266 VALUE _unused2; // v2
267 const rb_cref_t *ic_cref; // v3
268};
269STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
270 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
271 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
272
289
290struct iseq_inline_iv_cache_entry {
291 uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
292 ID iv_set_name;
293};
294
295struct iseq_inline_cvar_cache_entry {
296 struct rb_cvar_class_tree_entry *entry;
297};
298
299union iseq_inline_storage_entry {
300 struct {
301 struct rb_thread_struct *running_thread;
302 VALUE value;
303 } once;
304 struct iseq_inline_constant_cache ic_cache;
305 struct iseq_inline_iv_cache_entry iv_cache;
306};
307
308struct rb_calling_info {
309 const struct rb_call_data *cd;
310 const struct rb_callcache *cc;
311 VALUE block_handler;
312 VALUE recv;
313 int argc;
314 bool kw_splat;
315 VALUE heap_argv;
316 const rb_namespace_t *proc_ns;
317};
318
319#ifndef VM_ARGC_STACK_MAX
320#define VM_ARGC_STACK_MAX 128
321#endif
322
323# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
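/* Illustrative: when a call spills more than VM_ARGC_STACK_MAX arguments,
 * they are packed into the `heap_argv` array and `argc` alone is no longer
 * authoritative, hence the helper above:
 *
 *   int n = CALLING_ARGC(calling); // == RARRAY_LENINT(heap_argv) if spilled
 */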
324
325struct rb_execution_context_struct;
326
327#if 1
328#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
329#else
330#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
331#endif
332#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
333
334typedef struct rb_iseq_location_struct {
335 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
336 VALUE base_label; /* String */
337 VALUE label; /* String */
338 int first_lineno;
339 int node_id;
340 rb_code_location_t code_location;
341} rb_iseq_location_t;
342
343#define PATHOBJ_PATH 0
344#define PATHOBJ_REALPATH 1
345
346static inline VALUE
347pathobj_path(VALUE pathobj)
348{
349 if (RB_TYPE_P(pathobj, T_STRING)) {
350 return pathobj;
351 }
352 else {
353 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
354 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
355 }
356}
357
358static inline VALUE
359pathobj_realpath(VALUE pathobj)
360{
361 if (RB_TYPE_P(pathobj, T_STRING)) {
362 return pathobj;
363 }
364 else {
365 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
366 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
367 }
368}
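/* Usage sketch: both accessors accept either encoding of pathobj, so
 * callers never inspect its shape themselves. Here `body` stands for a
 * hypothetical struct rb_iseq_constant_body pointer (defined below):
 *
 *   VALUE path = pathobj_path(body->location.pathobj);
 *   VALUE real = pathobj_realpath(body->location.pathobj);
 */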
369
370/* Forward declarations */
371typedef uintptr_t iseq_bits_t;
372
373#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
374
375/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
376#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
377
378/* instruction sequence type */
379enum rb_iseq_type {
380 ISEQ_TYPE_TOP,
381 ISEQ_TYPE_METHOD,
382 ISEQ_TYPE_BLOCK,
383 ISEQ_TYPE_CLASS,
384 ISEQ_TYPE_RESCUE,
385 ISEQ_TYPE_ENSURE,
386 ISEQ_TYPE_EVAL,
387 ISEQ_TYPE_MAIN,
388 ISEQ_TYPE_PLAIN
389};
390
391// Attributes specified by Primitive.attr!
392enum rb_builtin_attr {
393 // The iseq does not call methods.
394 BUILTIN_ATTR_LEAF = 0x01,
395 // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
396 BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
397 // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` is monomorphic.
398 BUILTIN_ATTR_INLINE_BLOCK = 0x04,
399 // The iseq acts like a C method in backtraces.
400 BUILTIN_ATTR_C_TRACE = 0x08,
401};
402
403typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
404
405struct rb_iseq_constant_body {
406 enum rb_iseq_type type;
407
408 unsigned int iseq_size;
409 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
410
434 struct {
435 struct {
436 unsigned int has_lead : 1;
437 unsigned int has_opt : 1;
438 unsigned int has_rest : 1;
439 unsigned int has_post : 1;
440 unsigned int has_kw : 1;
441 unsigned int has_kwrest : 1;
442 unsigned int has_block : 1;
443
444 unsigned int ambiguous_param0 : 1; /* {|a|} */
445 unsigned int accepts_no_kwarg : 1;
446 unsigned int ruby2_keywords: 1;
447 unsigned int anon_rest: 1;
448 unsigned int anon_kwrest: 1;
449 unsigned int use_block: 1;
450 unsigned int forwardable: 1;
451 } flags;
452
453 unsigned int size;
454
455 int lead_num;
456 int opt_num;
457 int rest_start;
458 int post_start;
459 int post_num;
460 int block_start;
461
462 const VALUE *opt_table; /* (opt_num + 1) entries. */
463 /* opt_num and opt_table:
464 *
465 * def foo o1=e1, o2=e2, ..., oN=eN
466 * #=>
467 * # prologue code
468 * A1: e1
469 * A2: e2
470 * ...
471 * AN: eN
472 * AL: body
473 * opt_num = N
474 * opt_table = [A1, A2, ..., AN, AL]
475 */
476
477 const struct rb_iseq_param_keyword {
478 int num;
479 int required_num;
480 int bits_start;
481 int rest_start;
482 const ID *table;
483 VALUE *default_values;
484 } *keyword;
485 } param;
486
487 rb_iseq_location_t location;
488
489 /* insn info, must be freed */
490 struct iseq_insn_info {
491 const struct iseq_insn_info_entry *body;
492 unsigned int *positions;
493 unsigned int size;
494#if VM_INSN_INFO_TABLE_IMPL == 2
495 struct succ_index_table *succ_index_table;
496#endif
497 } insns_info;
498
499 const ID *local_table; /* must free */
500
501 /* catch table */
502 struct iseq_catch_table *catch_table;
503
504 /* for child iseq */
505 const struct rb_iseq_struct *parent_iseq;
506 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
507
508 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
509 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
510
511 struct {
512 rb_snum_t flip_count;
513 VALUE script_lines;
514 VALUE coverage;
515 VALUE pc2branchindex;
516 VALUE *original_iseq;
517 } variable;
518
519 unsigned int local_table_size;
520 unsigned int ic_size; // Number of IC caches
521 unsigned int ise_size; // Number of ISE caches
522 unsigned int ivc_size; // Number of IVC caches
523 unsigned int icvarc_size; // Number of ICVARC caches
524 unsigned int ci_size;
525 unsigned int stack_max; /* for stack overflow check */
526
527 unsigned int builtin_attrs; // Union of rb_builtin_attr
528
529 bool prism; // ISEQ was generated from prism compiler
530
531 union {
532 iseq_bits_t * list; /* Find references for GC */
533 iseq_bits_t single;
534 } mark_bits;
535
536 struct rb_id_table *outer_variables;
537
538 const rb_iseq_t *mandatory_only_iseq;
539
540#if USE_YJIT || USE_ZJIT
541 // Function pointer for JIT code on jit_exec()
542 rb_jit_func_t jit_entry;
543 // Number of calls on jit_exec()
544 long unsigned jit_entry_calls;
545 // Function pointer for JIT code on jit_exec_exception()
546 rb_jit_func_t jit_exception;
547 // Number of calls on jit_exec_exception()
548 long unsigned jit_exception_calls;
549#endif
550
551#if USE_YJIT
552 // YJIT stores some data on each iseq.
553 void *yjit_payload;
554 // Used to estimate how frequently this ISEQ gets called
555 uint64_t yjit_calls_at_interv;
556#endif
557
558#if USE_ZJIT
559 // ZJIT stores some data on each iseq.
560 void *zjit_payload;
561#endif
562};
563
564/* T_IMEMO/iseq */
565/* typedef rb_iseq_t is in method.h */
566struct rb_iseq_struct {
567 VALUE flags; /* 1 */
568 VALUE wrapper; /* 2 */
569
570 struct rb_iseq_constant_body *body; /* 3 */
571
572 union { /* 4, 5 words */
573 struct iseq_compile_data *compile_data; /* used at compile time */
574
575 struct {
576 VALUE obj;
577 int index;
578 } loader;
579
580 struct {
581 struct rb_hook_list_struct *local_hooks;
582 rb_event_flag_t global_trace_events;
583 } exec;
584 } aux;
585};
586
587#define ISEQ_BODY(iseq) ((iseq)->body)
588
589#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
590#define USE_LAZY_LOAD 0
591#endif
592
593#if !USE_LAZY_LOAD
594static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
595#endif
596const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
597
598static inline const rb_iseq_t *
599rb_iseq_check(const rb_iseq_t *iseq)
600{
601 if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
602 rb_iseq_complete((rb_iseq_t *)iseq);
603 }
604 return iseq;
605}
606
607static inline bool
608rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
609{
610 return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
611}
612
613static inline const rb_iseq_t *
614def_iseq_ptr(rb_method_definition_t *def)
615{
616// TODO: revisit. To check the bug, enable this assertion.
617#if VM_CHECK_MODE > 0
618 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
619#endif
620 return rb_iseq_check(def->body.iseq.iseqptr);
621}
622
623enum ruby_special_exceptions {
624 ruby_error_reenter,
625 ruby_error_nomemory,
626 ruby_error_sysstack,
627 ruby_error_stackfatal,
628 ruby_error_stream_closed,
629 ruby_special_error_count
630};
631
632#define GetVMPtr(obj, ptr) \
633 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
634
635struct rb_vm_struct;
636typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
637
638typedef struct rb_at_exit_list {
639 rb_vm_at_exit_func *func;
640 struct rb_at_exit_list *next;
641} rb_at_exit_list;
642
643void *rb_objspace_alloc(void);
644void rb_objspace_free(void *objspace);
645void rb_objspace_call_finalizer(void);
646
647typedef struct rb_hook_list_struct {
648 struct rb_event_hook_struct *hooks;
649 rb_event_flag_t events;
650 unsigned int running;
651 bool need_clean;
652 bool is_local;
653} rb_hook_list_t;
654
655
656// see builtin.h for definition
657typedef const struct rb_builtin_function *RB_BUILTIN;
658
659struct global_object_list {
660 VALUE *varptr;
661 struct global_object_list *next;
662};
663
664typedef struct rb_vm_struct {
665 VALUE self;
666
667 struct {
668 struct ccan_list_head set;
669 unsigned int cnt;
670 unsigned int blocking_cnt;
671
672 struct rb_ractor_struct *main_ractor;
673 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
674
675 struct {
676 // monitor
677 rb_nativethread_lock_t lock;
678 struct rb_ractor_struct *lock_owner;
679 unsigned int lock_rec;
680
681 // join at exit
682 rb_nativethread_cond_t terminate_cond;
683 bool terminate_waiting;
684
685#ifndef RUBY_THREAD_PTHREAD_H
686 bool barrier_waiting;
687 unsigned int barrier_cnt;
688 rb_nativethread_cond_t barrier_cond;
689#endif
690 } sync;
691
692 // ractor scheduling
693 struct {
694 rb_nativethread_lock_t lock;
695 struct rb_ractor_struct *lock_owner;
696 bool locked;
697
698 rb_nativethread_cond_t cond; // GRQ
699 unsigned int snt_cnt; // count of shared NTs
700 unsigned int dnt_cnt; // count of dedicated NTs
701
702 unsigned int running_cnt;
703
704 unsigned int max_cpu;
705 struct ccan_list_head grq; // Global Ready Queue
706 unsigned int grq_cnt;
707
708 // running threads
709 struct ccan_list_head running_threads;
710
711 // threads which switch context by timeslice
712 struct ccan_list_head timeslice_threads;
713
714 struct ccan_list_head zombie_threads;
715
716 // true if the timeslice timer is not enabled
717 bool timeslice_wait_inf;
718
719 // barrier
720 rb_nativethread_cond_t barrier_complete_cond;
721 rb_nativethread_cond_t barrier_release_cond;
722 bool barrier_waiting;
723 unsigned int barrier_waiting_cnt;
724 unsigned int barrier_serial;
725 } sched;
726 } ractor;
727
728#ifdef USE_SIGALTSTACK
729 void *main_altstack;
730#endif
731
732 rb_serial_t fork_gen;
733
734 /* set in single-threaded processes only: */
735 volatile int ubf_async_safe;
736
737 unsigned int running: 1;
738 unsigned int thread_abort_on_exception: 1;
739 unsigned int thread_report_on_exception: 1;
740 unsigned int thread_ignore_deadlock: 1;
741
742 /* object management */
743 VALUE mark_object_ary;
744 struct global_object_list *global_object_list;
745 const VALUE special_exceptions[ruby_special_error_count];
746
747 /* namespace */
748 rb_namespace_t *main_namespace;
749
750 /* load */
751 VALUE top_self;
752 VALUE load_path;
753 VALUE load_path_snapshot;
754 VALUE load_path_check_cache;
755 VALUE expanded_load_path;
756 VALUE loaded_features;
757 VALUE loaded_features_snapshot;
758 VALUE loaded_features_realpaths;
759 VALUE loaded_features_realpath_map;
760 struct st_table *loaded_features_index;
761 struct st_table *loading_table;
762 // For running the init function of statically linked
763 // extensions when they are loaded
764 struct st_table *static_ext_inits;
765
766 /* signal */
767 struct {
768 VALUE cmd[RUBY_NSIG];
769 } trap_list;
770
771 /* postponed_job (async-signal-safe, and thread-safe) */
772 struct rb_postponed_job_queue *postponed_job_queue;
773
774 int src_encoding_index;
775
776 /* workqueue (thread-safe, NOT async-signal-safe) */
777 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
778 rb_nativethread_lock_t workqueue_lock;
779
780 VALUE orig_progname, progname;
781 VALUE coverages, me2counter;
782 int coverage_mode;
783
784 struct {
785 struct rb_objspace *objspace;
786 struct gc_mark_func_data_struct {
787 void *data;
788 void (*mark_func)(VALUE v, void *data);
789 } *mark_func_data;
790 } gc;
791
792 rb_at_exit_list *at_exit;
793
794 const struct rb_builtin_function *builtin_function_table;
795
796 st_table *ci_table;
797 struct rb_id_table *negative_cme_table;
798 st_table *overloaded_cme_table; // cme -> overloaded_cme
799 set_table *unused_block_warning_table;
800
801 // This id table contains a mapping from ID to ICs. It does this with ID
802 // keys and nested st_tables as values. The nested tables have ICs as keys
803 // and Qtrue as values. It is used when inline constant caches need to be
804 // invalidated or ISEQs are being freed.
805 struct rb_id_table *constant_cache;
806 ID inserting_constant_cache_id;
807
808#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
809#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
810#endif
811 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
812
813#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
814 uint32_t clock;
815#endif
816
817 /* params */
818 struct { /* size in byte */
819 size_t thread_vm_stack_size;
820 size_t thread_machine_stack_size;
821 size_t fiber_vm_stack_size;
822 size_t fiber_machine_stack_size;
823 } default_params;
824
825 // TODO: a single require_stack can't support multi-threaded require trees
826 VALUE require_stack;
827} rb_vm_t;
828
829/* default values */
830
831#define RUBY_VM_SIZE_ALIGN 4096
832
833#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
834#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
835#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
836#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
837
838#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
839#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
840#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
841#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
842#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
843#else
844#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
845#endif
846
847#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
848/* Sanitizers seem to consume A LOT of machine stack */
849#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
850#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
851#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
852#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
853#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
854#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
855#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
856#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
857#endif
858
859#ifndef VM_DEBUG_BP_CHECK
860#define VM_DEBUG_BP_CHECK 0
861#endif
862
863#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
864#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
865#endif
866
867struct rb_captured_block {
868 VALUE self;
869 const VALUE *ep;
870 union {
871 const rb_iseq_t *iseq;
872 const struct vm_ifunc *ifunc;
873 VALUE val;
874 } code;
875};
876
877enum rb_block_handler_type {
878 block_handler_type_iseq,
879 block_handler_type_ifunc,
880 block_handler_type_symbol,
881 block_handler_type_proc
882};
883
884enum rb_block_type {
885 block_type_iseq,
886 block_type_ifunc,
887 block_type_symbol,
888 block_type_proc
889};
890
891struct rb_block {
892 union {
893 struct rb_captured_block captured;
894 VALUE symbol;
895 VALUE proc;
896 } as;
897 enum rb_block_type type;
898};
899
900typedef struct rb_control_frame_struct {
901 const VALUE *pc; // cfp[0]
902 VALUE *sp; // cfp[1]
903 const rb_iseq_t *iseq; // cfp[2]
904 VALUE self; // cfp[3] / block[0]
905 const VALUE *ep; // cfp[4] / block[1]
906 const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
907 void *jit_return; // cfp[6] -- return address for JIT code
908#if VM_DEBUG_BP_CHECK
909 VALUE *bp_check; // cfp[7]
910#endif
911} rb_control_frame_t;
912
913extern const rb_data_type_t ruby_threadptr_data_type;
914
915static inline struct rb_thread_struct *
916rb_thread_ptr(VALUE thval)
917{
918 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
919}
920
921enum rb_thread_status {
922 THREAD_RUNNABLE,
923 THREAD_STOPPED,
924 THREAD_STOPPED_FOREVER,
925 THREAD_KILLED
926};
927
928#ifdef RUBY_JMP_BUF
929typedef RUBY_JMP_BUF rb_jmpbuf_t;
930#else
931typedef void *rb_jmpbuf_t[5];
932#endif
933
934/*
935 The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
936 long jump to a C frame associated with `rb_vm_tag`.
937
938 The use site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
939 following functions:
940 - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
941 - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
942
943 `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
944 `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
945*/
946#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
947/*
948 The WebAssembly target with Asyncify-based SJLJ needs
949 to capture the execution context by unwinding and
950 rewinding call frames into a jump buffer. That buffer
951 tends to be considerably larger than other architectures'
952 register-based buffers.
953 Therefore, we allocate the buffer on the heap in such
954 environments.
955*/
956typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
957
958#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
959
960static inline void
961rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
962{
963 *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
964}
965
966static inline void
967rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
968{
969 ruby_xfree(*jmpbuf);
970}
971#else
972typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
973
974#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
975
976static inline void
977rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
978{
979 // no-op
980}
981
982static inline void
983rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
984{
985 // no-op
986}
987#endif
988
989/*
990 the members which are written in EC_PUSH_TAG() should be placed at
991 the beginning and the end, so that the entire region is accessible.
992*/
993struct rb_vm_tag {
994 VALUE tag;
995 VALUE retval;
996 rb_vm_tag_jmpbuf_t buf;
997 struct rb_vm_tag *prev;
998 enum ruby_tag_type state;
999 unsigned int lock_rec;
1000};
1001
1002STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1003STATIC_ASSERT(rb_vm_tag_buf_end,
1004 offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1005 sizeof(struct rb_vm_tag));
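/* Sketch of the contract described above; EC_PUSH_TAG()/EC_POP_TAG() in
 * eval_intern.h are the real users, this is illustrative only:
 *
 *   struct rb_vm_tag tag;
 *   rb_vm_tag_jmpbuf_init(&tag.buf);
 *   if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
 *       // protected region; a longjmp through this tag lands here
 *   }
 *   rb_vm_tag_jmpbuf_deinit(&tag.buf);
 */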
1006
1007struct rb_unblock_callback {
1008 rb_unblock_function_t *func;
1009 void *arg;
1010};
1011
1012struct rb_mutex_struct;
1013
1014typedef struct rb_fiber_struct rb_fiber_t;
1015
1016struct rb_waiting_list {
1017 struct rb_waiting_list *next;
1018 struct rb_thread_struct *thread;
1019 struct rb_fiber_struct *fiber;
1020};
1021
1022struct rb_execution_context_struct {
1023 /* execution information */
1024 VALUE *vm_stack; /* must free, must mark */
1025 size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1026 rb_control_frame_t *cfp;
1027
1028 struct rb_vm_tag *tag;
1029
1030 /* interrupt flags */
1031 rb_atomic_t interrupt_flag;
1032 rb_atomic_t interrupt_mask; /* size should match flag */
1033#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1034 uint32_t checked_clock;
1035#endif
1036
1037 rb_fiber_t *fiber_ptr;
1038 struct rb_thread_struct *thread_ptr;
1039
1040 /* storage (ec (fiber) local) */
1041 struct rb_id_table *local_storage;
1042 VALUE local_storage_recursive_hash;
1043 VALUE local_storage_recursive_hash_for_trace;
1044
1045 /* Inheritable fiber storage. */
1046 VALUE storage;
1047
1048 /* eval env */
1049 const VALUE *root_lep;
1050 VALUE root_svar;
1051
1052 /* trace information */
1053 struct rb_trace_arg_struct *trace_arg;
1054
1055 /* temporary places */
1056 VALUE errinfo;
1057 VALUE passed_block_handler; /* for rb_iterate */
1058
1059 uint8_t raised_flag; /* only 3 bits needed */
1060
1061 /* n.b. only 7 bits needed, really: */
1062 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1063
1064 VALUE private_const_reference;
1065
1066 /* for GC */
1067 struct {
1068 VALUE *stack_start;
1069 VALUE *stack_end;
1070 size_t stack_maxsize;
1071 RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
1072
1073#ifdef RUBY_ASAN_ENABLED
1074 void *asan_fake_stack_handle;
1075#endif
1076 } machine;
1077};
1078
1079#ifndef rb_execution_context_t
1080typedef struct rb_execution_context_struct rb_execution_context_t;
1081#define rb_execution_context_t rb_execution_context_t
1082#endif
1083
1084// for builtin.h
1085#define VM_CORE_H_EC_DEFINED 1
1086
1087// Set the vm_stack pointer in the execution context.
1088void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1089
1090// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1091// @param ec the execution context to update.
1092// @param stack a pointer to the stack to use.
1093// @param size the size of the stack, as in `VALUE stack[size]`.
1094void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1095
1096// Clear (set to `NULL`) the vm_stack pointer.
1097// @param ec the execution context to update.
1098void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
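/* Illustrative lifecycle of the three helpers above (a sketch; thread and
 * fiber setup/teardown are the real callers):
 *
 *   VALUE *stack = ALLOC_N(VALUE, size);
 *   rb_ec_initialize_vm_stack(ec, stack, size); // sets vm_stack, pushes first frame
 *   // ... execute ...
 *   rb_ec_clear_vm_stack(ec);                   // vm_stack = NULL before freeing
 */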
1099
1100struct rb_ext_config {
1101 bool ractor_safe;
1102};
1103
1104typedef struct rb_ractor_struct rb_ractor_t;
1105
1106struct rb_native_thread;
1107
1108struct rb_thread_ractor_waiting {
1109 //enum rb_ractor_wait_status wait_status;
1110 int wait_status;
1111 //enum rb_ractor_wakeup_status wakeup_status;
1112 int wakeup_status;
1113 struct ccan_list_node waiting_node; // the rb_thread_t
1114 VALUE receiving_mutex; // protects Ractor.receive_if
1115#ifndef RUBY_THREAD_PTHREAD_H
1116 rb_nativethread_cond_t cond;
1117#endif
1118};
1119
1120typedef struct rb_thread_struct {
1121 struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
1122 VALUE self;
1123 rb_ractor_t *ractor;
1124 rb_vm_t *vm;
1125 struct rb_native_thread *nt;
1126 rb_execution_context_t *ec;
1127
1128 struct rb_thread_sched_item sched;
1129 bool mn_schedulable;
1130 rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1131
1132 struct rb_thread_ractor_waiting ractor_waiting;
1133
1134 VALUE last_status; /* $? */
1135
1136 /* for cfunc */
1137 struct rb_calling_info *calling;
1138
1139 /* for load(true) */
1140 VALUE top_self;
1141 VALUE top_wrapper;
1142 /* for namespace */
1143 VALUE namespaces; // Stack of namespaces
1144 rb_namespace_t *ns; // The current one
1145
1146 /* thread control */
1147
1148 BITFIELD(enum rb_thread_status, status, 2);
1149 /* bit flags */
1150 unsigned int has_dedicated_nt : 1;
1151 unsigned int to_kill : 1;
1152 unsigned int abort_on_exception: 1;
1153 unsigned int report_on_exception: 1;
1154 unsigned int pending_interrupt_queue_checked: 1;
1155 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1156 uint32_t running_time_us; /* 12500..800000 */
1157
1158 void *blocking_region_buffer;
1159
1160 VALUE thgroup;
1161 VALUE value;
1162
1163 /* temporary place of retval on OPT_CALL_THREADED_CODE */
1164#if OPT_CALL_THREADED_CODE
1165 VALUE retval;
1166#endif
1167
1168 /* async errinfo queue */
1169 VALUE pending_interrupt_queue;
1170 VALUE pending_interrupt_mask_stack;
1171
1172 /* interrupt management */
1173 rb_nativethread_lock_t interrupt_lock;
1174 struct rb_unblock_callback unblock;
1175 VALUE locking_mutex;
1176 struct rb_mutex_struct *keeping_mutexes;
1177 struct ccan_list_head interrupt_exec_tasks;
1178
1179 struct rb_waiting_list *join_list;
1180
1181 union {
1182 struct {
1183 VALUE proc;
1184 VALUE args;
1185 int kw_splat;
1186 } proc;
1187 struct {
1188 VALUE (*func)(void *);
1189 void *arg;
1190 } func;
1191 } invoke_arg;
1192
1193 enum thread_invoke_type {
1194 thread_invoke_type_none = 0,
1195 thread_invoke_type_proc,
1196 thread_invoke_type_ractor_proc,
1197 thread_invoke_type_func
1198 } invoke_type;
1199
1200 /* fiber */
1201 rb_fiber_t *root_fiber;
1202
1203 VALUE scheduler;
1204 unsigned int blocking;
1205
1206 /* misc */
1207 VALUE name;
1208 void **specific_storage;
1209
1210 struct rb_ext_config ext_config;
1211} rb_thread_t;
1212
1213static inline unsigned int
1214rb_th_serial(const rb_thread_t *th)
1215{
1216 return th ? (unsigned int)th->serial : 0;
1217}
1218
1219typedef enum {
1220 VM_DEFINECLASS_TYPE_CLASS = 0x00,
1221 VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1222 VM_DEFINECLASS_TYPE_MODULE = 0x02,
1223 /* 0x03..0x06 is reserved */
1224 VM_DEFINECLASS_TYPE_MASK = 0x07
1225} rb_vm_defineclass_type_t;
1226
1227#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1228#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1229#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1230#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1231#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1232 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
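/* Illustrative decoding: for `class Foo < Bar; end` the defineclass
 * instruction's flags operand would be
 *
 *   VM_DEFINECLASS_TYPE_CLASS | VM_DEFINECLASS_FLAG_HAS_SUPERCLASS
 *
 * so VM_DEFINECLASS_TYPE(flags) == VM_DEFINECLASS_TYPE_CLASS and
 * VM_DEFINECLASS_HAS_SUPERCLASS_P(flags) is nonzero; a scoped definition
 * such as `class A::B` would also set VM_DEFINECLASS_FLAG_SCOPED.
 */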
1233
1234/* iseq.c */
1235RUBY_SYMBOL_EXPORT_BEGIN
1236
1237/* node -> iseq */
1238rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1239rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1240rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1241rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1242rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1243 enum rb_iseq_type, const rb_compile_option_t*,
1244 VALUE script_lines);
1245
1246struct iseq_link_anchor;
1247struct rb_iseq_new_with_callback_callback_func {
1248 VALUE flags;
1249 VALUE reserved;
1250 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1251 const void *data;
1252};
1253static inline struct rb_iseq_new_with_callback_callback_func *
1254rb_iseq_new_with_callback_new_callback(
1255 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1256{
1257 struct rb_iseq_new_with_callback_callback_func *memo =
1258 IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1259 memo->func = func;
1260 memo->data = ptr;
1261
1262 return memo;
1263}
1264rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1265 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1266 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1267
1268VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1269int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1270
1271VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1272
1273RUBY_EXTERN VALUE rb_cISeq;
1274RUBY_EXTERN VALUE rb_cRubyVM;
1275RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1276RUBY_EXTERN VALUE rb_block_param_proxy;
1277RUBY_SYMBOL_EXPORT_END
1278
1279#define GetProcPtr(obj, ptr) \
1280 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1281
1282typedef struct {
1283 const struct rb_block block;
1284 const rb_namespace_t *ns;
1285 unsigned int is_from_method: 1; /* bool */
1286 unsigned int is_lambda: 1; /* bool */
1287 unsigned int is_isolated: 1; /* bool */
1288} rb_proc_t;
1289
1290RUBY_SYMBOL_EXPORT_BEGIN
1291VALUE rb_proc_isolate(VALUE self);
1292VALUE rb_proc_isolate_bang(VALUE self);
1293VALUE rb_proc_ractor_make_shareable(VALUE self);
1294RUBY_SYMBOL_EXPORT_END
1295
1296typedef struct {
1297 VALUE flags; /* imemo header */
1298 rb_iseq_t *iseq;
1299 const VALUE *ep;
1300 const VALUE *env;
1301 unsigned int env_size;
1302} rb_env_t;
1303
1304extern const rb_data_type_t ruby_binding_data_type;
1305
1306#define GetBindingPtr(obj, ptr) \
1307 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1308
1309typedef struct {
1310 const struct rb_block block;
1311 const VALUE pathobj;
1312 int first_lineno;
1313} rb_binding_t;
1314
1315/* used at compile time and by the send insn */
1316
1317enum vm_check_match_type {
1318 VM_CHECKMATCH_TYPE_WHEN = 1,
1319 VM_CHECKMATCH_TYPE_CASE = 2,
1320 VM_CHECKMATCH_TYPE_RESCUE = 3
1321};
1322
1323#define VM_CHECKMATCH_TYPE_MASK 0x03
1324#define VM_CHECKMATCH_ARRAY 0x04
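/* Roughly how the checkmatch instruction interprets these (illustrative):
 * VM_CHECKMATCH_TYPE_WHEN tests the pattern's own truthiness (a `case`
 * with no target), VM_CHECKMATCH_TYPE_CASE performs `pattern === target`,
 * and VM_CHECKMATCH_TYPE_RESCUE does the same for rescue clauses while
 * requiring the pattern to be a class or module. VM_CHECKMATCH_ARRAY is
 * OR-ed in when the operand is a splatted array of patterns.
 */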
1325
1326enum vm_opt_newarray_send_type {
1327 VM_OPT_NEWARRAY_SEND_MAX = 1,
1328 VM_OPT_NEWARRAY_SEND_MIN = 2,
1329 VM_OPT_NEWARRAY_SEND_HASH = 3,
1330 VM_OPT_NEWARRAY_SEND_PACK = 4,
1331 VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1332 VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
1333};
1334
1335enum vm_special_object_type {
1336 VM_SPECIAL_OBJECT_VMCORE = 1,
1337 VM_SPECIAL_OBJECT_CBASE,
1338 VM_SPECIAL_OBJECT_CONST_BASE
1339};
1340
1341enum vm_svar_index {
1342 VM_SVAR_LASTLINE = 0, /* $_ */
1343 VM_SVAR_BACKREF = 1, /* $~ */
1344
1345 VM_SVAR_EXTRA_START = 2,
1346 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1347};
1348
1349/* inline cache */
1350typedef struct iseq_inline_constant_cache *IC;
1351typedef struct iseq_inline_iv_cache_entry *IVC;
1352typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1353typedef union iseq_inline_storage_entry *ISE;
1354typedef const struct rb_callinfo *CALL_INFO;
1355typedef const struct rb_callcache *CALL_CACHE;
1356typedef struct rb_call_data *CALL_DATA;
1357
1358typedef VALUE CDHASH;
1359
1360#ifndef FUNC_FASTCALL
1361#define FUNC_FASTCALL(x) x
1362#endif
1363
1364typedef rb_control_frame_t *
1365 (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1366
1367#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1368#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1369
1370#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1371#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1372#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
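/* Tagging scheme used by the VM_BH_* helpers below: the low two bits of a
 * block handler VALUE discriminate its type,
 *
 *   ...01  pointer to a captured block whose code is an iseq
 *   ...11  pointer to a captured block whose code is an ifunc
 *
 * while Symbol and Proc block handlers are ordinary, untagged VALUEs.
 * GC_GUARDED_PTR applies the same 0x01 tag to saved prev-EP pointers.
 */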
1373
1374enum vm_frame_env_flags {
1375 /* Frame/Environment flag bits:
1376 * MMMM MMMM MMMM MMMM __FF FFFF FFFE EEEX (LSB)
1377 *
1378 * X : tag for GC marking (it looks like a Fixnum)
1379 * EEE : 4 bits Env flags
1380 * FF..: 9 bits Frame flags
1381 * MM..: 15 bits frame magic (to check frame corruption)
1382 */
1383
1384 /* frame types */
1385 VM_FRAME_MAGIC_METHOD = 0x11110001,
1386 VM_FRAME_MAGIC_BLOCK = 0x22220001,
1387 VM_FRAME_MAGIC_CLASS = 0x33330001,
1388 VM_FRAME_MAGIC_TOP = 0x44440001,
1389 VM_FRAME_MAGIC_CFUNC = 0x55550001,
1390 VM_FRAME_MAGIC_IFUNC = 0x66660001,
1391 VM_FRAME_MAGIC_EVAL = 0x77770001,
1392 VM_FRAME_MAGIC_RESCUE = 0x78880001,
1393 VM_FRAME_MAGIC_DUMMY = 0x79990001,
1394
1395 VM_FRAME_MAGIC_MASK = 0x7fff0001,
1396
1397 /* frame flag */
1398 VM_FRAME_FLAG_FINISH = 0x0020,
1399 VM_FRAME_FLAG_BMETHOD = 0x0040,
1400 VM_FRAME_FLAG_CFRAME = 0x0080,
1401 VM_FRAME_FLAG_LAMBDA = 0x0100,
1402 VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1403 VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1404 VM_FRAME_FLAG_PASSED = 0x0800,
1405 VM_FRAME_FLAG_NS_SWITCH = 0x1000,
1406 VM_FRAME_FLAG_LOAD_ISEQ = 0x2000,
1407
1408 /* env flag */
1409 VM_ENV_FLAG_LOCAL = 0x0002,
1410 VM_ENV_FLAG_ESCAPED = 0x0004,
1411 VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1412 VM_ENV_FLAG_ISOLATED = 0x0010,
1413};
1414
1415#define VM_ENV_DATA_SIZE ( 3)
1416
1417#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1418#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1419#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1420#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1421
1422#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
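/* Layout around an ep, per the indices above (illustrative):
 *
 *   ep[-3]  last local variable          (VM_ENV_INDEX_LAST_LVAR)
 *   ep[-2]  method entry / cref          (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]  specval: block handler or guarded prev EP
 *   ep[ 0]  flags, Fixnum-tagged         (see enum vm_frame_env_flags)
 *   ep[ 1]  envval, for escaped envs     (VM_ENV_DATA_INDEX_ENV)
 */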
1423
1424static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1425
1426static inline void
1427VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1428{
1429 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1430 VM_ASSERT(FIXNUM_P(flags));
1431 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1432}
1433
1434static inline void
1435VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1436{
1437 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1438 VM_ASSERT(FIXNUM_P(flags));
1439 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1440}
1441
1442static inline unsigned long
1443VM_ENV_FLAGS(const VALUE *ep, long flag)
1444{
1445 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1446 VM_ASSERT(FIXNUM_P(flags));
1447 return flags & flag;
1448}
1449
1450static inline unsigned long
1451VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1452{
1453 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1454}
1455
1456static inline int
1457VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1458{
1459 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1460}
1461
1462static inline int
1463VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1464{
1465 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1466}
1467
1468static inline int
1469VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1470{
1471 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1472}
1473
1474static inline int
1475VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1476{
1477 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1478}
1479
1480static inline int
1481rb_obj_is_iseq(VALUE iseq)
1482{
1483 return imemo_type_p(iseq, imemo_iseq);
1484}
1485
1486#if VM_CHECK_MODE > 0
1487#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1488#endif
1489
1490static inline int
1491VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1492{
1493 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1494 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1495 (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1496 return cframe_p;
1497}
1498
1499static inline int
1500VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1501{
1502 return !VM_FRAME_CFRAME_P(cfp);
1503}
1504
1505static inline int
1506VM_FRAME_NS_SWITCH_P(const rb_control_frame_t *cfp)
1507{
1508 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_NS_SWITCH) != 0;
1509}
1510
1511#define RUBYVM_CFUNC_FRAME_P(cfp) \
1512 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1513
1514#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1515#define VM_BLOCK_HANDLER_NONE 0
1516
1517static inline int
1518VM_ENV_LOCAL_P(const VALUE *ep)
1519{
1520 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1521}
1522
1523static inline const VALUE *
1524VM_ENV_PREV_EP(const VALUE *ep)
1525{
1526 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1527 return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1528}
1529
1530static inline VALUE
1531VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1532{
1533 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1534 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1535}
1536
1537#if VM_CHECK_MODE > 0
1538int rb_vm_ep_in_heap_p(const VALUE *ep);
1539#endif
1540
1541static inline int
1542VM_ENV_ESCAPED_P(const VALUE *ep)
1543{
1544 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1545 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1546}
1547
1549static inline VALUE
1550VM_ENV_ENVVAL(const VALUE *ep)
1551{
1552 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1553 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1554 VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1555 return envval;
1556}
1557
1559static inline const rb_env_t *
1560VM_ENV_ENVVAL_PTR(const VALUE *ep)
1561{
1562 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1563}
1564
1565static inline const rb_env_t *
1566vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1567{
1568 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1569 env->ep = env_ep;
1570 env->env = env_body;
1571 env->env_size = env_size;
1572 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1573 return env;
1574}
1575
1576static inline void
1577VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1578{
1579 *((VALUE *)ptr) = v;
1580}
1581
1582static inline void
1583VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1584{
1585 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1586 VM_FORCE_WRITE(ptr, special_const_value);
1587}
1588
1589static inline void
1590VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1591{
1592 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1593 VM_FORCE_WRITE(&ep[index], v);
1594}
1595
1596const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1597const VALUE *rb_vm_proc_local_ep(VALUE proc);
1598void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1599void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1600
1601VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1602
1603#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1604#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
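/* Control frames are stacked downward in memory, so the caller (the
 * "previous" frame) lives at the higher address (cfp)+1. Walking one
 * frame toward the caller (a sketch):
 *
 *   const rb_control_frame_t *caller = RUBY_VM_PREVIOUS_CONTROL_FRAME(ec->cfp);
 */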
1605
1606#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1607 ((void *)(ecfp) > (void *)(cfp))
1608
1609static inline const rb_control_frame_t *
1610RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1611{
1612 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1613}
1614
1615static inline int
1616RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1617{
1618 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1619}
1620
1621static inline int
1622VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1623{
1624 if ((block_handler & 0x03) == 0x01) {
1625#if VM_CHECK_MODE > 0
1626 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1627 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1628#endif
1629 return 1;
1630 }
1631 else {
1632 return 0;
1633 }
1634}
1635
1636static inline VALUE
1637VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1638{
1639 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1640 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1641 return block_handler;
1642}
1643
1644static inline const struct rb_captured_block *
1645VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1646{
1647 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1648 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1649 return captured;
1650}
1651
1652static inline int
1653VM_BH_IFUNC_P(VALUE block_handler)
1654{
1655 if ((block_handler & 0x03) == 0x03) {
1656#if VM_CHECK_MODE > 0
1657 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1658 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1659#endif
1660 return 1;
1661 }
1662 else {
1663 return 0;
1664 }
1665}
1666
1667static inline VALUE
1668VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1669{
1670 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1671 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1672 return block_handler;
1673}
1674
1675static inline const struct rb_captured_block *
1676VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1677{
1678 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1679 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1680 return captured;
1681}
1682
1683static inline const struct rb_captured_block *
1684VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1685{
1686 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1687 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1688 return captured;
1689}
1690
1691static inline enum rb_block_handler_type
1692vm_block_handler_type(VALUE block_handler)
1693{
1694 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1695 return block_handler_type_iseq;
1696 }
1697 else if (VM_BH_IFUNC_P(block_handler)) {
1698 return block_handler_type_ifunc;
1699 }
1700 else if (SYMBOL_P(block_handler)) {
1701 return block_handler_type_symbol;
1702 }
1703 else {
1704 VM_ASSERT(rb_obj_is_proc(block_handler));
1705 return block_handler_type_proc;
1706 }
1707}
1708
1709static inline void
1710vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1711{
1712 VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1713 (vm_block_handler_type(block_handler), 1));
1714}
1715
1716static inline enum rb_block_type
1717vm_block_type(const struct rb_block *block)
1718{
1719#if VM_CHECK_MODE > 0
1720 switch (block->type) {
1721 case block_type_iseq:
1722 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1723 break;
1724 case block_type_ifunc:
1725 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1726 break;
1727 case block_type_symbol:
1728 VM_ASSERT(SYMBOL_P(block->as.symbol));
1729 break;
1730 case block_type_proc:
1731 VM_ASSERT(rb_obj_is_proc(block->as.proc));
1732 break;
1733 }
1734#endif
1735 return block->type;
1736}
1737
1738static inline void
1739vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1740{
1741 struct rb_block *mb = (struct rb_block *)block;
1742 mb->type = type;
1743}
1744
1745static inline const struct rb_block *
1746vm_proc_block(VALUE procval)
1747{
1748 VM_ASSERT(rb_obj_is_proc(procval));
1749 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1750}
1751
1752static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1753static inline const VALUE *vm_block_ep(const struct rb_block *block);
1754
1755static inline const rb_iseq_t *
1756vm_proc_iseq(VALUE procval)
1757{
1758 return vm_block_iseq(vm_proc_block(procval));
1759}
1760
1761static inline const VALUE *
1762vm_proc_ep(VALUE procval)
1763{
1764 return vm_block_ep(vm_proc_block(procval));
1765}
1766
1767static inline const rb_iseq_t *
1768vm_block_iseq(const struct rb_block *block)
1769{
1770 switch (vm_block_type(block)) {
1771 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1772 case block_type_proc: return vm_proc_iseq(block->as.proc);
1773 case block_type_ifunc:
1774 case block_type_symbol: return NULL;
1775 }
1776 VM_UNREACHABLE(vm_block_iseq);
1777 return NULL;
1778}
1779
1780static inline const VALUE *
1781vm_block_ep(const struct rb_block *block)
1782{
1783 switch (vm_block_type(block)) {
1784 case block_type_iseq:
1785 case block_type_ifunc: return block->as.captured.ep;
1786 case block_type_proc: return vm_proc_ep(block->as.proc);
1787 case block_type_symbol: return NULL;
1788 }
1789 VM_UNREACHABLE(vm_block_ep);
1790 return NULL;
1791}
1792
1793static inline VALUE
1794vm_block_self(const struct rb_block *block)
1795{
1796 switch (vm_block_type(block)) {
1797 case block_type_iseq:
1798 case block_type_ifunc:
1799 return block->as.captured.self;
1800 case block_type_proc:
1801 return vm_block_self(vm_proc_block(block->as.proc));
1802 case block_type_symbol:
1803 return Qundef;
1804 }
1805 VM_UNREACHABLE(vm_block_self);
1806 return Qundef;
1807}
1808
1809static inline VALUE
1810VM_BH_TO_SYMBOL(VALUE block_handler)
1811{
1812 VM_ASSERT(SYMBOL_P(block_handler));
1813 return block_handler;
1814}
1815
1816static inline VALUE
1817VM_BH_FROM_SYMBOL(VALUE symbol)
1818{
1819 VM_ASSERT(SYMBOL_P(symbol));
1820 return symbol;
1821}
1822
1823static inline VALUE
1824VM_BH_TO_PROC(VALUE block_handler)
1825{
1826 VM_ASSERT(rb_obj_is_proc(block_handler));
1827 return block_handler;
1828}
1829
1830static inline VALUE
1831VM_BH_FROM_PROC(VALUE procval)
1832{
1833 VM_ASSERT(rb_obj_is_proc(procval));
1834 return procval;
1835}
1836
1837/* VM related object allocate functions */
1838VALUE rb_thread_alloc(VALUE klass);
1839VALUE rb_binding_alloc(VALUE klass);
1840VALUE rb_proc_alloc(VALUE klass);
1841VALUE rb_proc_dup(VALUE self);
1842
1843/* for debug */
1844extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1845extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1846extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1847
1848#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1849#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1850bool rb_vm_bugreport(const void *, FILE *);
1851typedef void (*ruby_sighandler_t)(int);
1852RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1853NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1854
1855/* functions about thread/vm execution */
1856RUBY_SYMBOL_EXPORT_BEGIN
1857VALUE rb_iseq_eval(const rb_iseq_t *iseq);
1858VALUE rb_iseq_eval_with_refinement(const rb_iseq_t *iseq, VALUE mod);
1859VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1860VALUE rb_iseq_path(const rb_iseq_t *iseq);
1861VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1862RUBY_SYMBOL_EXPORT_END
1863
1864VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1865void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1866
1867int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1868void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1869
1870VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1871
1872VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1873static inline VALUE
1874rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1875{
1876 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1877}
1878
1879static inline VALUE
1880rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1881{
1882 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1883}
1884
1885VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1886VALUE rb_vm_env_local_variables(const rb_env_t *env);
1887VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
1888const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1889const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1890void rb_vm_inc_const_missing_count(void);
1891VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1892 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1893void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1894void rb_vm_pop_frame(rb_execution_context_t *ec);
1895
1896void rb_thread_start_timer_thread(void);
1897void rb_thread_stop_timer_thread(void);
1898void rb_thread_reset_timer_thread(void);
1899void rb_thread_wakeup_timer_thread(int);
1900
1901static inline void
1902rb_vm_living_threads_init(rb_vm_t *vm)
1903{
1904 ccan_list_head_init(&vm->workqueue);
1905 ccan_list_head_init(&vm->ractor.set);
1906 ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1907}
1908
1909typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1910rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1911rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1912VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1913int rb_vm_get_sourceline(const rb_control_frame_t *);
1914void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1915void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1916rb_thread_t * ruby_thread_from_native(void);
1917int ruby_thread_set_native(rb_thread_t *th);
1918int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1919void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1920void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1921VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1922
1923void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1924
1925#define rb_vm_register_special_exception(sp, e, m) \
1926 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1927
1928void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1929
1930void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);
1931
1932const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1933
1934#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1935
1936#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1937 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1938 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1939 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
1940 if (UNLIKELY((cfp) <= &bound[1])) { \
1941 vm_stackoverflow(); \
1942 } \
1943} while (0)
1944
1945#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
1946 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
1947
1948VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
1949
1950rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
1951
1952/* for thread */
1953
1954#if RUBY_VM_THREAD_MODEL == 2
1955
1956RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
1957RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
1958RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
1959RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
1960RUBY_EXTERN unsigned int ruby_vm_event_local_num;
1961
1962#define GET_VM() rb_current_vm()
1963#define GET_RACTOR() rb_current_ractor()
1964#define GET_THREAD() rb_current_thread()
1965#define GET_EC() rb_current_execution_context(true)
1966
1967static inline rb_thread_t *
1968rb_ec_thread_ptr(const rb_execution_context_t *ec)
1969{
1970 return ec->thread_ptr;
1971}
1972
1973static inline rb_ractor_t *
1974rb_ec_ractor_ptr(const rb_execution_context_t *ec)
1975{
1976 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1977 if (th) {
1978 VM_ASSERT(th->ractor != NULL);
1979 return th->ractor;
1980 }
1981 else {
1982 return NULL;
1983 }
1984}
1985
1986static inline rb_vm_t *
1987rb_ec_vm_ptr(const rb_execution_context_t *ec)
1988{
1989 const rb_thread_t *th = rb_ec_thread_ptr(ec);
1990 if (th) {
1991 return th->vm;
1992 }
1993 else {
1994 return NULL;
1995 }
1996}
1997
1998NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
1999
2000static inline rb_execution_context_t *
2001rb_current_execution_context(bool expect_ec)
2002{
2003#ifdef RB_THREAD_LOCAL_SPECIFIER
2004 #if defined(__arm64__) || defined(__aarch64__)
2005 rb_execution_context_t *ec = rb_current_ec();
2006 #else
2007 rb_execution_context_t *ec = ruby_current_ec;
2008 #endif
2009
2010 /* In shared objects, `__tls_get_addr()` is used to access the TLS,
2011 * and the address of `ruby_current_ec` can be stored in a function
2012 * frame. However, this address can be misused after a coroutine
2013 * migrates to another native thread:
2014 * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
2015 * 2) Context switch and resume the coroutine on NT2.
2016 * 3) `ptr` is used on NT2, but it still accesses the TLS of NT1.
2017 * This assertion checks for such misuse.
2018 *
2019 * To avoid accidents, `GET_EC()` should be called once per frame.
2020 * Note that inlining can introduce the problem.
2021 */
2022 VM_ASSERT(ec == rb_current_ec_noinline());
2023#else
2024 rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
2025#endif
2026 VM_ASSERT(!expect_ec || ec != NULL);
2027 return ec;
2028}
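/*
The note above implies a coding rule for callers: resolve the TLS once per C
frame and reuse the resulting pointer, rather than calling `GET_EC()` again
after a point where the coroutine may have migrated to another native thread.
A minimal sketch, with `may_block_and_migrate()` and `use_ec()` as
hypothetical stand-ins:

```c
void
example(void)
{
    rb_execution_context_t *ec = GET_EC(); /* resolve TLS exactly once */
    may_block_and_migrate();
    use_ec(ec); /* the saved ec stays valid; a fresh TLS access might not */
}
```
*/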
2029
2030static inline rb_thread_t *
2031rb_current_thread(void)
2032{
2033 const rb_execution_context_t *ec = GET_EC();
2034 return rb_ec_thread_ptr(ec);
2035}
2036
2037static inline rb_ractor_t *
2038rb_current_ractor_raw(bool expect)
2039{
2040 if (ruby_single_main_ractor) {
2041 return ruby_single_main_ractor;
2042 }
2043 else {
2044 const rb_execution_context_t *ec = rb_current_execution_context(expect);
2045 return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2046 }
2047}
2048
2049static inline rb_ractor_t *
2050rb_current_ractor(void)
2051{
2052 return rb_current_ractor_raw(true);
2053}
2054
2055static inline rb_vm_t *
2056rb_current_vm(void)
2057{
2058#if 0 // TODO: reconsider the assertions
2059 VM_ASSERT(ruby_current_vm_ptr == NULL ||
2060 ruby_current_execution_context_ptr == NULL ||
2061 rb_ec_thread_ptr(GET_EC()) == NULL ||
2062 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2063 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2064#endif
2065
2066 return ruby_current_vm_ptr;
2067}
2068
2069void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2070 unsigned int recorded_lock_rec,
2071 unsigned int current_lock_rec);
2072
2073static inline unsigned int
2074rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2075{
2076 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2077
2078 if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
2079 return 0;
2080 }
2081 else {
2082 return vm->ractor.sync.lock_rec;
2083 }
2084}
2085
2086#else
2087#error "unsupported thread model"
2088#endif
2089
2090enum {
2091 TIMER_INTERRUPT_MASK = 0x01,
2092 PENDING_INTERRUPT_MASK = 0x02,
2093 POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2094 TRAP_INTERRUPT_MASK = 0x08,
2095 TERMINATE_INTERRUPT_MASK = 0x10,
2096 VM_BARRIER_INTERRUPT_MASK = 0x20,
2097};
2098
2099#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2100#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2101#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2102#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2103#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2104#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2105#define RUBY_VM_INTERRUPTED(ec) ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
2106 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
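/*
Each bit of `interrupt_flag` is set atomically by one of the macros above and
is only observed while the corresponding bit of `interrupt_mask` is clear. A
sketch of the flow for a pending interrupt (not a real call site):

```c
RUBY_VM_SET_INTERRUPT(ec);       /* interrupt_flag |= PENDING_INTERRUPT_MASK */
if (RUBY_VM_INTERRUPTED(ec)) {
    /* taken, unless PENDING_INTERRUPT_MASK is currently masked out */
}
```
*/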
2107
2108static inline bool
2109RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2110{
2111#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2112 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2113
2114 if (current_clock != ec->checked_clock) {
2115 ec->checked_clock = current_clock;
2116 RUBY_VM_SET_TIMER_INTERRUPT(ec);
2117 }
2118#endif
2119 return ec->interrupt_flag & ~(ec)->interrupt_mask;
2120}
2121
2122VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2123int rb_signal_buff_size(void);
2124int rb_signal_exec(rb_thread_t *th, int sig);
2125void rb_threadptr_check_signal(rb_thread_t *mth);
2126void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2127void rb_threadptr_signal_exit(rb_thread_t *th);
2128int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2129void rb_threadptr_interrupt(rb_thread_t *th);
2130void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2131void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2132void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2133VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2134void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2135void rb_execution_context_update(rb_execution_context_t *ec);
2136void rb_execution_context_mark(const rb_execution_context_t *ec);
2137void rb_fiber_close(rb_fiber_t *fib);
2138void Init_native_thread(rb_thread_t *th);
2139int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2140
2141// vm_sync.h
2142void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2143void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2144
2145#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2146static inline void
2147rb_vm_check_ints(rb_execution_context_t *ec)
2148{
2149#ifdef RUBY_ASSERT_CRITICAL_SECTION
2150 VM_ASSERT(ruby_assert_critical_section_entered == 0);
2151#endif
2152
2153 VM_ASSERT(ec == rb_current_ec_noinline());
2154
2155 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2156 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2157 }
2158}
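/*
`RUBY_VM_CHECK_INTS()` is the polling point of this mechanism: long-running
loops in the VM call it on the *current* execution context so that timer
ticks, signals, and termination requests are serviced promptly. A
hypothetical loop (`has_more_work()` and `do_work()` are illustrative):

```c
rb_execution_context_t *ec = GET_EC();
while (has_more_work()) {
    do_work();
    RUBY_VM_CHECK_INTS(ec); /* may run interrupt handlers, raise, or switch */
}
```
*/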
2159
2160/* tracer */
2161
2162struct rb_trace_arg_struct {
2163 rb_event_flag_t event;
2164 rb_execution_context_t *ec;
2165 const rb_control_frame_t *cfp;
2166 VALUE self;
2167 ID id;
2168 ID called_id;
2169 VALUE klass;
2170 VALUE data;
2171
2172 int klass_solved;
2173
2174 /* calc from cfp */
2175 int lineno;
2176 VALUE path;
2177};
2178
2179void rb_hook_list_mark(rb_hook_list_t *hooks);
2180void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
2181void rb_hook_list_free(rb_hook_list_t *hooks);
2182void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2183void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2184
2185void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2186
2187#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2188 const rb_event_flag_t flag_arg_ = (flag_); \
2189 rb_hook_list_t *hooks_arg_ = (hooks_); \
2190 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2191 /* defer evaluating the other arguments */ \
2192 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2193 } \
2194} while (0)
2195
2196static inline void
2197rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2198 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2199{
2200 struct rb_trace_arg_struct trace_arg;
2201
2202 VM_ASSERT((hooks->events & flag) != 0);
2203
2204 trace_arg.event = flag;
2205 trace_arg.ec = ec;
2206 trace_arg.cfp = ec->cfp;
2207 trace_arg.self = self;
2208 trace_arg.id = id;
2209 trace_arg.called_id = called_id;
2210 trace_arg.klass = klass;
2211 trace_arg.data = data;
2212 trace_arg.path = Qundef;
2213 trace_arg.klass_solved = 0;
2214
2215 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2216}
2217
2218struct rb_ractor_pub {
2219 VALUE self;
2220 uint32_t id;
2221 rb_hook_list_t hooks;
2222};
2223
2224static inline rb_hook_list_t *
2225rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2226{
2227 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2228 return &cr_pub->hooks;
2229}
2230
2231#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2232 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2233
2234#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2235 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
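/*
`EXEC_EVENT_HOOK()` is the cheap fast path for tracing: the `events & flag`
test in `EXEC_EVENT_HOOK_ORIG()` is inlined at the call site, and the
remaining argument expressions are evaluated only when a hook for that event
is actually registered. A sketch of firing a `c-call` event (the argument
values are illustrative, not a real call site):

```c
EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, mid, mid, klass, Qundef);
```
*/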
2236
2237static inline void
2238rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2239{
2240 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2241 NIL_P(eval_script) ? (VALUE)iseq :
2242 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2243}
2244
2245void rb_vm_trap_exit(rb_vm_t *vm);
2246void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2247void rb_vm_postponed_job_free(void); /* vm_trace.c */
2248size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2249void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2250
2251RUBY_SYMBOL_EXPORT_BEGIN
2252
2253int rb_thread_check_trap_pending(void);
2254
2255/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2256#define RUBY_EVENT_COVERAGE_LINE 0x010000
2257#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2258
2259extern VALUE rb_get_coverages(void);
2260extern void rb_set_coverages(VALUE, int, VALUE);
2261extern void rb_clear_coverages(void);
2262extern void rb_reset_coverages(void);
2263extern void rb_resume_coverages(void);
2264extern void rb_suspend_coverages(void);
2265
2266void rb_postponed_job_flush(rb_vm_t *vm);
2267
2268// ractor.c
2269RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2270RUBY_EXTERN VALUE rb_eRactorIsolationError;
2271
2272RUBY_SYMBOL_EXPORT_END
2273
2274#endif /* RUBY_VM_CORE_H */