Ruby 3.5.0dev (2025-04-03 revision 1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
vm_core.h (1dddc6c78b5f6dc6ae18ee04ebe44abfce3b0433)
#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif

/**
 * VM Debug Level
 *
 * debug level:
 *  0: no debug output
 *  1: show instruction name
 *  2: show stack frame when control stack frame is changed
 *  3: show stack status
 *  4: show register
 *  5:
 * 10: gc check
 */
#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef  VMDEBUG
#define VMDEBUG 3
#endif

#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr, ...) \
    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(/*expr, */...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif

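/*
 * Example usage of the assertion helpers above (an illustrative sketch;
 * `checked_fetch` is hypothetical, not part of this header). VM_ASSERT
 * compiles to nothing unless VM_CHECK_MODE > 0, so it may carry
 * debug-only checks, optionally with printf-style message arguments:
 *
 * ```c
 * static VALUE
 * checked_fetch(VALUE ary, long i)
 * {
 *     VM_ASSERT(RB_TYPE_P(ary, T_ARRAY));         // compiled out in release builds
 *     VM_ASSERT(i >= 0, "negative index %ld", i); // optional message arguments
 *     return RARRAY_AREF(ary, i);
 * }
 * ```
 */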
#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
/*
# Critical Section Assertions

These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.

The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
held by someone else.

These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
is set.

## Example Usage

```c
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
// ... some code which does not invoke rb_vm_check_ints() ...
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
```

If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
*/
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#else
# include <setjmp.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif

#include "ruby/internal/stdbool.h"
#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/sanitizers.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"

#include "ruby/thread_native.h"
/*
 * implementation selector of get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif

#if defined(NSIG_MAX)           /* POSIX issue 8 */
# undef NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG)      /* FreeBSD */
# undef NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)          /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)             /* 99% of everything else */
# /* take it */
#else                           /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#endif

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var)  var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif
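/*
 * Sketch of the intended pairing of the macros above (illustrative; the
 * real setup lives in the signal-handling code, cf. signal.c):
 *
 * ```c
 * void *altstack;
 * RB_ALTSTACK_INIT(altstack, rb_allocate_sigaltstack()); // no-op without sigaltstack support
 * // ... handle SIGSEGV on the alternate stack ...
 * RB_ALTSTACK_FREE(altstack);
 * ```
 */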

#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*****************/
/* configuration */
/*****************/

/* gcc ver. check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#endif /* OPT_CALL_THREADED_CODE */

void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef   signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE   RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK  RUBY_TAG_BREAK
#define TAG_NEXT   RUBY_TAG_NEXT
#define TAG_RETRY  RUBY_TAG_RETRY
#define TAG_REDO   RUBY_TAG_REDO
#define TAG_RAISE  RUBY_TAG_RAISE
#define TAG_THROW  RUBY_TAG_THROW
#define TAG_FATAL  RUBY_TAG_FATAL
#define TAG_MASK   RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};

/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
typedef struct rb_compile_option_struct rb_compile_option_t;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;              // v0
    VALUE _unused1;           // v1
    VALUE _unused2;           // v2
    const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);

struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry;

    /**
     * A null-terminated list of ids, used to represent a constant's path.
     * idNULL is used to represent the :: prefix, and 0 is used to denote
     * the end of the list.
     *
     * For example
     *   FOO        {rb_intern("FOO"), 0}
     *   FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
     *   ::FOO      {idNULL, rb_intern("FOO"), 0}
     *   ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
     */
    const ID *segments;
};

struct iseq_inline_iv_cache_entry {
    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
    ID iv_set_name;
};

struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_call_data *cd;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    bool kw_splat;
    VALUE heap_argv;
};

#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
#endif

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)

struct rb_execution_context_struct;

#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#else
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#endif
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))

typedef struct rb_iseq_location_struct {
    VALUE pathobj;      /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label;   /* String */
    VALUE label;        /* String */
    int first_lineno;
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;

#define PATHOBJ_PATH     0
#define PATHOBJ_REALPATH 1

static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_PATH);
    }
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
    }
}

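/*
 * Illustrative sketch: a pathobj is either a frozen String (when path ==
 * realpath) or a frozen [path, realpath] Array, so the two accessors above
 * work on either representation:
 *
 * ```c
 * VALUE pathobj = rb_iseq_pathobj_new(path, realpath); // declared later in this file
 * VALUE p  = pathobj_path(pathobj);     // pathobj itself, or pathobj[PATHOBJ_PATH]
 * VALUE rp = pathobj_realpath(pathobj); // pathobj itself, or pathobj[PATHOBJ_REALPATH]
 * ```
 */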
/* Forward declarations */
typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);

/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};

// Attributes specified by Primitive.attr!
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    // The iseq acts like a C method in backtraces.
    BUILTIN_ATTR_C_TRACE = 0x08,
};

typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);

struct rb_iseq_constant_body {
    enum rb_iseq_type type;

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    /**
     * parameter information
     *
     *  def m(a1, a2, ..., aM,                    # mandatory
     *        b1=(...), b2=(...), ..., bN=(...),  # optional
     *        *c,                                 # rest
     *        d1, d2, ..., dO,                    # post
     *        e1:(...), e2:(...), ..., eK:(...),  # keyword
     *        **f,                                # keyword_rest
     *        &g)                                 # block
     * =>
     *
     *  lead_num     = M
     *  opt_num      = N
     *  rest_start   = M+N
     *  post_start   = M+N+(*1)
     *  post_num     = O
     *  keyword_num  = K
     *  block_start  = M+N+(*1)+O+K
     *  keyword_bits = M+N+(*1)+O+K+(&1)
     *  size         = M+N+O+(*1)+K+(&1)+(**1) // parameter size.
     */

    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords : 1;
            unsigned int anon_rest : 1;
            unsigned int anon_kwrest : 1;
            unsigned int use_block : 1;
            unsigned int forwardable : 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */
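        /* Concrete example (illustrative):
         *   def foo(a, b = 1, c = 2) ... end
         *   #=> lead_num = 1, opt_num = 2, and opt_table holds three
         *   #   entries: the offsets of the code assigning b and c,
         *   #   followed by the offset of the method body.
         */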

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
    struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int ic_size;     // Number of IC caches
    unsigned int ise_size;    // Number of ISE caches
    unsigned int ivc_size;    // Number of IVC caches
    unsigned int icvarc_size; // Number of ICVARC caches
    unsigned int ci_size;
    unsigned int stack_max; /* for stack overflow check */

    unsigned int builtin_attrs; // Union of rb_builtin_attr

    bool prism; // ISEQ was generated by the prism compiler

    union {
        iseq_bits_t * list; /* Find references for GC */
        iseq_bits_t single;
    } mark_bits;

    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_YJIT
    // Function pointer for JIT code on jit_exec()
    rb_jit_func_t jit_entry;
    // Number of calls on jit_exec()
    long unsigned jit_entry_calls;
    // Function pointer for JIT code on jit_exec_exception()
    rb_jit_func_t jit_exception;
    // Number of calls on jit_exec_exception()
    long unsigned jit_exception_calls;
    // YJIT stores some data on each iseq.
    void *yjit_payload;
    // Used to estimate how frequently this ISEQ gets called
    uint64_t yjit_calls_at_interv;
#endif
};

/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags;   /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif

#if !USE_LAZY_LOAD
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
    return iseq;
}

static inline bool
rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
{
    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
}

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    //TODO: re-visit. to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}

enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    bool need_clean;
    bool is_local;
} rb_hook_list_t;


// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;

struct global_object_list {
    VALUE *varptr;
    struct global_object_list *next;
};

typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_cond;
#endif
        } sync;

        // ractor scheduling
        struct {
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            bool locked;

            rb_nativethread_cond_t cond; // GRQ
            unsigned int snt_cnt; // count of shared NTs
            unsigned int dnt_cnt; // count of dedicated NTs

            unsigned int running_cnt;

            unsigned int max_cpu;
            struct ccan_list_head grq; // Global Ready Queue
            unsigned int grq_cnt;

            // running threads
            struct ccan_list_head running_threads;

            // threads which switch context by timeslice
            struct ccan_list_head timeslice_threads;

            struct ccan_list_head zombie_threads;

            // true if timeslice timer is not enabled
            bool timeslice_wait_inf;

            // barrier
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
            bool barrier_waiting;
            unsigned int barrier_waiting_cnt;
            unsigned int barrier_serial;
        } sched;
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;
    struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    struct global_object_list *global_object_list;
    const VALUE special_exceptions[ruby_special_error_count];

    /* load */
    VALUE top_self;
    VALUE load_path;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;
    struct st_table *loading_table;
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* postponed_job (async-signal-safe, and thread-safe) */
    struct rb_postponed_job_queue *postponed_job_queue;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    struct {
        struct rb_objspace *objspace;
        struct gc_mark_func_data_struct {
            void *data;
            void (*mark_func)(VALUE v, void *data);
        } *mark_func_data;
    } gc;

    rb_at_exit_list *at_exit;

    st_table *frozen_strings;

    const struct rb_builtin_function *builtin_function_table;

    st_table *ci_table;
    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme
    st_table *unused_block_warning_table;

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* sizes in bytes */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;

} rb_vm_t;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          (128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (  2 * 1024 * sizeof(VALUE)) /*   8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /*  64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           ( 16 * 1024 * sizeof(VALUE)) /*  64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (  2 * 1024 * sizeof(VALUE)) /*   8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or  512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 16 * 1024 * sizeof(VALUE)) /*  64 KB or  128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif

struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};

typedef struct rb_control_frame_struct {
    const VALUE *pc;        // cfp[0]
    VALUE *sp;              // cfp[1]
    const rb_iseq_t *iseq;  // cfp[2]
    VALUE self;             // cfp[3] / block[0]
    const VALUE *ep;        // cfp[4] / block[1]
    const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
    void *jit_return;       // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        // cfp[7]
#endif
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}

enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  `rb_vm_tag_jmpbuf_t` type represents a buffer used to
  long jump to a C frame associated with `rb_vm_tag`.

  Use sites of `rb_vm_tag_jmpbuf_t` are responsible for calling the
  following functions:
  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.

  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
*/
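/*
 * Illustrative pairing of init/deinit around a setjmp (a sketch only;
 * the real call sites are EC_PUSH_TAG()/EC_POP_TAG() in eval_intern.h):
 *
 * ```c
 * struct rb_vm_tag tag;
 * rb_vm_tag_jmpbuf_init(&tag.buf);
 * if (RUBY_SETJMP(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
 *     // ... protected code ...
 * }
 * rb_vm_tag_jmpbuf_deinit(&tag.buf);
 * ```
 */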
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
/*
  WebAssembly target with Asyncify-based SJLJ needs
  to capture the execution context by unwind/rewind-ing
  call frames into a jump buffer. The buffer space tends
  to be considerably larger than other architectures'
  register-based buffers.
  Therefore, we allocate the buffer on the heap in such
  environments.
*/
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
#endif

/*
  the members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that the entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
              sizeof(struct rb_vm_tag));

struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};

struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;

#ifdef RUBY_ASAN_ENABLED
        void *asan_fake_stack_handle;
#endif
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
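/*
 * Illustrative sketch of the expected call pattern (the stack source and
 * size here are assumptions; see vm.c and cont.c for the real call sites):
 *
 * ```c
 * size_t size = RUBY_VM_FIBER_VM_STACK_SIZE / sizeof(VALUE); // bytes -> words
 * VALUE *stack = ALLOC_N(VALUE, size);
 * rb_ec_initialize_vm_stack(ec, stack, size); // pushes the initial stack frame
 * // ...
 * rb_ec_clear_vm_stack(ec);                   // detaches the stack from the ec
 * ```
 */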

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;

typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;

    struct rb_thread_sched_item sched;
    bool mn_schedulable;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;
    struct ccan_list_head interrupt_exec_tasks;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;

static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return th ? (unsigned int)th->serial : 0;
}

typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
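
/* Worked example (illustrative; the operand is built by the compiler, see
 * compile.c): `class Foo::Bar < Baz` yields a defineclass operand of
 * 0x18 = VM_DEFINECLASS_TYPE_CLASS | VM_DEFINECLASS_FLAG_SCOPED |
 * VM_DEFINECLASS_FLAG_HAS_SUPERCLASS, so VM_DEFINECLASS_TYPE(0x18) == 0x00
 * and both predicate macros above are true. */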

/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new         (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top     (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*,
                                VALUE script_lines);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    struct rb_iseq_new_with_callback_callback_func *memo =
        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
    memo->func = func;
    memo->data = ptr;

    return memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);

VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;

/* used at compile time and by the send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
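
/*
 * Illustrative use of the tagging macros above: an iseq block handler is a
 * struct rb_captured_block pointer tagged with 0x01 (cf.
 * VM_BH_FROM_ISEQ_BLOCK() later in this file), and GC_GUARDED_PTR() applies
 * the same scheme to environment pointers:
 *
 * ```c
 * VALUE bh = VM_TAGGED_PTR_SET(captured, 0x01);              // tag the pointer
 * struct rb_captured_block *p = VM_TAGGED_PTR_REF(bh, 0x03); // strip both tag bits
 * ```
 */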

enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
     *
     * X   : tag for GC marking (it looks like a Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 7 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH    = 0x0020,
    VM_FRAME_FLAG_BMETHOD   = 0x0040,
    VM_FRAME_FLAG_CFRAME    = 0x0080,
    VM_FRAME_FLAG_LAMBDA    = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED    = 0x0800,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};

#define VM_ENV_DATA_SIZE          ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
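
/* Illustrative layout around an environment pointer (see the accessors
 * below: for a local env the specval slot holds the block handler, for a
 * non-local env it holds the GC-guarded previous ep):
 *
 *   ep[-2]: me/cref  (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]: specval  (VM_ENV_DATA_INDEX_SPECVAL)
 *   ep[ 0]: flags    (VM_ENV_DATA_INDEX_FLAGS)
 *   ep[ 1]: envval   (VM_ENV_DATA_INDEX_ENV)
 */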

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))
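
/* Note (illustrative of the macros above): control frames grow downward in
 * memory, so pushing a frame decrements cfp, the caller's frame sits at
 * (cfp)+1, and a frame is valid only while it lies below the end of the
 * vm_stack computed by RUBY_VM_END_CONTROL_FRAME() below. */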

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}

static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}

/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->waiting_fds);
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
}

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_context(const rb_execution_context_t *ec);

void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);

const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \
    } \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
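
/* Typical use (illustrative): check for overflow before pushing argc + 1
 * values (receiver plus arguments) onto the VM stack:
 *
 * ```c
 * CHECK_VM_STACK_OVERFLOW(ec->cfp, argc + 1);
 * ```
 */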

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int    ruby_vm_event_local_num;

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #if defined(__arm64__) || defined(__aarch64__)
    rb_execution_context_t *ec = rb_current_ec();
  #else
    rb_execution_context_t *ec = ruby_current_ec;
  #endif

    /* In shared objects, `__tls_get_addr()` is used to access the TLS, and
     * the address of `ruby_current_ec` can be cached in a function frame.
     * However, that cached address can be misused after a coroutine migrates
     * to another native thread:
     * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     * 2) Context-switch and resume the coroutine on NT2.
     * 3) `ptr` is used on NT2, but it still points into NT1's TLS.
     * This assertion checks for such misuse.
     *
     * To avoid accidents, `GET_EC()` should be called only once per frame.
     * Note that inlining can reintroduce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
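/*
 * Per the note above, read the TLS once per frame and pass `ec` down
 * instead of calling `GET_EC()` again around a potential coroutine switch.
 * A sketch with a hypothetical `do_something()`:
 *
 * ```c
 * void
 * example(void)
 * {
 *     rb_execution_context_t *ec = GET_EC(); // read the TLS exactly once
 *     do_something(ec); // pass `ec` down rather than re-reading the TLS
 *     do_something(ec); // still valid even after native thread migration
 * }
 * ```
 */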

static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}

static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}

void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}

#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec)                 ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
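/*
 * How the flag and the mask interact: bits set in `interrupt_mask`
 * temporarily suppress the corresponding interrupts. A hypothetical sketch
 * (real code manages the mask via dedicated helpers):
 *
 * ```c
 * ec->interrupt_mask |= TRAP_INTERRUPT_MASK; // defer trap handling
 * RUBY_VM_SET_TRAP_INTERRUPT(ec);            // the flag bit is set...
 * bool pending = RUBY_VM_INTERRUPTED(ec);    // ...but this reads as false
 * ```
 */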

static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ec->interrupt_flag & ~(ec)->interrupt_mask;
}

VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == GET_EC());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
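/*
 * Typical pattern: a long-running C loop polls for pending interrupts so
 * that timers, signals, and Thread#raise are serviced promptly. A sketch
 * with hypothetical helpers:
 *
 * ```c
 * void
 * example_busy_loop(rb_execution_context_t *ec)
 * {
 *     while (work_remaining()) {  // hypothetical
 *         do_one_unit_of_work();  // hypothetical
 *         RUBY_VM_CHECK_INTS(ec); // may run interrupt handlers or raise
 *     }
 * }
 * ```
 */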

/* tracer */

struct rb_trace_arg_struct {
    rb_event_flag_t event;
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;

    /* calc from cfp */
    int lineno;
    VALUE path;
};

void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);

#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)

static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}
struct rb_ractor_pub {
    VALUE self;
    uint32_t id;
    rb_hook_list_t hooks;
};

static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
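/*
 * A sketch of a hypothetical call site: the macro tests `hooks->events`
 * first, so the remaining arguments are evaluated only when some hook is
 * registered for the event:
 *
 * ```c
 * EXEC_EVENT_HOOK(ec, RUBY_EVENT_LINE, cfp->self, 0, 0, 0, Qundef);
 * ```
 */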

static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}

void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
void rb_vm_postponed_job_free(void); /* vm_trace.c */
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

// ractor.c
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
RUBY_EXTERN VALUE rb_eRactorIsolationError;

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */