Ruby 3.5.0dev (2025-02-20 revision 34098b669c0cbc024cd08e686891f1dfe0a10aaf)
vm_core.h (34098b669c0cbc024cd08e686891f1dfe0a10aaf)
#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif

#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef VMDEBUG
#define VMDEBUG 3
#endif

#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
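/* For example, with the common 64-bit layout (8-byte VALUE and
 * RBIMPL_RVALUE_EMBED_LEN_MAX == 3), this evaluates to (2 + 3) * 8 = 40
 * bytes: struct RBasic contributes two VALUEs (flags and klass) and the
 * embedded area three more. */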

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr, ...) \
    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(/*expr, */...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif
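
/* Illustrative usage (not from this file): these checks compile away unless
 * VM_CHECK_MODE > 0, so they are free in release builds.
 *
 *   VM_ASSERT(ec != NULL);
 *   VM_ASSERT(depth > 0, "depth must be positive");
 */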

#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
/*
# Critical Section Assertions

These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.

The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
held by someone else.

These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
is set.

## Example Usage

```c
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
// ... some code which does not invoke rb_vm_check_ints() ...
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
```

If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
*/
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#else
# include <setjmp.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif

#include "ruby/internal/stdbool.h"
#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/sanitizers.h"
#include "internal/serial.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"

#include "ruby/thread_native.h"
/*
 * implementation selector of get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif

#if defined(NSIG_MAX)      /* POSIX issue 8 */
# undef NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG) /* FreeBSD */
# undef NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)     /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)        /* 99% of everything else */
# /* take it */
#else                      /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#endif

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif

#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*****************/
/* configuration */
/*****************/

/* gcc ver. check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#endif /* OPT_CALL_THREADED_CODE */

void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE   RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK  RUBY_TAG_BREAK
#define TAG_NEXT   RUBY_TAG_NEXT
#define TAG_RETRY  RUBY_TAG_RETRY
#define TAG_REDO   RUBY_TAG_REDO
#define TAG_RAISE  RUBY_TAG_RAISE
#define TAG_THROW  RUBY_TAG_THROW
#define TAG_FATAL  RUBY_TAG_FATAL
#define TAG_MASK   RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};

/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
struct iseq_compile_data_ensure_node_stack;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;              // v0
    VALUE _unused1;           // v1
    VALUE _unused2;           // v2
    const rb_cref_t *ic_cref; // v3
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
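
/* (The entry above is allocated as a T_IMEMO object inside a GC slot, so
 * this assertion guards that its payload still fits in one RVALUE-sized
 * slot.) */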

struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry;

    /* A 0-terminated list of IDs naming the constant's path: for example,
     * FOO::BAR is {rb_intern("FOO"), rb_intern("BAR"), 0}, and idNULL stands
     * for a leading "::" prefix. */
    const ID *segments;
};

struct iseq_inline_iv_cache_entry {
    uintptr_t value; // attr_index in lower bits, dest_shape_id in upper bits
    ID iv_set_name;
};

struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_call_data *cd;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    bool kw_splat;
    VALUE heap_argv;
};

#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
#endif

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)

struct rb_execution_context_struct;

#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#else
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#endif
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))

typedef struct rb_iseq_location_struct {
    VALUE pathobj;    /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label; /* String */
    VALUE label;      /* String */
    int first_lineno;
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;

#define PATHOBJ_PATH     0
#define PATHOBJ_REALPATH 1

static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_PATH);
    }
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
    }
}
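
/* Illustrative: pathobj is the frozen path String itself when path and
 * realpath coincide, and a frozen [path, realpath] Array otherwise, which is
 * why both accessors above first test for T_STRING. */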

/* Forward declarations */
typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache)

/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};

// Attributes specified by Primitive.attr!
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq contains only a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    // The iseq acts like a C method in backtraces.
    BUILTIN_ATTR_C_TRACE = 0x08,
};

typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);

struct rb_iseq_constant_body {
    enum rb_iseq_type type;

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    /* parameter information */
    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords: 1;
            unsigned int anon_rest: 1;
            unsigned int anon_kwrest: 1;
            unsigned int use_block: 1;
            unsigned int forwardable: 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct iseq_insn_info {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
    struct rb_call_data *call_data; // struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int ic_size;     // Number of IC caches
    unsigned int ise_size;    // Number of ISE caches
    unsigned int ivc_size;    // Number of IVC caches
    unsigned int icvarc_size; // Number of ICVARC caches
    unsigned int ci_size;
    unsigned int stack_max; /* for stack overflow check */

    unsigned int builtin_attrs; // Union of rb_builtin_attr

    bool prism; // ISEQ was generated from the prism compiler

    union {
        iseq_bits_t * list; /* Find references for GC */
        iseq_bits_t single;
    } mark_bits;

    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_YJIT
    // Function pointer for JIT code on jit_exec()
    rb_jit_func_t jit_entry;
    // Number of calls on jit_exec()
    long unsigned jit_entry_calls;
#endif

#if USE_YJIT
    // Function pointer for JIT code on jit_exec_exception()
    rb_jit_func_t jit_exception;
    // Number of calls on jit_exec_exception()
    long unsigned jit_exception_calls;
#endif

#if USE_YJIT
    // YJIT stores some data on each iseq.
    void *yjit_payload;
    // Used to estimate how frequently this ISEQ gets called
    uint64_t yjit_calls_at_interv;
#endif
};

/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags; /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            struct rb_hook_list_struct *local_hooks;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif

#if !USE_LAZY_LOAD
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
    return iseq;
}

static inline bool
rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
{
    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
}

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    // TODO: revisit; to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}

enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

#define GetVMPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    bool need_clean;
    bool is_local;
} rb_hook_list_t;


// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;

struct global_object_list {
    VALUE *varptr;
    struct global_object_list *next;
};

typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_cond;
#endif
        } sync;

        // ractor scheduling
        struct {
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            bool locked;

            rb_nativethread_cond_t cond; // GRQ
            unsigned int snt_cnt; // count of shared NTs
            unsigned int dnt_cnt; // count of dedicated NTs

            unsigned int running_cnt;

            unsigned int max_cpu;
            struct ccan_list_head grq; // Global Ready Queue
            unsigned int grq_cnt;

            // running threads
            struct ccan_list_head running_threads;

            // threads which switch context by timeslice
            struct ccan_list_head timeslice_threads;

            struct ccan_list_head zombie_threads;

            // true if the timeslice timer is not enabled
            bool timeslice_wait_inf;

            // barrier
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
            bool barrier_waiting;
            unsigned int barrier_waiting_cnt;
            unsigned int barrier_serial;
        } sched;
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;
    struct ccan_list_head waiting_fds; /* <=> struct waiting_fd */

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    struct global_object_list *global_object_list;
    const VALUE special_exceptions[ruby_special_error_count];

    /* load */
    VALUE top_self;
    VALUE load_path;
    VALUE load_path_snapshot;
    VALUE load_path_check_cache;
    VALUE expanded_load_path;
    VALUE loaded_features;
    VALUE loaded_features_snapshot;
    VALUE loaded_features_realpaths;
    VALUE loaded_features_realpath_map;
    struct st_table *loaded_features_index;
    struct st_table *loading_table;
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* postponed_job (async-signal-safe, and thread-safe) */
    struct rb_postponed_job_queue *postponed_job_queue;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    struct {
        struct rb_objspace *objspace;
        struct gc_mark_func_data_struct {
            void *data;
            void (*mark_func)(VALUE v, void *data);
        } *mark_func_data;
    } gc;

    rb_at_exit_list *at_exit;

    st_table *frozen_strings;

    const struct rb_builtin_function *builtin_function_table;

    st_table *ci_table;
    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme
    st_table *unused_block_warning_table;

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* sizes in bytes */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;

} rb_vm_t;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*  128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#endif
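
/* Reading the "X KB or Y KB" comments above: every constant scales with
 * sizeof(VALUE), e.g. 128 * 1024 * sizeof(VALUE) is 512 KB with 4-byte
 * VALUEs (32-bit builds) and 1024 KB with 8-byte VALUEs (64-bit builds). */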

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif

struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};

typedef struct rb_control_frame_struct {
    const VALUE *pc;        // cfp[0]
    VALUE *sp;              // cfp[1]
    const rb_iseq_t *iseq;  // cfp[2]
    VALUE self;             // cfp[3] / block[0]
    const VALUE *ep;        // cfp[4] / block[1]
    const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
    void *jit_return;       // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        // cfp[7]
#endif
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}

enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  The `rb_vm_tag_jmpbuf_t` type represents a buffer used to
  long jump to a C frame associated with `rb_vm_tag`.

  Each use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
  following functions:
  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.

  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
*/
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
/*
  WebAssembly target with Asyncify-based SJLJ needs
  to capture the execution context by unwinding/rewinding
  call frames into a jump buffer. The buffer space tends
  to be considerably large, unlike other architectures'
  register-based buffers.
  Therefore, we allocate the buffer on the heap in such
  environments.
*/
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
#endif
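
/*
  A sketch of the expected call pattern (this mirrors what EC_PUSH_TAG()/
  EC_POP_TAG() arrange; shown only for illustration):

    struct rb_vm_tag tag;
    rb_vm_tag_jmpbuf_init(&tag.buf);
    if (ruby_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
        // protected code; a matching ruby_longjmp() lands here with != 0
    }
    rb_vm_tag_jmpbuf_deinit(&tag.buf);
*/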

/*
  The members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that the entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
              sizeof(struct rb_vm_tag));

struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};

struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;

#ifdef RUBY_ASAN_ENABLED
        void *asan_fake_stack_handle;
#endif
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;

typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;

    struct rb_thread_sched_item sched;
    bool mn_schedulable;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;
    struct ccan_list_head interrupt_exec_tasks;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* statistics data for profiler */
    VALUE stat_insn_usage;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;

static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return th ? (unsigned int)th->serial : 0;
}

typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
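
/* For example (illustrative): the operand for `class Foo::Bar < Baz; end`
 * combines VM_DEFINECLASS_TYPE_CLASS | VM_DEFINECLASS_FLAG_SCOPED |
 * VM_DEFINECLASS_FLAG_HAS_SUPERCLASS, i.e. 0x00 | 0x08 | 0x10 == 0x18. */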

/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new(const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top(const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main(const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval(const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*,
                                VALUE script_lines);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    struct rb_iseq_new_with_callback_callback_func *memo =
        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
    memo->func = func;
    memo->data = ptr;

    return memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);

VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self);
VALUE rb_proc_ractor_make_shareable(VALUE self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
    GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;

/* used by compile time and send insn */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
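
/* Illustrative: GC_GUARDED_PTR(ep) sets bit 0 of the pointer, so the stored
 * VALUE looks like a Fixnum to the GC and is skipped during marking;
 * GC_GUARDED_PTR_REF() masks the low tag bits off to recover the pointer. */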

enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ____ FFFF FFFE EEEX (LSB)
     *
     * X   : tag for GC marking (it looks like a Fixnum)
     * EEE : 4 bits Env flags
     * FF..: 7 bits Frame flags
     * MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH               = 0x0020,
    VM_FRAME_FLAG_BMETHOD              = 0x0040,
    VM_FRAME_FLAG_CFRAME               = 0x0080,
    VM_FRAME_FLAG_LAMBDA               = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW            = 0x0400,
    VM_FRAME_FLAG_PASSED               = 0x0800,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};

#define VM_ENV_DATA_SIZE          ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)
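
/* The resulting layout around an environment pointer ep (a sketch):
 *
 *   ep[-3]: last local variable   (VM_ENV_INDEX_LAST_LVAR)
 *   ep[-2]: method entry / cref   (VM_ENV_DATA_INDEX_ME_CREF)
 *   ep[-1]: specval: previous EP or block handler (VM_ENV_DATA_INDEX_SPECVAL)
 *   ep[ 0]: flags, a Fixnum-tagged bit set        (VM_ENV_DATA_INDEX_FLAGS)
 *   ep[ 1]: escaped env object, if any            (VM_ENV_DATA_INDEX_ENV)
 */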

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
    (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
    ((void *)(ecfp) > (void *)(cfp))

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}

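/* Block handler encoding, as implied by the checks below (low two bits of
 * the VALUE): 01 tags a captured block whose code is an iseq, 11 tags one
 * whose code is an ifunc; anything else is a Symbol or Proc handler, or
 * VM_BLOCK_HANDLER_NONE (0). */
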
static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}

/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);

int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);

VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);

VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->waiting_fds);
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
}

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
rb_thread_t *ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))

void rb_gc_mark_machine_context(const rb_execution_context_t *ec);

void rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass, rb_cref_t **new_cref_ptr);

const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \
    } \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
RUBY_EXTERN unsigned int ruby_vm_event_local_num;

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #if defined(__arm64__) || defined(__aarch64__)
    rb_execution_context_t *ec = rb_current_ec();
  #else
    rb_execution_context_t *ec = ruby_current_ec;
  #endif

    /* In shared objects, `__tls_get_addr()` is used to access TLS, so the
     * address of `ruby_current_ec` can end up cached in a function frame.
     * That cached address becomes stale if the coroutine migrates to
     * another native thread:
     * 1) Compute `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     * 2) Context switch, and resume the coroutine on NT2.
     * 3) `ptr` is dereferenced on NT2, but it still points into NT1's TLS.
     * This assertion checks for such misuse.
     *
     * To avoid accidents, `GET_EC()` should be called only once per frame.
     * Note that inlining can reintroduce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t *ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
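
/* A minimal sketch of the rule above (helper names are illustrative):
 * read the EC from TLS exactly once per C frame and pass it down
 * explicitly, instead of calling GET_EC() again after a possible
 * coroutine switch.
 *
 *   static void
 *   do_work(void)
 *   {
 *       rb_execution_context_t *ec = GET_EC(); // single TLS access
 *       step_one(ec);                          // may switch native threads
 *       step_two(ec);                          // reuses the ec value, not TLS
 *   }
 */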

static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}

static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}

void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (vm->ractor.sync.lock_owner != rb_ec_ractor_ptr(ec)) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}

#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
#define RUBY_VM_INTERRUPTED(ec)                 ((ec)->interrupt_flag & ~(ec)->interrupt_mask & \
                                                 (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK))
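
/* A worked example of how the bits combine (values are illustrative):
 *
 *   ec->interrupt_flag = TIMER_INTERRUPT_MASK | TRAP_INTERRUPT_MASK;
 *   ec->interrupt_mask = TRAP_INTERRUPT_MASK;  // trap temporarily blocked
 *
 *   RUBY_VM_INTERRUPTED(ec)      // 0: trap is masked, timer bit not counted
 *   RUBY_VM_INTERRUPTED_ANY(ec)  // nonzero: the timer bit is still visible
 */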

static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ec->interrupt_flag & ~(ec)->interrupt_mask;
}

VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == GET_EC());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
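
/* Illustrative call site (a sketch; real polling loops live in thread.c
 * and the instruction handlers): long-running C code should poll so that
 * signals, Thread#raise, and termination requests are serviced promptly.
 * `do_a_bounded_chunk_of_work` is a hypothetical helper.
 *
 *   while (!finished) {
 *       do_a_bounded_chunk_of_work();
 *       RUBY_VM_CHECK_INTS(ec); // may raise or switch threads
 *   }
 */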

/* tracer */

struct rb_trace_arg_struct {
    rb_event_flag_t event;
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;

    /* calc from cfp */
    int lineno;
    VALUE path;
};

void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_mark_and_update(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);

#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)

static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}

struct rb_ractor_pub {
    VALUE self;
    uint32_t id;
    rb_hook_list_t hooks;
};

static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
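
/* Example invocation (a sketch; the argument names are illustrative).
 * Because EXEC_EVENT_HOOK_ORIG tests the event bits first, the argument
 * expressions below are only evaluated when a hook is actually
 * registered for RUBY_EVENT_C_CALL:
 *
 *   EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv,
 *                   method_id, called_id, defined_class, Qundef);
 */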

static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}

void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
void rb_vm_postponed_job_free(void); /* vm_trace.c */
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE   0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

// ractor.c
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
RUBY_EXTERN VALUE rb_eRactorIsolationError;

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */