Ruby 3.5.0dev (2025-10-09 revision a29c90c3b0bdc355b8b6795488db3aeba2996575)
vm_core.h (a29c90c3b0bdc355b8b6795488db3aeba2996575)
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
20// respect RUBY_DEBUG: if the given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
25
26/**
27 * VM Debug Level
28 *
29 * debug level:
30 *  0: no debug output
31 *  1: show instruction name
32 *  2: show stack frame when control stack frame is changed
33 *  3: show stack status
34 *  4: show register
35 *  5:
36 * 10: gc check
37 */
38
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
58#if VM_CHECK_MODE > 0
59#define VM_ASSERT(expr, ...) \
60 RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62#define RUBY_ASSERT_CRITICAL_SECTION
63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64#else
65#define VM_ASSERT(/*expr, */...) ((void)0)
66#define VM_UNREACHABLE(func) UNREACHABLE
67#define RUBY_DEBUG_THREAD_SCHEDULE()
68#endif
69
70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
72#if defined(RUBY_ASSERT_CRITICAL_SECTION)
73/*
74# Critical Section Assertions
75
76These assertions ensure that context switching does not occur between two points in the code. In theory, such code
77should already be protected by a mutex, and these assertions verify that the mutex is actually held.

79They are particularly useful when a mutex is held further up the call stack and the code in question does not hold
80the mutex directly: the critical section assertions can then confirm that the region is still protected by whoever
81does hold it.
82
83These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which in turn is only defined when
84VM_CHECK_MODE is non-zero.
85
86## Example Usage
87
88```c
89RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90// ... some code which does not invoke rb_vm_check_ints() ...
91RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92```
93
94If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96*/
97extern int ruby_assert_critical_section_entered;
98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100#else
101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103#endif
104
105#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106# include "wasm/setjmp.h"
107#else
108# include <setjmp.h>
109#endif
110
111#if defined(__linux__) || defined(__FreeBSD__)
112# define RB_THREAD_T_HAS_NATIVE_ID
113#endif
114
115#include "ruby/internal/stdbool.h"
116#include "ccan/list/list.h"
117#include "id.h"
118#include "internal.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/namespace.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
126#include "method.h"
127#include "node.h"
128#include "ruby/ruby.h"
129#include "ruby/st.h"
130#include "ruby_atomic.h"
131#include "vm_opts.h"
132
133#include "ruby/thread_native.h"
134/*
135 * implementation selector of get_insn_info algorithm
136 * 0: linear search
137 * 1: binary search
138 * 2: succinct bitvector
139 */
140#ifndef VM_INSN_INFO_TABLE_IMPL
141# define VM_INSN_INFO_TABLE_IMPL 2
142#endif
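/* A build-time override is possible; e.g. compiling with
 * -DVM_INSN_INFO_TABLE_IMPL=1 selects the binary-search implementation
 * (illustrative flag usage; the default succinct bitvector trades a small
 * index structure for fast pc -> insn-info lookups). */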
143
144#if defined(NSIG_MAX) /* POSIX issue 8 */
145# undef NSIG
146# define NSIG NSIG_MAX
147#elif defined(_SIG_MAXSIG) /* FreeBSD */
148# undef NSIG
149# define NSIG _SIG_MAXSIG
150#elif defined(_SIGMAX) /* QNX */
151# define NSIG (_SIGMAX + 1)
152#elif defined(NSIG) /* 99% of everything else */
153# /* take it */
154#else /* Last resort */
155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156#endif
157
158#define RUBY_NSIG NSIG
159
160#if defined(SIGCLD)
161# define RUBY_SIGCHLD (SIGCLD)
162#elif defined(SIGCHLD)
163# define RUBY_SIGCHLD (SIGCHLD)
164#endif
165
166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167# define USE_SIGALTSTACK
168void *rb_allocate_sigaltstack(void);
169void *rb_register_sigaltstack(void *);
170# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171# define RB_ALTSTACK_FREE(var) free(var)
172# define RB_ALTSTACK(var) var
173#else /* noop */
174# define RB_ALTSTACK_INIT(var, altstack)
175# define RB_ALTSTACK_FREE(var)
176# define RB_ALTSTACK(var) (0)
177#endif
178
179#include THREAD_IMPL_H
180#define RUBY_VM_THREAD_MODEL 2
181
182/*****************/
183/* configuration */
184/*****************/
185
186/* gcc ver. check */
187#if defined(__GNUC__) && __GNUC__ >= 2
188
189#if OPT_TOKEN_THREADED_CODE
190#if OPT_DIRECT_THREADED_CODE
191#undef OPT_DIRECT_THREADED_CODE
192#endif
193#endif
194
195#else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
197/* disable threaded code options */
198#if OPT_DIRECT_THREADED_CODE
199#undef OPT_DIRECT_THREADED_CODE
200#endif
201#if OPT_TOKEN_THREADED_CODE
202#undef OPT_TOKEN_THREADED_CODE
203#endif
204#endif
205
206/* call threaded code */
207#if OPT_CALL_THREADED_CODE
208#if OPT_DIRECT_THREADED_CODE
209#undef OPT_DIRECT_THREADED_CODE
210#endif /* OPT_DIRECT_THREADED_CODE */
211#endif /* OPT_CALL_THREADED_CODE */
212
213void rb_vm_encoded_insn_data_table_init(void);
214typedef unsigned long rb_num_t;
215typedef signed long rb_snum_t;
216
217enum ruby_tag_type {
218 RUBY_TAG_NONE = 0x0,
219 RUBY_TAG_RETURN = 0x1,
220 RUBY_TAG_BREAK = 0x2,
221 RUBY_TAG_NEXT = 0x3,
222 RUBY_TAG_RETRY = 0x4,
223 RUBY_TAG_REDO = 0x5,
224 RUBY_TAG_RAISE = 0x6,
225 RUBY_TAG_THROW = 0x7,
226 RUBY_TAG_FATAL = 0x8,
227 RUBY_TAG_MASK = 0xf
228};
229
230#define TAG_NONE RUBY_TAG_NONE
231#define TAG_RETURN RUBY_TAG_RETURN
232#define TAG_BREAK RUBY_TAG_BREAK
233#define TAG_NEXT RUBY_TAG_NEXT
234#define TAG_RETRY RUBY_TAG_RETRY
235#define TAG_REDO RUBY_TAG_REDO
236#define TAG_RAISE RUBY_TAG_RAISE
237#define TAG_THROW RUBY_TAG_THROW
238#define TAG_FATAL RUBY_TAG_FATAL
239#define TAG_MASK RUBY_TAG_MASK
240
241enum ruby_vm_throw_flags {
242 VM_THROW_NO_ESCAPE_FLAG = 0x8000,
243 VM_THROW_STATE_MASK = 0xff
244};
245
246/* forward declarations */
247struct rb_thread_struct;
248struct rb_control_frame_struct;
249
250/* iseq data type */
251struct iseq_compile_data_ensure_node_stack;
252
253union ic_serial_entry {
254 rb_serial_t raw;
255 VALUE data[2];
256};
257
258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
260// imemo_constcache
261struct iseq_inline_constant_cache_entry {
262 VALUE flags;
263
264 VALUE value;
265 const rb_cref_t *ic_cref;
266};
267STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270
271struct iseq_inline_constant_cache {
272 struct iseq_inline_constant_cache_entry *entry;
273
274 /**
275 * A null-terminated list of ids, used to represent a constant's path.
276 * idNULL is used to represent the :: prefix, and 0 is used to denote the end
277 * of the list.
278 *
279 * For example
280 *   FOO        {rb_intern("FOO"), 0}
281 *   FOO::BAR   {rb_intern("FOO"), rb_intern("BAR"), 0}
282 *   ::FOO      {idNULL, rb_intern("FOO"), 0}
283 *   ::FOO::BAR {idNULL, rb_intern("FOO"), rb_intern("BAR"), 0}
284 */
285 const ID *segments;
286};
287
288struct iseq_inline_iv_cache_entry {
289 uint64_t value; // dest_shape_id in the upper half, attr_index in the lower half
290 ID iv_set_name;
291};
292
293struct iseq_inline_cvar_cache_entry {
294 const struct rb_cvar_class_tbl_entry *entry;
295};
296
297union iseq_inline_storage_entry {
298 struct {
299 struct rb_thread_struct *running_thread;
300 VALUE value;
301 } once;
302 struct iseq_inline_constant_cache ic_cache;
303 struct iseq_inline_iv_cache_entry iv_cache;
304};
305
306struct rb_calling_info {
307 const struct rb_call_data *cd;
308 const struct rb_callcache *cc;
309 VALUE block_handler;
310 VALUE recv;
311 int argc;
312 bool kw_splat;
313 VALUE heap_argv;
314};
315
316#ifndef VM_ARGC_STACK_MAX
317#define VM_ARGC_STACK_MAX 128
318#endif
319
320# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
321
322struct rb_execution_context_struct;
323
324#if 1
325#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
326#else
327#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
328#endif
329#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
330
331typedef struct rb_iseq_location_struct {
332 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
333 VALUE base_label; /* String */
334 VALUE label; /* String */
335 int first_lineno;
336 int node_id;
337 rb_code_location_t code_location;
338} rb_iseq_location_t;
339
340#define PATHOBJ_PATH 0
341#define PATHOBJ_REALPATH 1
342
343static inline VALUE
344pathobj_path(VALUE pathobj)
345{
346 if (RB_TYPE_P(pathobj, T_STRING)) {
347 return pathobj;
348 }
349 else {
350 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
351 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
352 }
353}
354
355static inline VALUE
356pathobj_realpath(VALUE pathobj)
357{
358 if (RB_TYPE_P(pathobj, T_STRING)) {
359 return pathobj;
360 }
361 else {
362 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
363 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
364 }
365}
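/* The two pathobj representations handled above, in Ruby-literal notation
 * (illustrative values; see rb_iseq_pathobj_new() in iseq.c for how they are
 * built):
 *
 *   pathobj_path("t.rb")                    #=> "t.rb"
 *   pathobj_path(["t.rb", "/abs/t.rb"])     #=> "t.rb"
 *   pathobj_realpath(["t.rb", "/abs/t.rb"]) #=> "/abs/t.rb"
 */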
366
367/* Forward declarations */
368typedef uintptr_t iseq_bits_t;
369
370#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
371
372/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
373#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
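/* Worked example of the layout above (illustrative sizes, not from a real
 * iseq): with ivc_size == 2, icvarc_size == 1 and ise_size == 1, the TS_IC
 * entries start after those 4 slots, so ISEQ_IS_IC_ENTRY(body, 0) reads
 * body->is_entries[0 + 1 + 1 + 2].ic_cache, i.e. slot 4. */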
374
375/* instruction sequence type */
376enum rb_iseq_type {
377 ISEQ_TYPE_TOP,
378 ISEQ_TYPE_METHOD,
379 ISEQ_TYPE_BLOCK,
380 ISEQ_TYPE_CLASS,
381 ISEQ_TYPE_RESCUE,
382 ISEQ_TYPE_ENSURE,
383 ISEQ_TYPE_EVAL,
384 ISEQ_TYPE_MAIN,
385 ISEQ_TYPE_PLAIN
386};
387
388// Attributes specified by Primitive.attr!
389enum rb_builtin_attr {
390 // The iseq does not call methods.
391 BUILTIN_ATTR_LEAF = 0x01,
392 // This iseq only contains a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
393 BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
394 // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
395 BUILTIN_ATTR_INLINE_BLOCK = 0x04,
396 // The iseq acts like a C method in backtraces.
397 BUILTIN_ATTR_C_TRACE = 0x08,
398};
399
400typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
401
402struct rb_iseq_constant_body {
403 enum rb_iseq_type type;
404
405 unsigned int iseq_size;
406 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
407
409 /**
410 * parameter information
411 *
412 *  def m(a1, a2, ..., aM,                    # mandatory
413 *        b1=(...), b2=(...), ..., bN=(...),  # optional
414 *        *c,                                 # rest
415 *        d1, d2, ..., dO,                    # post
416 *        e1:(...), e2:(...), ..., eK:(...),  # keyword
417 *        **f,                                # keyword_rest
418 *        &g)                                 # block
419 * =>
420 *
421 *  lead_num     = M
422 *  opt_num      = N
423 *  rest_start   = M+N
424 *  post_start   = M+N+(*1)
425 *  post_num     = O
426 *  keyword_num  = K
427 *  block_start  = M+N+(*1)+O+K
428 *  keyword_bits = M+N+(*1)+O+K+(&1)
429 *  param_size   = M+N+(*1)+O+K+(&1)+1 // TODO: this is too many!
430 */
431 struct {
432 struct {
433 unsigned int has_lead : 1;
434 unsigned int has_opt : 1;
435 unsigned int has_rest : 1;
436 unsigned int has_post : 1;
437 unsigned int has_kw : 1;
438 unsigned int has_kwrest : 1;
439 unsigned int has_block : 1;
440
441 unsigned int ambiguous_param0 : 1; /* {|a|} */
442 unsigned int accepts_no_kwarg : 1;
443 unsigned int ruby2_keywords: 1;
444 unsigned int anon_rest: 1;
445 unsigned int anon_kwrest: 1;
446 unsigned int use_block: 1;
447 unsigned int forwardable: 1;
448 } flags;
449
450 unsigned int size;
451
452 int lead_num;
453 int opt_num;
454 int rest_start;
455 int post_start;
456 int post_num;
457 int block_start;
458
459 const VALUE *opt_table; /* (opt_num + 1) entries. */
460 /* opt_num and opt_table:
461 *
462 * def foo o1=e1, o2=e2, ..., oN=eN
463 * #=>
464 * # prologue code
465 * A1: e1
466 * A2: e2
467 * ...
468 * AN: eN
469 * AL: body
470 * opt_num = N
471 * opt_table = [A1, A2, ..., AN, AL]
472 */
473
474 const struct rb_iseq_param_keyword {
475 int num;
476 int required_num;
477 int bits_start;
478 int rest_start;
479 const ID *table;
480 VALUE *default_values;
481 } *keyword;
482 } param;
483
484 rb_iseq_location_t location;
485
486 /* insn info, must be freed */
487 struct iseq_insn_info {
488 const struct iseq_insn_info_entry *body;
489 unsigned int *positions;
490 unsigned int size;
491#if VM_INSN_INFO_TABLE_IMPL == 2
492 struct succ_index_table *succ_index_table;
493#endif
494 } insns_info;
495
496 const ID *local_table; /* must free */
497
498 enum lvar_state {
499 lvar_uninitialized,
500 lvar_initialized,
501 lvar_reassigned,
502 } *lvar_states;
503
504 /* catch table */
505 struct iseq_catch_table *catch_table;
506
507 /* for child iseq */
508 const struct rb_iseq_struct *parent_iseq;
509 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
510
511 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
512 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
513
514 struct {
515 rb_snum_t flip_count;
516 VALUE script_lines;
517 VALUE coverage;
518 VALUE pc2branchindex;
519 VALUE *original_iseq;
520 } variable;
521
522 unsigned int local_table_size;
523 unsigned int ic_size; // Number of IC caches
524 unsigned int ise_size; // Number of ISE caches
525 unsigned int ivc_size; // Number of IVC caches
526 unsigned int icvarc_size; // Number of ICVARC caches
527 unsigned int ci_size;
528 unsigned int stack_max; /* for stack overflow check */
529
530 unsigned int builtin_attrs; // Union of rb_builtin_attr
531
532 bool prism; // ISEQ was generated from prism compiler
533
534 union {
535 iseq_bits_t * list; /* Find references for GC */
536 iseq_bits_t single;
537 } mark_bits;
538
539 struct rb_id_table *outer_variables;
540
541 const rb_iseq_t *mandatory_only_iseq;
542
543#if USE_YJIT || USE_ZJIT
544 // Function pointer for JIT code on jit_exec()
545 rb_jit_func_t jit_entry;
546 // Number of calls on jit_exec()
547 long unsigned jit_entry_calls;
548 // Function pointer for JIT code on jit_exec_exception()
549 rb_jit_func_t jit_exception;
550 // Number of calls on jit_exec_exception()
551 long unsigned jit_exception_calls;
552#endif
553
554#if USE_YJIT
555 // YJIT stores some data on each iseq.
556 void *yjit_payload;
557 // Used to estimate how frequently this ISEQ gets called
558 uint64_t yjit_calls_at_interv;
559#endif
560
561#if USE_ZJIT
562 // ZJIT stores some data on each iseq.
563 void *zjit_payload;
564#endif
565};
566
567/* T_IMEMO/iseq */
568/* typedef rb_iseq_t is in method.h */
569struct rb_iseq_struct {
570 VALUE flags; /* 1 */
571 VALUE wrapper; /* 2 */
572
573 struct rb_iseq_constant_body *body; /* 3 */
574
575 union { /* 4, 5 words */
576 struct iseq_compile_data *compile_data; /* used at compile time */
577
578 struct {
579 VALUE obj;
580 int index;
581 } loader;
582
583 struct {
584 struct rb_hook_list_struct *local_hooks;
585 rb_event_flag_t global_trace_events;
586 } exec;
587 } aux;
588};
589
590#define ISEQ_BODY(iseq) ((iseq)->body)
591
592#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
593#define USE_LAZY_LOAD 0
594#endif
595
596#if !USE_LAZY_LOAD
597static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
598#endif
599const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
600
601static inline const rb_iseq_t *
602rb_iseq_check(const rb_iseq_t *iseq)
603{
604 if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
605 rb_iseq_complete((rb_iseq_t *)iseq);
606 }
607 return iseq;
608}
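/* Sketch of the intended calling pattern (our illustration, not a prescribed
 * API):
 *
 *   const rb_iseq_t *iseq = rb_iseq_check(maybe_lazy_iseq);
 *   unsigned int size = ISEQ_BODY(iseq)->iseq_size; // body is loaded now
 *
 * With USE_LAZY_LOAD == 0 the branch in rb_iseq_check() is compiled away. */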
609
610static inline bool
611rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
612{
613 return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
614}
615
616static inline const rb_iseq_t *
617def_iseq_ptr(rb_method_definition_t *def)
618{
619// TODO: revisit. To check the bug, enable this assertion.
620#if VM_CHECK_MODE > 0
621 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
622#endif
623 return rb_iseq_check(def->body.iseq.iseqptr);
624}
625
626enum ruby_special_exceptions {
627 ruby_error_reenter,
628 ruby_error_nomemory,
629 ruby_error_sysstack,
630 ruby_error_stackfatal,
631 ruby_error_stream_closed,
632 ruby_special_error_count
633};
634
635#define GetVMPtr(obj, ptr) \
636 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
637
638struct rb_vm_struct;
639typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
640
641typedef struct rb_at_exit_list {
642 rb_vm_at_exit_func *func;
643 struct rb_at_exit_list *next;
644} rb_at_exit_list;
645
646void *rb_objspace_alloc(void);
647void rb_objspace_free(void *objspace);
648void rb_objspace_call_finalizer(void);
649
650typedef struct rb_hook_list_struct {
651 struct rb_event_hook_struct *hooks;
652 rb_event_flag_t events;
653 unsigned int running;
654 bool need_clean;
655 bool is_local;
656} rb_hook_list_t;
657
658
659// see builtin.h for definition
660typedef const struct rb_builtin_function *RB_BUILTIN;
661
662struct global_object_list {
663 VALUE *varptr;
664 struct global_object_list *next;
665};
666
667typedef struct rb_vm_struct {
668 VALUE self;
669
670 struct {
671 struct ccan_list_head set;
672 unsigned int cnt;
673 unsigned int blocking_cnt;
674
675 struct rb_ractor_struct *main_ractor;
676 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
677
678 struct {
679 // monitor
680 rb_nativethread_lock_t lock;
681 struct rb_ractor_struct *lock_owner;
682 unsigned int lock_rec;
683
684 // join at exit
685 rb_nativethread_cond_t terminate_cond;
686 bool terminate_waiting;
687
688#ifndef RUBY_THREAD_PTHREAD_H
689 // win32
690 bool barrier_waiting;
691 unsigned int barrier_cnt;
692 rb_nativethread_cond_t barrier_complete_cond;
693 rb_nativethread_cond_t barrier_release_cond;
694#endif
695 } sync;
696
697#ifdef RUBY_THREAD_PTHREAD_H
698 // ractor scheduling
699 struct {
700 rb_nativethread_lock_t lock;
701 struct rb_ractor_struct *lock_owner;
702 bool locked;
703
704 rb_nativethread_cond_t cond; // GRQ
705 unsigned int snt_cnt; // count of shared NTs
706 unsigned int dnt_cnt; // count of dedicated NTs
707
708 unsigned int running_cnt;
709
710 unsigned int max_cpu;
711 struct ccan_list_head grq; // Global Ready Queue
712 unsigned int grq_cnt;
713
714 // running threads
715 struct ccan_list_head running_threads;
716
717 // threads which switch context by timeslice
718 struct ccan_list_head timeslice_threads;
719
720 struct ccan_list_head zombie_threads;
721
722 // true if the timeslice timer is not enabled
723 bool timeslice_wait_inf;
724
725 // barrier
726 rb_nativethread_cond_t barrier_complete_cond;
727 rb_nativethread_cond_t barrier_release_cond;
728 bool barrier_waiting;
729 unsigned int barrier_waiting_cnt;
730 unsigned int barrier_serial;
731 struct rb_ractor_struct *barrier_ractor;
732 unsigned int barrier_lock_rec;
733 } sched;
734#endif
735 } ractor;
736
737#ifdef USE_SIGALTSTACK
738 void *main_altstack;
739#endif
740
741 rb_serial_t fork_gen;
742
743 /* set in single-threaded processes only: */
744 volatile int ubf_async_safe;
745
746 unsigned int running: 1;
747 unsigned int thread_abort_on_exception: 1;
748 unsigned int thread_report_on_exception: 1;
749 unsigned int thread_ignore_deadlock: 1;
750
751 /* object management */
752 VALUE mark_object_ary;
753 struct global_object_list *global_object_list;
754 const VALUE special_exceptions[ruby_special_error_count];
755
756 /* namespace */
757 rb_namespace_t *root_namespace;
758 rb_namespace_t *main_namespace;
759
760 /* load */
761 // For running the init function of statically linked
762 // extensions when they are loaded
763 struct st_table *static_ext_inits;
764
765 /* signal */
766 struct {
767 VALUE cmd[RUBY_NSIG];
768 } trap_list;
769
770 /* postponed_job (async-signal-safe, and thread-safe) */
771 struct rb_postponed_job_queue *postponed_job_queue;
772
773 int src_encoding_index;
774
775 /* workqueue (thread-safe, NOT async-signal-safe) */
776 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
777 rb_nativethread_lock_t workqueue_lock;
778
779 VALUE orig_progname, progname;
780 VALUE coverages, me2counter;
781 int coverage_mode;
782
783 struct {
784 struct rb_objspace *objspace;
785 struct gc_mark_func_data_struct {
786 void *data;
787 void (*mark_func)(VALUE v, void *data);
788 } *mark_func_data;
789 } gc;
790
791 rb_at_exit_list *at_exit;
792
793 const struct rb_builtin_function *builtin_function_table;
794
795 st_table *ci_table;
796 struct rb_id_table *negative_cme_table;
797 st_table *overloaded_cme_table; // cme -> overloaded_cme
798 set_table *unused_block_warning_table;
799 set_table *cc_refinement_table;
800
801 // This id table contains a mapping from ID to ICs. It does this with ID
802 // keys and nested st_tables as values. The nested tables have ICs as keys
803 // and Qtrue as values. It is used when inline constant caches need to be
804 // invalidated or ISEQs are being freed.
805 struct rb_id_table *constant_cache;
806 ID inserting_constant_cache_id;
807
808#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
809#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
810#endif
811 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
812
813#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
814 uint32_t clock;
815#endif
816
817 /* params */
818 struct { /* size in byte */
819 size_t thread_vm_stack_size;
820 size_t thread_machine_stack_size;
821 size_t fiber_vm_stack_size;
822 size_t fiber_machine_stack_size;
823 } default_params;
824} rb_vm_t;
825
826/* default values */
827
828#define RUBY_VM_SIZE_ALIGN 4096
829
830#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
831#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
832#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
833#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
834
835#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
836#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
837#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
838#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
839#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
840#else
841#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
842#endif
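/* The sizes above scale with sizeof(VALUE): for example,
 * RUBY_VM_THREAD_VM_STACK_SIZE is 128 * 1024 * 4 = 512 KB on 32-bit builds
 * and 128 * 1024 * 8 = 1024 KB on 64-bit builds, which is what the
 * "512 KB or 1024 KB" comments denote. */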
843
844#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer)
845/* It seems sanitizers consume A LOT of machine stacks */
846#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
847#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
848#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
849#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
850#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
851#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
852#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
853#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
854#endif
855
856#ifndef VM_DEBUG_BP_CHECK
857#define VM_DEBUG_BP_CHECK 0
858#endif
859
860#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
861#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
862#endif
863
864struct rb_captured_block {
865 VALUE self;
866 const VALUE *ep;
867 union {
868 const rb_iseq_t *iseq;
869 const struct vm_ifunc *ifunc;
870 VALUE val;
871 } code;
872};
873
874enum rb_block_handler_type {
875 block_handler_type_iseq,
876 block_handler_type_ifunc,
877 block_handler_type_symbol,
878 block_handler_type_proc
879};
880
881enum rb_block_type {
882 block_type_iseq,
883 block_type_ifunc,
884 block_type_symbol,
885 block_type_proc
886};
887
888struct rb_block {
889 union {
890 struct rb_captured_block captured;
891 VALUE symbol;
892 VALUE proc;
893 } as;
894 enum rb_block_type type;
895};
896
897typedef struct rb_control_frame_struct {
898 const VALUE *pc; // cfp[0]
899 VALUE *sp; // cfp[1]
900 const rb_iseq_t *iseq; // cfp[2]
901 VALUE self; // cfp[3] / block[0]
902 const VALUE *ep; // cfp[4] / block[1]
903 const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
904 void *jit_return; // cfp[6] -- return address for JIT code
905#if VM_DEBUG_BP_CHECK
906 VALUE *bp_check; // cfp[7]
907#endif
908} rb_control_frame_t;
909
910extern const rb_data_type_t ruby_threadptr_data_type;
911
912static inline struct rb_thread_struct *
913rb_thread_ptr(VALUE thval)
914{
915 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
916}
917
918enum rb_thread_status {
919 THREAD_RUNNABLE,
920 THREAD_STOPPED,
921 THREAD_STOPPED_FOREVER,
922 THREAD_KILLED
923};
924
925#ifdef RUBY_JMP_BUF
926typedef RUBY_JMP_BUF rb_jmpbuf_t;
927#else
928typedef void *rb_jmpbuf_t[5];
929#endif
930
931/*
932 `rb_vm_tag_jmpbuf_t` type represents a buffer used to
933 long jump to a C frame associated with `rb_vm_tag`.
934
935 Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
936 following functions:
937 - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
938 - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
939
940 `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
941 `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
942*/
943#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
944/*
945  The WebAssembly target with Asyncify-based SJLJ needs
946  to capture the execution context by unwinding and rewinding
947  call frames into a jump buffer. The buffer space tends
948  to be considerably large, unlike other architectures'
949  register-based buffers.
950  Therefore, we allocate the buffer on the heap in such
951  environments.
952*/
953typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
954
955#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
956
957static inline void
958rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
959{
960 *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
961}
962
963static inline void
964rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
965{
966 ruby_xfree(*jmpbuf);
967}
968#else
969typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
970
971#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
972
973static inline void
974rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
975{
976 // no-op
977}
978
979static inline void
980rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
981{
982 // no-op
983}
984#endif
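/* A minimal usage sketch of the contract described above (hypothetical
 * caller; the real user is EC_PUSH_TAG()/EC_POP_TAG() in eval_intern.h):
 *
 *   struct rb_vm_tag tag;
 *   rb_vm_tag_jmpbuf_init(&tag.buf);
 *   if (ruby_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
 *       ... protected region; a matching ruby_longjmp() lands here ...
 *   }
 *   rb_vm_tag_jmpbuf_deinit(&tag.buf);
 */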
985
986/*
987  the members which are written in EC_PUSH_TAG() should be placed at
988  the beginning and the end, so that the entire region is accessible.
989*/
990struct rb_vm_tag {
991 VALUE tag;
992 VALUE retval;
993 rb_vm_tag_jmpbuf_t buf;
994 struct rb_vm_tag *prev;
995 enum ruby_tag_type state;
996 unsigned int lock_rec;
997};
998
999STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1000STATIC_ASSERT(rb_vm_tag_buf_end,
1001 offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1002 sizeof(struct rb_vm_tag));
1003
1004struct rb_unblock_callback {
1005 rb_unblock_function_t *func;
1006 void *arg;
1007};
1008
1009struct rb_mutex_struct;
1010
1011typedef struct rb_fiber_struct rb_fiber_t;
1012
1013struct rb_waiting_list {
1014 struct rb_waiting_list *next;
1015 struct rb_thread_struct *thread;
1016 struct rb_fiber_struct *fiber;
1017};
1018
1019struct rb_execution_context_struct {
1020 /* execution information */
1021 VALUE *vm_stack; /* must free, must mark */
1022 size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1023 rb_control_frame_t *cfp;
1024
1025 struct rb_vm_tag *tag;
1026
1027 /* interrupt flags */
1028 rb_atomic_t interrupt_flag;
1029 rb_atomic_t interrupt_mask; /* size should match flag */
1030#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1031 uint32_t checked_clock;
1032#endif
1033
1034 rb_fiber_t *fiber_ptr;
1035 struct rb_thread_struct *thread_ptr;
1036
1037 /* storage (ec (fiber) local) */
1038 struct rb_id_table *local_storage;
1039 VALUE local_storage_recursive_hash;
1040 VALUE local_storage_recursive_hash_for_trace;
1041
1042 /* Inheritable fiber storage. */
1043 VALUE storage;
1044
1045 /* eval env */
1046 const VALUE *root_lep;
1047 VALUE root_svar;
1048
1049 /* trace information */
1050 struct rb_trace_arg_struct *trace_arg;
1051
1052 /* temporary places */
1053 VALUE errinfo;
1054 VALUE passed_block_handler; /* for rb_iterate */
1055
1056 uint8_t raised_flag; /* only 3 bits needed */
1057
1058 /* n.b. only 7 bits needed, really: */
1059 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1060
1061 VALUE private_const_reference;
1062
1063 struct {
1064 VALUE obj;
1065 VALUE fields_obj;
1066 } gen_fields_cache;
1067
1068 /* for GC */
1069 struct {
1070 VALUE *stack_start;
1071 VALUE *stack_end;
1072 size_t stack_maxsize;
1073 RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;
1074
1075#ifdef RUBY_ASAN_ENABLED
1076 void *asan_fake_stack_handle;
1077#endif
1078 } machine;
1079};
1080
1081#ifndef rb_execution_context_t
1082typedef struct rb_execution_context_struct rb_execution_context_t;
1083#define rb_execution_context_t rb_execution_context_t
1084#endif
1085
1086// for builtin.h
1087#define VM_CORE_H_EC_DEFINED 1
1088
1089// Set the vm_stack pointer in the execution context.
1090void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1091
1092// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1093// @param ec the execution context to update.
1094// @param stack a pointer to the stack to use.
1095// @param size the size of the stack, as in `VALUE stack[size]`.
1096void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1097
1098// Clear (set to `NULL`) the vm_stack pointer.
1099// @param ec the execution context to update.
1100void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1101
1102struct rb_ext_config {
1103 bool ractor_safe;
1104};
1105
1106typedef struct rb_ractor_struct rb_ractor_t;
1107
1108struct rb_native_thread;
1109
1110typedef struct rb_thread_struct {
1111 struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
1112 VALUE self;
1113 rb_ractor_t *ractor;
1114 rb_vm_t *vm;
1115 struct rb_native_thread *nt;
1116 rb_execution_context_t *ec;
1117
1118 struct rb_thread_sched_item sched;
1119 bool mn_schedulable;
1120 rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1121
1122 VALUE last_status; /* $? */
1123
1124 /* for cfunc */
1125 struct rb_calling_info *calling;
1126
1127 /* for load(true) */
1128 VALUE top_self;
1129 VALUE top_wrapper;
1130
1131 /* thread control */
1132
1133 BITFIELD(enum rb_thread_status, status, 2);
1134 /* bit flags */
1135 unsigned int has_dedicated_nt : 1;
1136 unsigned int to_kill : 1;
1137 unsigned int abort_on_exception: 1;
1138 unsigned int report_on_exception: 1;
1139 unsigned int pending_interrupt_queue_checked: 1;
1140 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1141 uint32_t running_time_us; /* 12500..800000 */
1142
1143 void *blocking_region_buffer;
1144
1145 VALUE thgroup;
1146 VALUE value;
1147
1148 /* temporary place of retval on OPT_CALL_THREADED_CODE */
1149#if OPT_CALL_THREADED_CODE
1150 VALUE retval;
1151#endif
1152
1153 /* async errinfo queue */
1154 VALUE pending_interrupt_queue;
1155 VALUE pending_interrupt_mask_stack;
1156
1157 /* interrupt management */
1158 rb_nativethread_lock_t interrupt_lock;
1159 struct rb_unblock_callback unblock;
1160 VALUE locking_mutex;
1161 struct rb_mutex_struct *keeping_mutexes;
1162 struct ccan_list_head interrupt_exec_tasks;
1163
1164 struct rb_waiting_list *join_list;
1165
1166 union {
1167 struct {
1168 VALUE proc;
1169 VALUE args;
1170 int kw_splat;
1171 } proc;
1172 struct {
1173 VALUE (*func)(void *);
1174 void *arg;
1175 } func;
1176 } invoke_arg;
1177
1178 enum thread_invoke_type {
1179 thread_invoke_type_none = 0,
1180 thread_invoke_type_proc,
1181 thread_invoke_type_ractor_proc,
1182 thread_invoke_type_func
1183 } invoke_type;
1184
1185 /* fiber */
1186 rb_fiber_t *root_fiber;
1187
1188 VALUE scheduler;
1189 unsigned int blocking;
1190
1191 /* misc */
1192 VALUE name;
1193 void **specific_storage;
1194
1195 struct rb_ext_config ext_config;
1196} rb_thread_t;
1197
1198static inline unsigned int
1199rb_th_serial(const rb_thread_t *th)
1200{
1201 return th ? (unsigned int)th->serial : 0;
1202}
1203
1204typedef enum {
1205 VM_DEFINECLASS_TYPE_CLASS = 0x00,
1206 VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1207 VM_DEFINECLASS_TYPE_MODULE = 0x02,
1208 /* 0x03..0x06 is reserved */
1209 VM_DEFINECLASS_TYPE_MASK = 0x07
1210} rb_vm_defineclass_type_t;
1211
1212#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1213#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1214#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1215#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1216#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1217 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
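/* Worked example: `class A::B < C` compiles to a defineclass operand of
 * VM_DEFINECLASS_TYPE_CLASS | VM_DEFINECLASS_FLAG_SCOPED |
 * VM_DEFINECLASS_FLAG_HAS_SUPERCLASS == 0x18, so VM_DEFINECLASS_TYPE()
 * yields 0x00 (class) and both predicates above are true. */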
1218
1219/* iseq.c */
1220RUBY_SYMBOL_EXPORT_BEGIN
1221
1222/* node -> iseq */
1223rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1224rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1225rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1226rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1227rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1228 enum rb_iseq_type, const rb_compile_option_t*,
1229 VALUE script_lines);
1230
1231struct iseq_link_anchor;
1232struct rb_iseq_new_with_callback_callback_func {
1233 VALUE flags;
1234 VALUE reserved;
1235 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1236 const void *data;
1237};
1238static inline struct rb_iseq_new_with_callback_callback_func *
1239rb_iseq_new_with_callback_new_callback(
1240 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1241{
1242 struct rb_iseq_new_with_callback_callback_func *memo =
1243 IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1244 memo->func = func;
1245 memo->data = ptr;
1246
1247 return memo;
1248}
1249rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1250 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1251 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1252
1253VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1254int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1255
1256VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1257
1258RUBY_EXTERN VALUE rb_cISeq;
1259RUBY_EXTERN VALUE rb_cRubyVM;
1260RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1261RUBY_EXTERN VALUE rb_block_param_proxy;
1262RUBY_SYMBOL_EXPORT_END
1263
1264#define GetProcPtr(obj, ptr) \
1265 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1266
1267typedef struct {
1268 const struct rb_block block;
1269 unsigned int is_from_method: 1; /* bool */
1270 unsigned int is_lambda: 1; /* bool */
1271 unsigned int is_isolated: 1; /* bool */
1272} rb_proc_t;
1273
1274RUBY_SYMBOL_EXPORT_BEGIN
1275VALUE rb_proc_isolate(VALUE self);
1276VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
1277VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
1278RUBY_SYMBOL_EXPORT_END
1279
1280typedef struct {
1281 VALUE flags; /* imemo header */
1282 rb_iseq_t *iseq;
1283 const VALUE *ep;
1284 const VALUE *env;
1285 unsigned int env_size;
1286} rb_env_t;
1287
1288extern const rb_data_type_t ruby_binding_data_type;
1289
1290#define GetBindingPtr(obj, ptr) \
1291 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1292
1293typedef struct {
1294 const struct rb_block block;
1295 const VALUE pathobj;
1296 int first_lineno;
1297} rb_binding_t;
1298
1299/* used by compile time and send insn */
1300
1301enum vm_check_match_type {
1302 VM_CHECKMATCH_TYPE_WHEN = 1,
1303 VM_CHECKMATCH_TYPE_CASE = 2,
1304 VM_CHECKMATCH_TYPE_RESCUE = 3
1305};
1306
1307#define VM_CHECKMATCH_TYPE_MASK 0x03
1308#define VM_CHECKMATCH_ARRAY 0x04
1309
1310enum vm_opt_newarray_send_type {
1311 VM_OPT_NEWARRAY_SEND_MAX = 1,
1312 VM_OPT_NEWARRAY_SEND_MIN = 2,
1313 VM_OPT_NEWARRAY_SEND_HASH = 3,
1314 VM_OPT_NEWARRAY_SEND_PACK = 4,
1315 VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1316 VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
1317};
1318
1319enum vm_special_object_type {
1320 VM_SPECIAL_OBJECT_VMCORE = 1,
1321 VM_SPECIAL_OBJECT_CBASE,
1322 VM_SPECIAL_OBJECT_CONST_BASE
1323};
1324
1325enum vm_svar_index {
1326 VM_SVAR_LASTLINE = 0, /* $_ */
1327 VM_SVAR_BACKREF = 1, /* $~ */
1328
1329 VM_SVAR_EXTRA_START = 2,
1330 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1331};
1332
1333/* inline cache */
1334typedef struct iseq_inline_constant_cache *IC;
1335typedef struct iseq_inline_iv_cache_entry *IVC;
1336typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1337typedef union iseq_inline_storage_entry *ISE;
1338typedef const struct rb_callinfo *CALL_INFO;
1339typedef const struct rb_callcache *CALL_CACHE;
1340typedef struct rb_call_data *CALL_DATA;
1341
1342typedef VALUE CDHASH;
1343
1344#ifndef FUNC_FASTCALL
1345#define FUNC_FASTCALL(x) x
1346#endif
1347
1348typedef rb_control_frame_t *
1349 (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1350
1351#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1352#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1353
1354#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1355#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1356#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
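/* Worked example: GC_GUARDED_PTR(ep) sets bit 0, so the stored VALUE looks
 * like an immediate (Fixnum-tagged) to the GC and is not chased as a heap
 * pointer. GC_GUARDED_PTR_REF() masks off the low two bits (0x03) to recover
 * the original pointer, which is safe because the tagged pointers are at
 * least 4-byte aligned. */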
1357
1358enum vm_frame_env_flags {
1359 /* Frame/Environment flag bits:
1360 * MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
1361 *
1362 * X : tag for GC marking (it looks like a Fixnum to the GC)
1363 * EEE : 4 bits Env flags
1364 * FF..: 8 bits Frame flags
1365 * MM..: 15 bits frame magic (to check frame corruption)
1366 */
1367
1368 /* frame types */
1369 VM_FRAME_MAGIC_METHOD = 0x11110001,
1370 VM_FRAME_MAGIC_BLOCK = 0x22220001,
1371 VM_FRAME_MAGIC_CLASS = 0x33330001,
1372 VM_FRAME_MAGIC_TOP = 0x44440001,
1373 VM_FRAME_MAGIC_CFUNC = 0x55550001,
1374 VM_FRAME_MAGIC_IFUNC = 0x66660001,
1375 VM_FRAME_MAGIC_EVAL = 0x77770001,
1376 VM_FRAME_MAGIC_RESCUE = 0x78880001,
1377 VM_FRAME_MAGIC_DUMMY = 0x79990001,
1378
1379 VM_FRAME_MAGIC_MASK = 0x7fff0001,
1380
1381 /* frame flag */
1382 VM_FRAME_FLAG_FINISH = 0x0020,
1383 VM_FRAME_FLAG_BMETHOD = 0x0040,
1384 VM_FRAME_FLAG_CFRAME = 0x0080,
1385 VM_FRAME_FLAG_LAMBDA = 0x0100,
1386 VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1387 VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1388 VM_FRAME_FLAG_PASSED = 0x0800,
1389 VM_FRAME_FLAG_NS_REQUIRE = 0x1000,
1390
1391 /* env flag */
1392 VM_ENV_FLAG_LOCAL = 0x0002,
1393 VM_ENV_FLAG_ESCAPED = 0x0004,
1394 VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1395 VM_ENV_FLAG_ISOLATED = 0x0010,
1396};
1397
1398#define VM_ENV_DATA_SIZE ( 3)
1399
1400#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1401#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1402#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1403#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1404
1405#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1406
1407static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1408
1409static inline void
1410VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1411{
1412 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1413 VM_ASSERT(FIXNUM_P(flags));
1414 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1415}
1416
1417static inline void
1418VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1419{
1420 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1421 VM_ASSERT(FIXNUM_P(flags));
1422 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1423}
1424
1425static inline unsigned long
1426VM_ENV_FLAGS(const VALUE *ep, long flag)
1427{
1428 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1429 VM_ASSERT(FIXNUM_P(flags));
1430 return flags & flag;
1431}
1432
1433static inline unsigned long
1434VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
1435{
1436 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1437 return flags & flag;
1438}
1439
1440static inline unsigned long
1441VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
1442{
1443 return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
1444}
1445
1446static inline unsigned long
1447VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1448{
1449 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1450}
1451
1452static inline unsigned long
1453VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
1454{
1455 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
1456}
1457
1458static inline int
1459VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1460{
1461 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1462}
1463
1464static inline int
1465VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1466{
1467 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1468}
1469
1470static inline int
1471VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1472{
1473 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1474}
1475
1476static inline int
1477VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
1478{
1479 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1480}
1481
1482static inline int
1483VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1484{
1485 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1486}
1487
1488static inline int
1489rb_obj_is_iseq(VALUE iseq)
1490{
1491 return imemo_type_p(iseq, imemo_iseq);
1492}
1493
1494#if VM_CHECK_MODE > 0
1495#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1496#endif
1497
1498static inline int
1499VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1500{
1501 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1502 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1503 (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1504 return cframe_p;
1505}
1506
1507static inline int
1508VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
1509{
1510 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1511}
1512
1513static inline int
1514VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1515{
1516 return !VM_FRAME_CFRAME_P(cfp);
1517}
1518
1519static inline int
1520VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
1521{
1522 return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
1523}
1524
1525static inline int
1526VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
1527{
1528 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_NS_REQUIRE) != 0;
1529}
1530
1531#define RUBYVM_CFUNC_FRAME_P(cfp) \
1532 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1533
1534#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1535#define VM_BLOCK_HANDLER_NONE 0
1536
1537static inline int
1538VM_ENV_LOCAL_P(const VALUE *ep)
1539{
1540 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1541}
1542
1543static inline int
1544VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
1545{
1546 return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1547}
1548
1549static inline const VALUE *
1550VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
1551{
1552 return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1553}
1554
1555static inline const VALUE *
1556VM_ENV_PREV_EP(const VALUE *ep)
1557{
1558 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1559 return VM_ENV_PREV_EP_UNCHECKED(ep);
1560}
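/* Sketch: walking the ep chain up to the method-local env, mirroring what
 * rb_vm_ep_local_ep() (declared later in this file) does:
 *
 *   static inline const VALUE *
 *   example_local_ep(const VALUE *ep) // illustrative helper, not VM API
 *   {
 *       while (!VM_ENV_LOCAL_P(ep)) ep = VM_ENV_PREV_EP(ep);
 *       return ep;
 *   }
 */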
1561
1562static inline bool
1563VM_ENV_NAMESPACED_P(const VALUE *ep)
1564{
1565 return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
1566}
1567
1568static inline VALUE
1569VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1570{
1571 if (VM_ENV_NAMESPACED_P(ep)) {
1572 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1573 return VM_BLOCK_HANDLER_NONE;
1574 }
1575
1576 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1577 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1578}
1579
1580static inline const rb_namespace_t *
1581VM_ENV_NAMESPACE(const VALUE *ep)
1582{
1583 VM_ASSERT(VM_ENV_NAMESPACED_P(ep));
1584 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1585 return (const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1586}
1587
1588static inline const rb_namespace_t *
1589VM_ENV_NAMESPACE_UNCHECKED(const VALUE *ep)
1590{
1591 return (const rb_namespace_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1592}
1593
1594#if VM_CHECK_MODE > 0
1595int rb_vm_ep_in_heap_p(const VALUE *ep);
1596#endif
1597
1598static inline int
1599VM_ENV_ESCAPED_P(const VALUE *ep)
1600{
1601 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1602 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1603}
1604
1606static inline VALUE
1607VM_ENV_ENVVAL(const VALUE *ep)
1608{
1609 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1610 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1611 VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1612 return envval;
1613}
1614
1616static inline const rb_env_t *
1617VM_ENV_ENVVAL_PTR(const VALUE *ep)
1618{
1619 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1620}
1621
1622static inline const rb_env_t *
1623vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1624{
1625 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1626 env->ep = env_ep;
1627 env->env = env_body;
1628 env->env_size = env_size;
1629 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1630 return env;
1631}
1632
1633static inline void
1634VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1635{
1636 *((VALUE *)ptr) = v;
1637}
1638
1639static inline void
1640VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1641{
1642 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1643 VM_FORCE_WRITE(ptr, special_const_value);
1644}
1645
1646static inline void
1647VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1648{
1649 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1650 VM_FORCE_WRITE(&ep[index], v);
1651}
1652
1653const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1654const VALUE *rb_vm_proc_local_ep(VALUE proc);
1655void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1656void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1657
1658VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1659
1660#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1661#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1662
1663#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1664 ((void *)(ecfp) > (void *)(cfp))
1665
1666static inline const rb_control_frame_t *
1667RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1668{
1669 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1670}
1671
1672static inline int
1673RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1674{
1675 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1676}
1677
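/* Block handler encoding, as checked by the predicates below (derived from
 * the 0x01/0x03 tagging in this file):
 *   ....01 -> tagged pointer to a captured block whose code is an iseq
 *   ....11 -> tagged pointer to a captured block whose code is an ifunc
 *   otherwise -> a Symbol or Proc VALUE, or VM_BLOCK_HANDLER_NONE (0)
 */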
1678static inline int
1679VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1680{
1681 if ((block_handler & 0x03) == 0x01) {
1682#if VM_CHECK_MODE > 0
1683 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1684 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1685#endif
1686 return 1;
1687 }
1688 else {
1689 return 0;
1690 }
1691}
1692
1693static inline VALUE
1694VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1695{
1696 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1697 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1698 return block_handler;
1699}
1700
1701static inline const struct rb_captured_block *
1702VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1703{
1704 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1705 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1706 return captured;
1707}
1708
1709static inline int
1710VM_BH_IFUNC_P(VALUE block_handler)
1711{
1712 if ((block_handler & 0x03) == 0x03) {
1713#if VM_CHECK_MODE > 0
1714 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1715 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1716#endif
1717 return 1;
1718 }
1719 else {
1720 return 0;
1721 }
1722}
1723
1724static inline VALUE
1725VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1726{
1727 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1728 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1729 return block_handler;
1730}
1731
1732static inline const struct rb_captured_block *
1733VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1734{
1735 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1736 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1737 return captured;
1738}
1739
1740static inline const struct rb_captured_block *
1741VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1742{
1743 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1744 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1745 return captured;
1746}
1747
1748static inline enum rb_block_handler_type
1749vm_block_handler_type(VALUE block_handler)
1750{
1751 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1752 return block_handler_type_iseq;
1753 }
1754 else if (VM_BH_IFUNC_P(block_handler)) {
1755 return block_handler_type_ifunc;
1756 }
1757 else if (SYMBOL_P(block_handler)) {
1758 return block_handler_type_symbol;
1759 }
1760 else {
1761 VM_ASSERT(rb_obj_is_proc(block_handler));
1762 return block_handler_type_proc;
1763 }
1764}
1765
1766static inline void
1767vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1768{
1769 VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1770 (vm_block_handler_type(block_handler), 1));
1771}
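/* Typical dispatch over a verified handler (sketch; the VM's real dispatch
 * lives in vm_insnhelper.c):
 *
 *   switch (vm_block_handler_type(block_handler)) {
 *     case block_handler_type_iseq:   ... VM_BH_TO_ISEQ_BLOCK(block_handler) ...
 *     case block_handler_type_ifunc:  ... VM_BH_TO_IFUNC_BLOCK(block_handler) ...
 *     case block_handler_type_symbol: ... VM_BH_TO_SYMBOL(block_handler) ...
 *     case block_handler_type_proc:   ... VM_BH_TO_PROC(block_handler) ...
 *   }
 */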
1772
1773static inline enum rb_block_type
1774vm_block_type(const struct rb_block *block)
1775{
1776#if VM_CHECK_MODE > 0
1777 switch (block->type) {
1778 case block_type_iseq:
1779 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1780 break;
1781 case block_type_ifunc:
1782 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1783 break;
1784 case block_type_symbol:
1785 VM_ASSERT(SYMBOL_P(block->as.symbol));
1786 break;
1787 case block_type_proc:
1788 VM_ASSERT(rb_obj_is_proc(block->as.proc));
1789 break;
1790 }
1791#endif
1792 return block->type;
1793}
1794
1795static inline void
1796vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1797{
1798 struct rb_block *mb = (struct rb_block *)block;
1799 mb->type = type;
1800}
1801
1802static inline const struct rb_block *
1803vm_proc_block(VALUE procval)
1804{
1805 VM_ASSERT(rb_obj_is_proc(procval));
1806 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1807}
1808
1809static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1810static inline const VALUE *vm_block_ep(const struct rb_block *block);
1811
1812static inline const rb_iseq_t *
1813vm_proc_iseq(VALUE procval)
1814{
1815 return vm_block_iseq(vm_proc_block(procval));
1816}
1817
1818static inline const VALUE *
1819vm_proc_ep(VALUE procval)
1820{
1821 return vm_block_ep(vm_proc_block(procval));
1822}
1823
1824static inline const rb_iseq_t *
1825vm_block_iseq(const struct rb_block *block)
1826{
1827 switch (vm_block_type(block)) {
1828 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1829 case block_type_proc: return vm_proc_iseq(block->as.proc);
1830 case block_type_ifunc:
1831 case block_type_symbol: return NULL;
1832 }
1833 VM_UNREACHABLE(vm_block_iseq);
1834 return NULL;
1835}
1836
1837static inline const VALUE *
1838vm_block_ep(const struct rb_block *block)
1839{
1840 switch (vm_block_type(block)) {
1841 case block_type_iseq:
1842 case block_type_ifunc: return block->as.captured.ep;
1843 case block_type_proc: return vm_proc_ep(block->as.proc);
1844 case block_type_symbol: return NULL;
1845 }
1846 VM_UNREACHABLE(vm_block_ep);
1847 return NULL;
1848}
1849
1850static inline VALUE
1851vm_block_self(const struct rb_block *block)
1852{
1853 switch (vm_block_type(block)) {
1854 case block_type_iseq:
1855 case block_type_ifunc:
1856 return block->as.captured.self;
1857 case block_type_proc:
1858 return vm_block_self(vm_proc_block(block->as.proc));
1859 case block_type_symbol:
1860 return Qundef;
1861 }
1862 VM_UNREACHABLE(vm_block_self);
1863 return Qundef;
1864}
1865
1866static inline VALUE
1867VM_BH_TO_SYMBOL(VALUE block_handler)
1868{
1869 VM_ASSERT(SYMBOL_P(block_handler));
1870 return block_handler;
1871}
1872
1873static inline VALUE
1874VM_BH_FROM_SYMBOL(VALUE symbol)
1875{
1876 VM_ASSERT(SYMBOL_P(symbol));
1877 return symbol;
1878}
1879
1880static inline VALUE
1881VM_BH_TO_PROC(VALUE block_handler)
1882{
1883 VM_ASSERT(rb_obj_is_proc(block_handler));
1884 return block_handler;
1885}
1886
1887static inline VALUE
1888VM_BH_FROM_PROC(VALUE procval)
1889{
1890 VM_ASSERT(rb_obj_is_proc(procval));
1891 return procval;
1892}
1893
1894/* VM related object allocate functions */
1895VALUE rb_thread_alloc(VALUE klass);
1896VALUE rb_binding_alloc(VALUE klass);
1897VALUE rb_proc_alloc(VALUE klass);
1898VALUE rb_proc_dup(VALUE self);
1899
1900/* for debug */
1901extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1902extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1903extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1904
1905#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1906#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1907bool rb_vm_bugreport(const void *, FILE *);
1908typedef void (*ruby_sighandler_t)(int);
1909RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1910NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1911
1912/* functions about thread/vm execution */
1913RUBY_SYMBOL_EXPORT_BEGIN
1914VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_namespace_t *ns);
1915VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1916VALUE rb_iseq_path(const rb_iseq_t *iseq);
1917VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1918RUBY_SYMBOL_EXPORT_END
1919
1920VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1921void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1922
1923int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1924void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1925
1926VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1927
1928VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1929static inline VALUE
1930rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1931{
1932 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1933}
1934
1935static inline VALUE
1936rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1937{
1938 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1939}
1940
1941VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1942VALUE rb_vm_env_local_variables(const rb_env_t *env);
1943VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
1944const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1945const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1946void rb_vm_inc_const_missing_count(void);
1947VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1948 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1949void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1950void rb_vm_pop_frame(rb_execution_context_t *ec);
1951
1952void rb_thread_start_timer_thread(void);
1953void rb_thread_stop_timer_thread(void);
1954void rb_thread_reset_timer_thread(void);
1955void rb_thread_wakeup_timer_thread(int);
1956
1957static inline void
1958rb_vm_living_threads_init(rb_vm_t *vm)
1959{
1960 ccan_list_head_init(&vm->workqueue);
1961 ccan_list_head_init(&vm->ractor.set);
1962#ifdef RUBY_THREAD_PTHREAD_H
1963 ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1964#endif
1965}
1966
1967typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1968rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1969rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1970VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1971int rb_vm_get_sourceline(const rb_control_frame_t *);
1972void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1973void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1974void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
1975rb_thread_t * ruby_thread_from_native(void);
1976int ruby_thread_set_native(rb_thread_t *th);
1977int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1978void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1979void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
1980VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
1981
1982void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
1983
1984#define rb_vm_register_special_exception(sp, e, m) \
1985 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
1986
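/*
Example usage (illustrative; the concrete registrations live in vm.c, but this
mirrors the pattern using names declared in this header):

```c
rb_vm_register_special_exception(ruby_error_sysstack, rb_eSysStackError,
                                 "stack level too deep");
```

The message must be a string literal: the macro applies `rb_strlen_lit()` to it
and builds a static US-ASCII string before delegating to the `_str` function.
*/
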
1987void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
1988
1989rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
1990
1991const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
1992const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);
1993
1994#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
1995
1996#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
1997 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
1998 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
1999 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
2000 if (UNLIKELY((cfp) <= &bound[1])) { \
2001 vm_stackoverflow(); \
2002 } \
2003} while (0)
2004
2005#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
2006 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
2007
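/*
Example usage (a minimal sketch; `n` and `argv` are hypothetical): before
pushing `n` VALUEs onto the VM stack, check that `sp` plus a margin of `n`
slots does not run into the control-frame area, which grows toward the value
stack from the opposite end:

```c
CHECK_VM_STACK_OVERFLOW(cfp, n); // calls vm_stackoverflow() on overflow
for (int i = 0; i < n; i++) {
    *cfp->sp++ = argv[i];
}
```
*/
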
2008VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
2009
2010rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
2011
2012/* for thread */
2013
2014#if RUBY_VM_THREAD_MODEL == 2
2015
2016RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
2017RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
2018RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
2019RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags;
2020RUBY_EXTERN unsigned int ruby_vm_event_local_num;
2021
2022#define GET_VM() rb_current_vm()
2023#define GET_RACTOR() rb_current_ractor()
2024#define GET_THREAD() rb_current_thread()
2025#define GET_EC() rb_current_execution_context(true)
2026
2027static inline rb_thread_t *
2028rb_ec_thread_ptr(const rb_execution_context_t *ec)
2029{
2030 return ec->thread_ptr;
2031}
2032
2033static inline rb_ractor_t *
2034rb_ec_ractor_ptr(const rb_execution_context_t *ec)
2035{
2036 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2037 if (th) {
2038 VM_ASSERT(th->ractor != NULL);
2039 return th->ractor;
2040 }
2041 else {
2042 return NULL;
2043 }
2044}
2045
2046static inline rb_vm_t *
2047rb_ec_vm_ptr(const rb_execution_context_t *ec)
2048{
2049 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2050 if (th) {
2051 return th->vm;
2052 }
2053 else {
2054 return NULL;
2055 }
2056}
2057
2058NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
2059
2060static inline rb_execution_context_t *
2061rb_current_execution_context(bool expect_ec)
2062{
2063#ifdef RB_THREAD_LOCAL_SPECIFIER
2064 #ifdef RB_THREAD_CURRENT_EC_NOINLINE
2065 rb_execution_context_t * volatile ec = rb_current_ec();
2066 #else
2067 rb_execution_context_t * volatile ec = ruby_current_ec;
2068 #endif
2069
2070    /* On shared objects, `__tls_get_addr()` is used to access the TLS,
2071     * and the address of `ruby_current_ec` can be stored in a function
2072     * frame. However, that address can be misused after a coroutine
2073     * migrates to another native thread:
2074     *   1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
2075     *   2) Context-switch and resume the coroutine on NT2.
2076     *   3) `ptr` is used on NT2, but it still points into NT1's TLS.
2077     * This assertion checks for such misuse.
2078     *
2079     * To avoid such accidents, `GET_EC()` should be called only once per
2080     * frame; note that inlining can reintroduce the problem.
2081     */
2082 VM_ASSERT(ec == rb_current_ec_noinline());
2083#else
2084 rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
2085#endif
2086 VM_ASSERT(!expect_ec || ec != NULL);
2087 return ec;
2088}
2089
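/*
Sketch of the pitfall described above (hypothetical; `fiber_switch_point()`
stands in for any call that can resume this coroutine on another native
thread):

```c
rb_execution_context_t *ec = GET_EC(); // resolve the TLS address once
fiber_switch_point();                  // may migrate to another native thread
// Reusing `ec` here is fine: it is a plain value held in this frame.
// Calling GET_EC() again in this frame is not, because the compiler may have
// cached &ruby_current_ec, which now points into the old thread's TLS.
```
*/
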
2090static inline rb_thread_t *
2091rb_current_thread(void)
2092{
2093 const rb_execution_context_t *ec = GET_EC();
2094 return rb_ec_thread_ptr(ec);
2095}
2096
2097static inline rb_ractor_t *
2098rb_current_ractor_raw(bool expect)
2099{
2100 if (ruby_single_main_ractor) {
2101 return ruby_single_main_ractor;
2102 }
2103 else {
2104 const rb_execution_context_t *ec = rb_current_execution_context(expect);
2105 return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2106 }
2107}
2108
2109static inline rb_ractor_t *
2110rb_current_ractor(void)
2111{
2112 return rb_current_ractor_raw(true);
2113}
2114
2115static inline rb_vm_t *
2116rb_current_vm(void)
2117{
2118#if 0 // TODO: reconsider the assertions
2119 VM_ASSERT(ruby_current_vm_ptr == NULL ||
2120 ruby_current_execution_context_ptr == NULL ||
2121 rb_ec_thread_ptr(GET_EC()) == NULL ||
2122 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2123 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2124#endif
2125
2126 return ruby_current_vm_ptr;
2127}
2128
2129void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2130 unsigned int recorded_lock_rec,
2131 unsigned int current_lock_rec);
2132
2133/* This is technically a data race, since the field is read without holding the
2134 * lock; however, we compare against a value that only our own thread writes. */
2135NO_SANITIZE("thread", static inline bool
2136vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
2137{
2138 VM_ASSERT(cr == GET_RACTOR());
2139 return vm->ractor.sync.lock_owner == cr;
2140}
2141
2142static inline unsigned int
2143rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2144{
2145 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2146
2147 if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
2148 return 0;
2149 }
2150 else {
2151 return vm->ractor.sync.lock_rec;
2152 }
2153}
2154
2155#else
2156#error "unsupported thread model"
2157#endif
2158
2159enum {
2160 TIMER_INTERRUPT_MASK = 0x01,
2161 PENDING_INTERRUPT_MASK = 0x02,
2162 POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2163 TRAP_INTERRUPT_MASK = 0x08,
2164 TERMINATE_INTERRUPT_MASK = 0x10,
2165 VM_BARRIER_INTERRUPT_MASK = 0x20,
2166};
2167
2168#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2169#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2170#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2171#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2172#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2173#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2174
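/*
Sketch of how these bits are used (assuming `target_ec` was obtained
elsewhere): one thread atomically ORs a flag bit into the target's
interrupt_flag, and the target consumes it at its next interrupt check.

```c
RUBY_VM_SET_INTERRUPT(target_ec); // mark a pending interrupt

// ... later, on the target thread:
RUBY_VM_CHECK_INTS(ec);           // runs rb_threadptr_execute_interrupts()
                                  // when any unmasked flag bit is set
```
*/
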
2175static inline bool
2176RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
2177{
2178 return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
2179}
2180
2181static inline bool
2182RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2183{
2184#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2185 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2186
2187 if (current_clock != ec->checked_clock) {
2188 ec->checked_clock = current_clock;
2189 RUBY_VM_SET_TIMER_INTERRUPT(ec);
2190 }
2191#endif
2192 return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
2193}
2194
2195VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2196int rb_signal_buff_size(void);
2197int rb_signal_exec(rb_thread_t *th, int sig);
2198void rb_threadptr_check_signal(rb_thread_t *mth);
2199void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2200void rb_threadptr_signal_exit(rb_thread_t *th);
2201int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2202void rb_threadptr_interrupt(rb_thread_t *th);
2203void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2204void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2205void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2206VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2207void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2208void rb_execution_context_update(rb_execution_context_t *ec);
2209void rb_execution_context_mark(const rb_execution_context_t *ec);
2210void rb_fiber_close(rb_fiber_t *fib);
2211void Init_native_thread(rb_thread_t *th);
2212int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2213
2214// vm_sync.h
2215void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2216void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2217
2218#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2219static inline void
2220rb_vm_check_ints(rb_execution_context_t *ec)
2221{
2222#ifdef RUBY_ASSERT_CRITICAL_SECTION
2223 VM_ASSERT(ruby_assert_critical_section_entered == 0);
2224#endif
2225
2226 VM_ASSERT(ec == rb_current_ec_noinline());
2227
2228 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2229 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2230 }
2231}
2232
2233/* tracer */
2234
2236 rb_event_flag_t event;
2238 const rb_control_frame_t *cfp;
2239 VALUE self;
2240 ID id;
2241 ID called_id;
2242 VALUE klass;
2243 VALUE data;
2244
2245 int klass_solved;
2246
2247 /* calc from cfp */
2248 int lineno;
2249 VALUE path;
2250};
2251
2252void rb_hook_list_mark(rb_hook_list_t *hooks);
2253void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
2254void rb_hook_list_free(rb_hook_list_t *hooks);
2255void rb_hook_list_connect_tracepoint(VALUE target, rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2256void rb_hook_list_remove_tracepoint(rb_hook_list_t *list, VALUE tpval);
2257
2258void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2259
2260#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2261 const rb_event_flag_t flag_arg_ = (flag_); \
2262 rb_hook_list_t *hooks_arg_ = (hooks_); \
2263 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2264 /* defer evaluating the other arguments */ \
2265 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2266 } \
2267} while (0)
2268
2269static inline void
2270rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2271 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2272{
2273 struct rb_trace_arg_struct trace_arg;
2274
2275 VM_ASSERT((hooks->events & flag) != 0);
2276
2277 trace_arg.event = flag;
2278 trace_arg.ec = ec;
2279 trace_arg.cfp = ec->cfp;
2280 trace_arg.self = self;
2281 trace_arg.id = id;
2282 trace_arg.called_id = called_id;
2283 trace_arg.klass = klass;
2284 trace_arg.data = data;
2285 trace_arg.path = Qundef;
2286 trace_arg.klass_solved = 0;
2287
2288 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2289}
2290
2291struct rb_ractor_pub {
2292    VALUE self;
2293 uint32_t id;
2294 rb_hook_list_t hooks;
2295};
2296
2297static inline rb_hook_list_t *
2298rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2299{
2300 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2301 return &cr_pub->hooks;
2302}
2303
2304#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2305 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 0)
2306
2307#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2308 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_ractor_hooks(ec_), flag_, self_, id_, called_id_, klass_, data_, 1)
2309
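/*
Example (illustrative; `recv`, `mid`, and `klass` are placeholders): firing a
c-call event against the current ractor's hook list. EXEC_EVENT_HOOK_ORIG
tests `hooks->events & flag` before anything else, so the remaining arguments
are evaluated only when a hook is actually registered for the event.

```c
EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, mid, mid, klass, Qundef);
```
*/
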
2310static inline void
2311rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2312{
2313 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2314 NIL_P(eval_script) ? (VALUE)iseq :
2315 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2316}
2317
2318void rb_vm_trap_exit(rb_vm_t *vm);
2319void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2320void rb_vm_postponed_job_free(void); /* vm_trace.c */
2321size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2322void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2323
2324RUBY_SYMBOL_EXPORT_BEGIN
2325
2326int rb_thread_check_trap_pending(void);
2327
2328/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2329#define RUBY_EVENT_COVERAGE_LINE 0x010000
2330#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2331
2332extern VALUE rb_get_coverages(void);
2333extern void rb_set_coverages(VALUE, int, VALUE);
2334extern void rb_clear_coverages(void);
2335extern void rb_reset_coverages(void);
2336extern void rb_resume_coverages(void);
2337extern void rb_suspend_coverages(void);
2338
2339void rb_postponed_job_flush(rb_vm_t *vm);
2340
2341// ractor.c
2342RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2343RUBY_EXTERN VALUE rb_eRactorIsolationError;
2344
2345RUBY_SYMBOL_EXPORT_END
2346
2347#endif /* RUBY_VM_CORE_H */