Ruby 4.1.0dev (2026-04-04 revision 892991bdc1a5068d74a8597cd0ccf3092afffabf)
vm_core.h (892991bdc1a5068d74a8597cd0ccf3092afffabf)
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
// respect RUBY_DEBUG: if given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
58#if VM_CHECK_MODE > 0
59#define VM_ASSERT(expr, ...) \
60 RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62#define RUBY_ASSERT_CRITICAL_SECTION
63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64#else
65#define VM_ASSERT(/*expr, */...) ((void)0)
66#define VM_UNREACHABLE(func) UNREACHABLE
67#define RUBY_DEBUG_THREAD_SCHEDULE()
68#endif
69
70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
72#if defined(RUBY_ASSERT_CRITICAL_SECTION)
73/*
74# Critical Section Assertions
75
76These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
77such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.
78
79The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
80may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
81held by someone else.
82
83These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84is set.
85
86## Example Usage
87
88```c
89RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90// ... some code which does not invoke rb_vm_check_ints() ...
91RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92```
93
94If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96*/
97extern int ruby_assert_critical_section_entered;
98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100#else
101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103#endif
104
105#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106# include "wasm/setjmp.h"
107#else
108# include <setjmp.h>
109#endif
110
111#if defined(__linux__) || defined(__FreeBSD__)
112# define RB_THREAD_T_HAS_NATIVE_ID
113#endif
114
116#include "ccan/list/list.h"
117#include "id.h"
118#include "internal.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/box.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
126#include "method.h"
127#include "node.h"
128#include "ruby/ruby.h"
129#include "ruby/st.h"
130#include "ruby_atomic.h"
131#include "vm_opts.h"
132
133#include "ruby/thread_native.h"
134/*
135 * implementation selector of get_insn_info algorithm
136 * 0: linear search
137 * 1: binary search
138 * 2: succinct bitvector
139 */
140#ifndef VM_INSN_INFO_TABLE_IMPL
141# define VM_INSN_INFO_TABLE_IMPL 2
142#endif
143
144#if defined(NSIG_MAX) /* POSIX issue 8 */
145# undef NSIG
146# define NSIG NSIG_MAX
147#elif defined(_SIG_MAXSIG) /* FreeBSD */
148# undef NSIG
149# define NSIG _SIG_MAXSIG
150#elif defined(_SIGMAX) /* QNX */
151# define NSIG (_SIGMAX + 1)
152#elif defined(NSIG) /* 99% of everything else */
153# /* take it */
154#else /* Last resort */
155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156#endif
157
158#define RUBY_NSIG NSIG
159
160#if defined(SIGCLD)
161# define RUBY_SIGCHLD (SIGCLD)
162#elif defined(SIGCHLD)
163# define RUBY_SIGCHLD (SIGCHLD)
164#endif
165
166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167# define USE_SIGALTSTACK
168void *rb_allocate_sigaltstack(void);
169void *rb_register_sigaltstack(void *);
170# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171# define RB_ALTSTACK_FREE(var) free(var)
172# define RB_ALTSTACK(var) var
173#else /* noop */
174# define RB_ALTSTACK_INIT(var, altstack)
175# define RB_ALTSTACK_FREE(var)
176# define RB_ALTSTACK(var) (0)
177#endif
178
179#include THREAD_IMPL_H
180#define RUBY_VM_THREAD_MODEL 2
181
182/*****************/
183/* configuration */
184/*****************/
185
186/* gcc ver. check */
187#if defined(__GNUC__) && __GNUC__ >= 2
188
189#if OPT_TOKEN_THREADED_CODE
190#if OPT_DIRECT_THREADED_CODE
191#undef OPT_DIRECT_THREADED_CODE
192#endif
193#endif
194
195#else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
197/* disable threaded code options */
198#if OPT_DIRECT_THREADED_CODE
199#undef OPT_DIRECT_THREADED_CODE
200#endif
201#if OPT_TOKEN_THREADED_CODE
202#undef OPT_TOKEN_THREADED_CODE
203#endif
204#endif
205
206/* call threaded code */
207#if OPT_CALL_THREADED_CODE
208#if OPT_DIRECT_THREADED_CODE
209#undef OPT_DIRECT_THREADED_CODE
210#endif /* OPT_DIRECT_THREADED_CODE */
211#endif /* OPT_CALL_THREADED_CODE */
212
213void rb_vm_encoded_insn_data_table_init(void);
214typedef unsigned long rb_num_t;
215typedef signed long rb_snum_t;
216
/* Tag types for setjmp/longjmp-based non-local control flow.
 * A tag value is stored in `enum ruby_tag_type state` of `struct rb_vm_tag`
 * (see below) and reports why the jump was taken. RUBY_TAG_MASK covers
 * every valid tag value. */
enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};
229
230#define TAG_NONE RUBY_TAG_NONE
231#define TAG_RETURN RUBY_TAG_RETURN
232#define TAG_BREAK RUBY_TAG_BREAK
233#define TAG_NEXT RUBY_TAG_NEXT
234#define TAG_RETRY RUBY_TAG_RETRY
235#define TAG_REDO RUBY_TAG_REDO
236#define TAG_RAISE RUBY_TAG_RAISE
237#define TAG_THROW RUBY_TAG_THROW
238#define TAG_FATAL RUBY_TAG_FATAL
239#define TAG_MASK RUBY_TAG_MASK
240
/* Flag/mask values carried in the state operand of a VM `throw`.
 * The low byte (VM_THROW_STATE_MASK) holds the ruby_tag_type state;
 * VM_THROW_NO_ESCAPE_FLAG is OR-ed in above it. */
enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};
245
246/* forward declarations */
247struct rb_thread_struct;
249
250/* iseq data type */
252
254 rb_serial_t raw;
255 VALUE data[2];
256};
257
258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
260// imemo_constcache
262 VALUE flags;
263
264 VALUE value;
265 const rb_cref_t *ic_cref;
266};
267STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270
287
289 uint64_t value; // dest_shape_id in former half, attr_index in latter half
290 ID iv_set_name;
291};
292
296
298 struct {
299 struct rb_thread_struct *running_thread;
300 VALUE value;
301 } once;
302 struct iseq_inline_constant_cache ic_cache;
303 struct iseq_inline_iv_cache_entry iv_cache;
304};
305
307 const struct rb_call_data *cd;
308 const struct rb_callcache *cc;
309 VALUE block_handler;
310 VALUE recv;
311 int argc;
312 bool kw_splat;
313 VALUE heap_argv;
314};
315
316#ifndef VM_ARGC_STACK_MAX
317#define VM_ARGC_STACK_MAX 128
318#endif
319
320#define VM_KW_SPECIFIED_BITS_MAX (32-1) /* TODO: 32 -> Fixnum's max bits */
321
322# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
323
325
326#ifndef RUBY_CORE_DATA_TYPE_CHECK
327# if RUBY_DEBUG
328# define RUBY_CORE_DATA_TYPE_CHECK 1
329# else
330# define RUBY_CORE_DATA_TYPE_CHECK 0
331# endif
332#endif
333#if !RUBY_CORE_DATA_TYPE_CHECK
334#define GetCoreDataFromValue(obj, type, data_type, ptr) ((ptr) = (type*)RTYPEDDATA_GET_DATA(obj))
335#else
336#define GetCoreDataFromValue(obj, type, data_type, ptr) TypedData_Get_Struct(obj, type, data_type, ptr)
337#endif
338
340 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
341 VALUE base_label; /* String */
342 VALUE label; /* String */
343 int first_lineno;
344 int node_id;
345 rb_code_location_t code_location;
347
348#define PATHOBJ_PATH 0
349#define PATHOBJ_REALPATH 1
350
351static inline VALUE
352pathobj_path(VALUE pathobj)
353{
354 if (RB_TYPE_P(pathobj, T_STRING)) {
355 return pathobj;
356 }
357 else {
358 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
359 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
360 }
361}
362
363static inline VALUE
364pathobj_realpath(VALUE pathobj)
365{
366 if (RB_TYPE_P(pathobj, T_STRING)) {
367 return pathobj;
368 }
369 else {
370 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
371 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
372 }
373}
374
375/* Forward declarations */
376typedef uintptr_t iseq_bits_t;
377
378#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
379
380/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
381#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
382
383/* instruction sequence type */
/* Kind of instruction sequence, recorded per-iseq; distinguishes where
 * the sequence came from (top-level, method body, block, class body,
 * rescue/ensure clause, eval string, main script, or plain). */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};
395
// Attributes specified by Primitive.attr!
// Stored as a bitmask in rb_iseq_constant_body::builtin_attrs;
// query with rb_iseq_attr_p().
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq only contains single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    // The iseq acts like a C method in backtraces.
    BUILTIN_ATTR_C_TRACE = 0x08,
    // The iseq uses noint branch/jump opcodes that skip interrupt checking.
    BUILTIN_ATTR_WITHOUT_INTERRUPTS = 0x10,
};
409
410typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
411typedef VALUE (*rb_zjit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *, rb_jit_func_t);
412
414 enum rb_iseq_type type;
415
416 unsigned int iseq_size;
417 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
418
443 struct {
444 unsigned int has_lead : 1;
445 unsigned int has_opt : 1;
446 unsigned int has_rest : 1;
447 unsigned int has_post : 1;
448 unsigned int has_kw : 1;
449 unsigned int has_kwrest : 1;
450 unsigned int has_block : 1;
451
452 unsigned int ambiguous_param0 : 1; /* {|a|} */
453 unsigned int accepts_no_kwarg : 1;
454 unsigned int ruby2_keywords: 1;
455 unsigned int anon_rest: 1;
456 unsigned int anon_kwrest: 1;
457 unsigned int use_block: 1;
458 unsigned int forwardable: 1;
459 unsigned int accepts_no_block: 1;
460 } flags;
461
462 unsigned int size;
463
464 int lead_num;
465 int opt_num;
466 int rest_start;
467 int post_start;
468 int post_num;
469 int block_start;
470
471 const VALUE *opt_table; /* (opt_num + 1) entries. */
472 /* opt_num and opt_table:
473 *
474 * def foo o1=e1, o2=e2, ..., oN=eN
475 * #=>
476 * # prologue code
477 * A1: e1
478 * A2: e2
479 * ...
480 * AN: eN
481 * AL: body
482 * opt_num = N
483 * opt_table = [A1, A2, ..., AN, AL]
484 */
485
487 int num;
488 int required_num;
489 int bits_start;
490 int rest_start;
491 const ID *table;
492 VALUE *default_values;
493 } *keyword;
494 } param;
495
496 rb_iseq_location_t location;
497
498 /* insn info, must be freed */
500 const struct iseq_insn_info_entry *body;
501 unsigned int *positions;
502 unsigned int size;
503#if VM_INSN_INFO_TABLE_IMPL == 2
504 struct succ_index_table *succ_index_table;
505#endif
506 } insns_info;
507
508 const ID *local_table; /* must free */
509
510 enum lvar_state {
511 lvar_uninitialized,
512 lvar_initialized,
513 lvar_reassigned,
514 } *lvar_states;
515
516 /* catch table */
517 struct iseq_catch_table *catch_table;
518
519 /* for child iseq */
520 const struct rb_iseq_struct *parent_iseq;
521 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
522
523 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
524 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
525
526 struct {
527 rb_snum_t flip_count;
528 VALUE script_lines;
529 VALUE coverage;
530 VALUE pc2branchindex;
531 VALUE *original_iseq;
532 } variable;
533
534 unsigned int local_table_size;
535 unsigned int ic_size; // Number of IC caches
536 unsigned int ise_size; // Number of ISE caches
537 unsigned int ivc_size; // Number of IVC caches
538 unsigned int icvarc_size; // Number of ICVARC caches
539 unsigned int ci_size;
540 unsigned int stack_max; /* for stack overflow check */
541
542 unsigned int builtin_attrs; // Union of rb_builtin_attr
543
544 bool prism; // ISEQ was generated from prism compiler
545
546 union {
547 iseq_bits_t * list; /* Find references for GC */
548 iseq_bits_t single;
549 } mark_bits;
550
551 struct rb_id_table *outer_variables;
552
553 const rb_iseq_t *mandatory_only_iseq;
554
555#if USE_YJIT || USE_ZJIT
556 // Function pointer for JIT code on jit_exec()
557 rb_jit_func_t jit_entry;
558 // Number of calls on jit_exec()
559 long unsigned jit_entry_calls;
560 // Function pointer for JIT code on jit_exec_exception()
561 rb_jit_func_t jit_exception;
562 // Number of calls on jit_exec_exception()
563 long unsigned jit_exception_calls;
564#endif
565
566#if USE_YJIT
567 // YJIT stores some data on each iseq.
568 void *yjit_payload;
569 // Used to estimate how frequently this ISEQ gets called
570 uint64_t yjit_calls_at_interv;
571#endif
572
573#if USE_ZJIT
574 // ZJIT stores some data on each iseq.
575 void *zjit_payload;
576#endif
577};
578
579/* T_IMEMO/iseq */
580/* typedef rb_iseq_t is in method.h */
582 VALUE flags; /* 1 */
583 VALUE wrapper; /* 2 */
584
585 struct rb_iseq_constant_body *body; /* 3 */
586
587 union { /* 4, 5 words */
588 struct iseq_compile_data *compile_data; /* used at compile time */
589
590 struct {
591 VALUE obj;
592 int index;
593 } loader;
594
595 struct {
596 unsigned int local_hooks_cnt;
597 rb_event_flag_t global_trace_events;
598 } exec;
599 } aux;
600};
601
602#define ISEQ_BODY(iseq) ((iseq)->body)
603
604#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
605#define USE_LAZY_LOAD 0
606#endif
607
#if !USE_LAZY_LOAD
/* Stub used when lazy loading is disabled: rb_iseq_check()'s
 * `USE_LAZY_LOAD && ...` condition is then constant-false, so this is
 * never executed; it only satisfies the symbol reference. */
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
612
613static inline const rb_iseq_t *
614rb_iseq_check(const rb_iseq_t *iseq)
615{
616 if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
617 rb_iseq_complete((rb_iseq_t *)iseq);
618 }
619 return iseq;
620}
621
622static inline bool
623rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
624{
625 return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
626}
627
/* Return the (lazily completed) iseq of an iseq-backed method definition.
 * `def` must have type VM_METHOD_TYPE_ISEQ; under VM_CHECK_MODE a
 * mismatch triggers rb_bug(). */
static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
//TODO: re-visit. to check the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}
637
/* Indexes into rb_vm_t::special_exceptions, an array of pre-allocated
 * exception objects (see the `special_exceptions` member below). */
enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count /* number of entries, not an error */
};
646
647extern const rb_data_type_t ruby_vm_data_type;
648
649#define GetVMPtr(obj, ptr) \
650 GetCoreDataFromValue((obj), rb_vm_t, &ruby_vm_data_type, (ptr))
651
652struct rb_vm_struct;
653typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
654
655typedef struct rb_at_exit_list {
656 rb_vm_at_exit_func *func;
657 struct rb_at_exit_list *next;
659
660void *rb_objspace_alloc(void);
661void rb_objspace_free(void *objspace);
662void rb_objspace_call_finalizer(void);
663
/* Kind of event-hook list (stored in rb_hook_list_struct::type). */
enum rb_hook_list_type {
    hook_list_type_ractor_local,
    hook_list_type_targeted_iseq,
    hook_list_type_targeted_def, // C function
    hook_list_type_global
};
670
671typedef struct rb_hook_list_struct {
672 struct rb_event_hook_struct *hooks;
673 rb_event_flag_t events;
674 unsigned int running;
675 enum rb_hook_list_type type;
676 bool need_clean;
678
679// see builtin.h for definition
680typedef const struct rb_builtin_function *RB_BUILTIN;
681
683 VALUE *varptr;
684 struct global_object_list *next;
685};
686
687typedef struct rb_vm_struct {
688 VALUE self;
689
690 struct {
691 struct ccan_list_head set;
692 unsigned int cnt;
693 unsigned int blocking_cnt;
694
695 struct rb_ractor_struct *main_ractor;
696 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
697
698 struct {
699 // monitor
700 rb_nativethread_lock_t lock;
701 struct rb_ractor_struct *lock_owner;
702 unsigned int lock_rec;
703
704 // join at exit
705 rb_nativethread_cond_t terminate_cond;
706 bool terminate_waiting;
707
708#ifndef RUBY_THREAD_PTHREAD_H
709 // win32
710 bool barrier_waiting;
711 unsigned int barrier_cnt;
712 rb_nativethread_cond_t barrier_complete_cond;
713 rb_nativethread_cond_t barrier_release_cond;
714#endif
715 } sync;
716
717#ifdef RUBY_THREAD_PTHREAD_H
718 // ractor scheduling
719 struct {
720 rb_nativethread_lock_t lock;
721 struct rb_ractor_struct *lock_owner;
722 bool locked;
723
724 rb_nativethread_cond_t cond; // GRQ
725 unsigned int snt_cnt; // count of shared NTs
726 unsigned int dnt_cnt; // count of dedicated NTs
727
728 unsigned int running_cnt;
729
730 unsigned int max_cpu;
            struct ccan_list_head grq; // Global Ready Queue
732 unsigned int grq_cnt;
733
734 // running threads
735 struct ccan_list_head running_threads;
736
737 // threads which switch context by timeslice
738 struct ccan_list_head timeslice_threads;
739
740 struct ccan_list_head zombie_threads;
741
            // true if the timeslice timer is not enabled
743 bool timeslice_wait_inf;
744
745 // barrier
746 rb_nativethread_cond_t barrier_complete_cond;
747 rb_nativethread_cond_t barrier_release_cond;
748 bool barrier_waiting;
749 unsigned int barrier_waiting_cnt;
750 unsigned int barrier_serial;
751 struct rb_ractor_struct *barrier_ractor;
752 unsigned int barrier_lock_rec;
753 } sched;
754#endif
755 } ractor;
756
757#ifdef USE_SIGALTSTACK
758 void *main_altstack;
759#endif
760
761 rb_serial_t fork_gen;
762
763 /* set in single-threaded processes only: */
764 volatile int ubf_async_safe;
765
766 unsigned int running: 1;
767 unsigned int thread_abort_on_exception: 1;
768 unsigned int thread_report_on_exception: 1;
769 unsigned int thread_ignore_deadlock: 1;
770
771 /* object management */
772 VALUE mark_object_ary;
774 const VALUE special_exceptions[ruby_special_error_count];
775
776 /* Ruby Box */
777 rb_box_t *root_box;
778 rb_box_t *main_box;
779
780 /* load */
781 // For running the init function of statically linked
782 // extensions when they are loaded
783 struct st_table static_ext_inits;
784
785 /* signal */
786 struct {
787 VALUE cmd[RUBY_NSIG];
788 } trap_list;
789
790 /* hook (for internal events: NEWOBJ, FREEOBJ, GC events, etc.) */
791 rb_hook_list_t global_hooks;
792
793 int src_encoding_index;
794
795 /* workqueue (thread-safe, NOT async-signal-safe) */
796 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
797 rb_nativethread_lock_t workqueue_lock;
798
799 VALUE orig_progname, progname;
800 VALUE coverages, me2counter;
801 int coverage_mode;
802
803 struct {
804 struct rb_objspace *objspace;
805 struct gc_mark_func_data_struct {
806 void *data;
807 void (*mark_func)(VALUE v, void *data);
808 } *mark_func_data;
809 } gc;
810
811 rb_at_exit_list *at_exit;
812
813 const struct rb_builtin_function *builtin_function_table;
814
815 st_table ci_table;
816 struct rb_id_table negative_cme_table;
817 st_table overloaded_cme_table; // cme -> overloaded_cme
818 set_table unused_block_warning_table;
819 VALUE cc_refinement_set;
820
821 // This id table contains a mapping from ID to ICs. It does this with ID
822 // keys and nested st_tables as values. The nested tables have ICs as keys
823 // and Qtrue as values. It is used when inline constant caches need to be
824 // invalidated or ISEQs are being freed.
825 struct rb_id_table constant_cache;
826 ID inserting_constant_cache_id;
827
828#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
829#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
830#endif
831 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
832 bool global_cc_cache_table_used; // vm_eval.c
833
834#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
835 uint32_t clock;
836#endif
837
838 /* params */
839 struct { /* size in byte */
840 size_t thread_vm_stack_size;
841 size_t thread_machine_stack_size;
842 size_t fiber_vm_stack_size;
843 size_t fiber_machine_stack_size;
844 } default_params;
845} rb_vm_t;
846
847extern bool ruby_vm_during_cleanup;
848
849/* default values */
850
851#define RUBY_VM_SIZE_ALIGN 4096
852
853#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
854#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
855#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
856#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
857
858#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
859#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
860#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
861#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
862#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
863#else
864#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
865#endif
866
867#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
868/* It seems sanitizers consume A LOT of machine stacks */
869#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
870#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
871#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
872#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
873#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
874#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
875#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
876#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
877#endif
878
879#ifndef VM_DEBUG_BP_CHECK
880#define VM_DEBUG_BP_CHECK 0
881#endif
882
883#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
884#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
885#endif
886
888 VALUE self;
889 const VALUE *ep;
890 union {
891 const rb_iseq_t *iseq;
892 const struct vm_ifunc *ifunc;
893 VALUE val;
894 } code;
895};
896
/* Type tag describing what a block handler VALUE refers to
 * (an iseq block, a C-function ifunc, a Symbol, or a Proc). */
enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};
903
/* Discriminator for the union inside struct rb_block (below). */
enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};
910
/* A block value: either a captured block (iseq or ifunc), a Symbol,
 * or a Proc object. `type` selects the active member of `as`. */
struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};
919
921 const VALUE *pc; // cfp[0]
922 VALUE *sp; // cfp[1]
923 const rb_iseq_t *_iseq; // cfp[2] -- use rb_cfp_iseq(cfp) to read
924 VALUE self; // cfp[3] / block[0]
925 const VALUE *ep; // cfp[4] / block[1]
926 const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
927 void *jit_return; // cfp[6] -- return address for JIT code
928#if VM_DEBUG_BP_CHECK
929 VALUE *bp_check; // cfp[7]
930#endif
932
933extern const rb_data_type_t ruby_threadptr_data_type;
934
935static inline struct rb_thread_struct *
936rb_thread_ptr(VALUE thval)
937{
938 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
939}
940
/* Lifecycle status of a Ruby thread (stored as a 2-bit bitfield in
 * rb_thread_t::status). */
enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};
947
948#ifdef RUBY_JMP_BUF
949typedef RUBY_JMP_BUF rb_jmpbuf_t;
950#else
951typedef void *rb_jmpbuf_t[5];
952#endif
953
954/*
955 `rb_vm_tag_jmpbuf_t` type represents a buffer used to
956 long jump to a C frame associated with `rb_vm_tag`.
957
958 Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
959 following functions:
960 - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
961 - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
962
963 `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
964 `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
965*/
966#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
967/*
968 WebAssembly target with Asyncify-based SJLJ needs
969 to capture the execution context by unwind/rewind-ing
970 call frames into a jump buffer. The buffer space tends
971 to be considerably large unlike other architectures'
972 register-based buffers.
  Therefore, we allocate the buffer on the heap in such
  environments.
975*/
976typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
977
978#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
979
/* Heap-allocate the jump buffer (wasm Asyncify buffers are large;
 * see the comment above). Pair with rb_vm_tag_jmpbuf_deinit(). */
static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}
985
/* Release the heap-allocated jump buffer created by
 * rb_vm_tag_jmpbuf_init(). */
static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
991#else
992typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
993
994#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
995
/* Non-wasm: the jump buffer lives inline in struct rb_vm_tag, so
 * initialization is a no-op. */
static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
1001
/* Non-wasm: nothing was allocated, so deinitialization is a no-op. */
static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
1007#endif
1008
1009/*
1010 the members which are written in EC_PUSH_TAG() should be placed at
1011 the beginning and the end, so that entire region is accessible.
1012*/
1014 VALUE tag;
1015 VALUE retval;
1016 rb_vm_tag_jmpbuf_t buf;
1017 struct rb_vm_tag *prev;
1018 enum ruby_tag_type state;
1019 unsigned int lock_rec;
1020};
1021
1022STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1023STATIC_ASSERT(rb_vm_tag_buf_end,
1024 offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1025 sizeof(struct rb_vm_tag));
1026
1029 void *arg;
1030 rb_atomic_t event_serial;
1031};
1032
1033struct rb_mutex_struct;
1034
1035typedef struct rb_fiber_struct rb_fiber_t;
1036
1038 struct rb_waiting_list *next;
1039 struct rb_thread_struct *thread;
1040 struct rb_fiber_struct *fiber;
1041};
1042
1044 /* execution information */
1045 VALUE *vm_stack; /* must free, must mark */
1046 size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1047 rb_control_frame_t *cfp;
1048
1049 struct rb_vm_tag *tag;
1050
1051 /* interrupt flags */
1052 rb_atomic_t interrupt_flag;
1053 rb_atomic_t interrupt_mask; /* size should match flag */
1054#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1055 uint32_t checked_clock;
1056#endif
1057
1058 rb_fiber_t *fiber_ptr;
1059 struct rb_thread_struct *thread_ptr;
1060 rb_serial_t serial;
1061 rb_serial_t ractor_id;
1062
1063 /* storage (ec (fiber) local) */
1064 struct rb_id_table *local_storage;
1065 VALUE local_storage_recursive_hash;
1066 VALUE local_storage_recursive_hash_for_trace;
1067
1068 /* Inheritable fiber storage. */
1069 VALUE storage;
1070
1071 /* eval env */
1072 const VALUE *root_lep;
1073 VALUE root_svar;
1074
1075 /* trace information */
1076 struct rb_trace_arg_struct *trace_arg;
1077
1078 /* temporary places */
1079 VALUE errinfo;
1080 VALUE passed_block_handler; /* for rb_iterate */
1081
1082 uint8_t raised_flag; /* only 3 bits needed */
1083
1084 /* n.b. only 7 bits needed, really: */
1085 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1086
1087 VALUE private_const_reference;
1088
1089 struct {
1090 VALUE obj;
1091 VALUE fields_obj;
1092 } gen_fields_cache;
1093
1094 /* for GC */
1095 struct {
1096 VALUE *stack_start;
1097 VALUE *stack_end;
1098 size_t stack_maxsize;
1100
1101#ifdef RUBY_ASAN_ENABLED
1102 void *asan_fake_stack_handle;
1103#endif
1104 } machine;
1105};
1106
1107#ifndef rb_execution_context_t
1109#define rb_execution_context_t rb_execution_context_t
1110#endif
1111
1112// for builtin.h
1113#define VM_CORE_H_EC_DEFINED 1
1114
1115// Set the vm_stack pointer in the execution context.
1116void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1117
1118// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1119// @param ec the execution context to update.
1120// @param stack a pointer to the stack to use.
1121// @param size the size of the stack, as in `VALUE stack[size]`.
1122void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1123
1124// Clear (set to `NULL`) the vm_stack pointer.
1125// @param ec the execution context to update.
1126void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1127
1128// Close an execution context and free related resources that are no longer needed.
1129// @param ec the execution context to close.
1130void rb_ec_close(rb_execution_context_t *ec);
1131
1133 bool ractor_safe;
1134};
1135
1136typedef struct rb_ractor_struct rb_ractor_t;
1137
1138struct rb_native_thread;
1139
/* Per-thread VM state.  One rb_thread_struct exists per Ruby Thread; it is
 * owned by a ractor (see lt_node) and is executed by a native thread (nt). */
typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
    VALUE self;                    // the Thread object that wraps this struct
    rb_ractor_t *ractor;           // owning ractor
    rb_vm_t *vm;                   // owning VM
    struct rb_native_thread *nt;   // native thread running us; may be shared (see has_dedicated_nt)

    struct rb_thread_sched_item sched;
    bool mn_schedulable;           // NOTE(review): presumably "eligible for M:N scheduling" — confirm
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int main_thread : 1;
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;       // NOTE(review): appears to hold the thread's result — verify against thread.c

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;
    struct ccan_list_head interrupt_exec_tasks;

    struct rb_waiting_list *join_list;

    /* Arguments for the thread body; which member is live is recorded in
     * invoke_type below. */
    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;         // NOTE(review): presumably the Fiber scheduler object — confirm
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;
1228
1229static inline unsigned int
1230rb_th_serial(const rb_thread_t *th)
1231{
1232 return th ? (unsigned int)th->serial : 0;
1233}
1234
/* Operand encoding of the `defineclass` instruction: the low 3 bits select
 * the kind of definition; higher bits carry VM_DEFINECLASS_FLAG_* modifiers. */
typedef enum {
    VM_DEFINECLASS_TYPE_CLASS = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK = 0x07
} rb_vm_defineclass_type_t;

/* Extract the type part of a defineclass operand. */
#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
/* Modifier flags stored above the type bits. */
#define VM_DEFINECLASS_FLAG_SCOPED 0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_FLAG_DYNAMIC_CREF 0x20
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
#define VM_DEFINECLASS_DYNAMIC_CREF_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_DYNAMIC_CREF)
1252
1253/* iseq.c */
1254RUBY_SYMBOL_EXPORT_BEGIN
1255
1256/* node -> iseq */
1257rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1258rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1259rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1260rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1261rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1262 enum rb_iseq_type, const rb_compile_option_t*,
1263 VALUE script_lines);
1264
1265struct iseq_link_anchor;
1267 VALUE flags;
1268 VALUE reserved;
1269 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1270 const void *data;
1271};
1272static inline struct rb_iseq_new_with_callback_callback_func *
1273rb_iseq_new_with_callback_new_callback(
1274 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1275{
1277 IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1278 memo->func = func;
1279 memo->data = ptr;
1280
1281 return memo;
1282}
1283rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1284 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1285 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1286
1287VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1288int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1289
1290VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1291
1292RUBY_EXTERN VALUE rb_cISeq;
1293RUBY_EXTERN VALUE rb_cRubyVM;
1294RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1295RUBY_EXTERN VALUE rb_block_param_proxy;
1296RUBY_SYMBOL_EXPORT_END
1297
extern const rb_data_type_t ruby_proc_data_type;

/* Fetch the rb_proc_t* wrapped by a Proc VALUE. */
#define GetProcPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_proc_t, &ruby_proc_data_type, (ptr))

/* Internal representation of a Proc: a block plus a few mode bits. */
typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1; /* bool */
    unsigned int is_isolated: 1; /* bool */
} rb_proc_t;
1309
1310RUBY_SYMBOL_EXPORT_BEGIN
1311VALUE rb_proc_isolate(VALUE self);
1312VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
1313VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
1314RUBY_SYMBOL_EXPORT_END
1315
/* imemo_env: an environment (local-variable table) that has been copied off
 * the VM stack to the heap (see vm_env_new / VM_ENV_ENVVAL). */
typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;       // iseq whose locals this env holds
    const VALUE *ep;       // environment pointer (NOTE(review): presumably points into `env` after escape — confirm)
    const VALUE *env;      // array of env_size VALUEs
    unsigned int env_size;
} rb_env_t;
1323
extern const rb_data_type_t ruby_binding_data_type;

/* Fetch the rb_binding_t* wrapped by a Binding VALUE. */
#define GetBindingPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_binding_t, &ruby_binding_data_type, (ptr))

/* Internal representation of a Binding: a captured block plus the source
 * location it was created at. */
typedef struct {
    const struct rb_block block;
    const VALUE pathobj;   // presumably path or [path, realpath] (see rb_iseq_pathobj_new) — verify
    int first_lineno;
} rb_binding_t;
1334
/* used by compile time and send insn */

/* Operand of the `checkmatch` instruction: which flavor of === to perform. */
enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
/* Flag bit combined with the type: the pattern operand is an Array of
 * patterns rather than a single pattern. */
#define VM_CHECKMATCH_ARRAY 0x04

/* Operand of `opt_newarray_send`: which method call is fused onto a literal
 * array so the array allocation can be avoided. */
enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};
1354
/* Operand of the `putspecialobject` instruction. */
enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

/* Indices into the svar (special variable) storage of a frame. */
enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1, /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};
1368
/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

/* Signature of a JIT/threaded-code instruction function. */
typedef rb_control_frame_t *
    (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

/* Store/strip a tag in the low bits of an aligned pointer. */
#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

/* "GC guarded" pointers: setting bit 0 makes the word look like a Fixnum,
 * so the GC does not treat it as an object reference. */
#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1393
enum vm_frame_env_flags {
    /* Layout of the flags word stored at ep[VM_ENV_DATA_INDEX_FLAGS]:
     *
     *   bit   0     : X — tag for GC marking (the word looks like a Fixnum)
     *   bits  1..4  : E — env flags     (VM_ENV_FLAG_*,   0x0002..0x0010)
     *   bits  5..12 : F — frame flags   (VM_FRAME_FLAG_*, 0x0020..0x1000)
     *   bits 16..30 : M — frame magic   (VM_FRAME_MAGIC_*, to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK = 0x22220001,
    VM_FRAME_MAGIC_CLASS = 0x33330001,
    VM_FRAME_MAGIC_TOP = 0x44440001,
    VM_FRAME_MAGIC_CFUNC = 0x55550001,
    VM_FRAME_MAGIC_IFUNC = 0x66660001,
    VM_FRAME_MAGIC_EVAL = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY = 0x79990001,

    VM_FRAME_MAGIC_MASK = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH = 0x0020,
    VM_FRAME_FLAG_BMETHOD = 0x0040,
    VM_FRAME_FLAG_CFRAME = 0x0080,
    VM_FRAME_FLAG_LAMBDA = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED = 0x0800,
    VM_FRAME_FLAG_BOX_REQUIRE = 0x1000,

    /* env flag */
    VM_ENV_FLAG_LOCAL = 0x0002,
    VM_ENV_FLAG_ESCAPED = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED = 0x0010,
};
1433
/* Every environment carries VM_ENV_DATA_SIZE trailing control slots,
 * addressed relative to the environment pointer (ep). */
#define VM_ENV_DATA_SIZE ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */

/* Index of the last local variable, relative to ep. */
#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1442
static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

/* Set `flag` bits in ep's flags word.  The word is Fixnum-tagged, so the
 * plain (barrier-free) store via VM_FORCE_WRITE_SPECIAL_CONST is safe. */
static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

/* Clear `flag` bits in ep's flags word. */
static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}
1460
1461static inline unsigned long
1462VM_ENV_FLAGS(const VALUE *ep, long flag)
1463{
1464 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1465 VM_ASSERT(FIXNUM_P(flags));
1466 return flags & flag;
1467}
1468
1469static inline unsigned long
1470VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
1471{
1472 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1473 return flags & flag;
1474}
1475
/* True if ep's frame magic equals `frame_type` (a VM_FRAME_MAGIC_* value). */
static inline unsigned long
VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
{
    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
}

/* The frame's magic (VM_FRAME_MAGIC_*). */
static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

/* Same as VM_FRAME_TYPE, without the Fixnum assertion. */
static inline unsigned long
VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
}

/* True if the frame was created by a lambda. */
static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

/* True if the C frame was passed keyword arguments. */
static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

/* True if returning from this frame leaves the current VM loop ("finish" frame). */
static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

/* Same as VM_FRAME_FINISHED_P, without the Fixnum assertion. */
static inline int
VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

/* True if this frame is a method defined from a block (define_method). */
static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}
1523
/* True if `iseq` is an iseq imemo object. */
static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif
1533
/* True for C frames.  In VM_CHECK_MODE this is cross-checked against the
 * frame's iseq (a C frame should not have one), with exemptions for DUMMY
 * frames and JIT frames whose iseq may not be materialized yet. */
static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    // With ZJIT lightweight frames, cfp->_iseq may be stale (not yet materialized),
    // so skip this assertion when jit_return is set (zjit.h is not available here).
    VM_ASSERT(cfp->jit_return ||
              RUBY_VM_NORMAL_ISEQ_P(cfp->_iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}
1545
/* Same as VM_FRAME_CFRAME_P, without assertions. */
static inline int
VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
}

/* True for Ruby (iseq-driven) frames: the complement of C frames. */
static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

/* Assertion-free variant of VM_FRAME_RUBYFRAME_P. */
static inline int
VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
}

/* True if this frame is a `require` running under a box (VM_FRAME_FLAG_BOX_REQUIRE). */
static inline int
VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE) != 0;
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

/* The previous ep is stored GC-guarded (tagged) in the specval slot. */
#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
1575
/* True if ep is a "local" env, i.e. the outermost env of a method/top frame
 * (its specval slot then holds a block handler or box, not a previous ep). */
static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

/* Assertion-free variant of VM_ENV_LOCAL_P. */
static inline int
VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
{
    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

/* The enclosing (previous) ep, read from the GC-guarded specval slot without
 * checking that one actually exists. */
static inline const VALUE *
VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
{
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

/* The enclosing (previous) ep; only valid for non-local envs. */
static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return VM_ENV_PREV_EP_UNCHECKED(ep);
}
1600
1601static inline bool
1602VM_ENV_BOXED_P(const VALUE *ep)
1603{
1604 return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
1605}
1606
1607static inline VALUE
1608VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1609{
1610 if (VM_ENV_BOXED_P(ep)) {
1611 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1612 return VM_BLOCK_HANDLER_NONE;
1613 }
1614
1615 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1616 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1617}
1618
/* The box stored (GC-guarded) in a boxed local env's specval slot. */
static inline const rb_box_t *
VM_ENV_BOX(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_BOXED_P(ep));
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

/* Same as VM_ENV_BOX, without assertions. */
static inline const rb_box_t *
VM_ENV_BOX_UNCHECKED(const VALUE *ep)
{
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}
1632
#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

/* True if this env has escaped from the VM stack to the heap; cross-checked
 * against the ep's actual location in VM_CHECK_MODE. */
static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

/* The escaped env object (imemo_env, or Qundef) stored at ep[1];
 * only meaningful once the env has escaped. */
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

/* Same as VM_ENV_ENVVAL, typed as rb_env_t*. */
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}
1660
/* Allocate a fresh imemo_env for `env_size` slots at `env_body`, and record
 * it back into the env's ep[1] slot so VM_ENV_ENVVAL can find it. */
static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;  // NOTE(review): plain store, no write barrier — presumably callers guarantee safety; confirm
    return env;
}
1671
/* Write through a const VALUE* without a GC write barrier.  Callers must
 * ensure a barrier is not needed (special const, or stack memory). */
static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

/* VM_FORCE_WRITE restricted to special constants, which never need a
 * write barrier (asserted). */
static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

/* Write an env slot that is still on the VM stack (asserted via the
 * WB_REQUIRED flag being clear), so no write barrier is needed. */
static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}
1691
1692const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1693const VALUE *rb_vm_proc_local_ep(VALUE proc);
1694void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1695void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1696
1697VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1698
/* Control frames grow downward in memory: the previous (older) frame lives
 * at a higher address than the current one. */
#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)

/* cfp is valid while it lies below the end-of-stack sentinel frame. */
#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))

/* One past the last usable control frame (top of the VM stack area). */
static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

/* True if cfp has run past the end of ec's control-frame area. */
static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}
1716
1717static inline int
1718VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1719{
1720 if ((block_handler & 0x03) == 0x01) {
1721#if VM_CHECK_MODE > 0
1722 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1723 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1724#endif
1725 return 1;
1726 }
1727 else {
1728 return 0;
1729 }
1730}
1731
/* Tag a captured block as an iseq block handler (low bits 0b01). */
static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

/* Strip the tag from an iseq block handler, recovering the captured block. */
static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1747
1748static inline int
1749VM_BH_IFUNC_P(VALUE block_handler)
1750{
1751 if ((block_handler & 0x03) == 0x03) {
1752#if VM_CHECK_MODE > 0
1753 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1754 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1755#endif
1756 return 1;
1757 }
1758 else {
1759 return 0;
1760 }
1761}
1762
/* Tag a captured block as an ifunc block handler (low bits 0b11). */
static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

/* Strip the tag from an ifunc block handler, recovering the captured block. */
static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

/* Strip the tag from either kind of captured block handler (iseq or ifunc). */
static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}
1786
1787static inline enum rb_block_handler_type
1788vm_block_handler_type(VALUE block_handler)
1789{
1790 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1791 return block_handler_type_iseq;
1792 }
1793 else if (VM_BH_IFUNC_P(block_handler)) {
1794 return block_handler_type_ifunc;
1795 }
1796 else if (SYMBOL_P(block_handler)) {
1797 return block_handler_type_symbol;
1798 }
1799 else {
1800 VM_ASSERT(rb_obj_is_proc(block_handler));
1801 return block_handler_type_proc;
1802 }
1803}
1804
/* Assert that block_handler is "none" or a well-formed handler.  The comma
 * expression runs vm_block_handler_type purely for its internal assertions
 * and then yields true. */
static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}
1811
/* The block's type discriminator; in VM_CHECK_MODE also validates that the
 * union member matching that type is well-formed. */
static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}
1833
1834static inline void
1835vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1836{
1837 struct rb_block *mb = (struct rb_block *)block;
1838 mb->type = type;
1839}
1840
/* The block embedded in a Proc object's rb_proc_t payload. */
static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

/* The iseq behind a Proc (NULL for ifunc/symbol procs, see vm_block_iseq). */
static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

/* The environment pointer captured by a Proc. */
static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

/* The iseq of a block, following proc indirection; NULL when the block has
 * no iseq (ifunc or symbol blocks). */
static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

/* The ep captured by a block, following proc indirection; NULL for symbol
 * blocks, which capture no environment. */
static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc: return block->as.captured.ep;
      case block_type_proc: return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

/* The self captured by a block; Qundef for symbol blocks. */
static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}
1904
/* Symbol and Proc block handlers are stored untagged: conversion in either
 * direction is the identity, guarded only by type assertions. */
static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}
1932
1933/* VM related object allocate functions */
1934VALUE rb_thread_alloc(VALUE klass);
1935VALUE rb_binding_alloc(VALUE klass);
1936VALUE rb_proc_alloc(VALUE klass);
1937VALUE rb_proc_dup(VALUE self);
1938
1939/* for debug */
1940extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1941extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1942extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1943
1944#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1945#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1946bool rb_vm_bugreport(const void *, FILE *);
1947typedef void (*ruby_sighandler_t)(int);
1948RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1949NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1950
1951/* functions about thread/vm execution */
1952RUBY_SYMBOL_EXPORT_BEGIN
1953VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box);
1954VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1955VALUE rb_iseq_path(const rb_iseq_t *iseq);
1956VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1957RUBY_SYMBOL_EXPORT_END
1958
1959VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1960void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1961
1962int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1963void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1964
1965VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1966
VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);

/* Create a (non-lambda) Proc from a captured block. */
static inline VALUE
rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 0);
}

/* Create a lambda from a captured block. */
static inline VALUE
rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
{
    return rb_vm_make_proc_lambda(ec, captured, klass, 1);
}
1979
1980VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1981VALUE rb_vm_env_local_variables(const rb_env_t *env);
1982VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
1983const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1984const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1985void rb_vm_inc_const_missing_count(void);
1986VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1987 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1988void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1989void rb_vm_pop_frame(rb_execution_context_t *ec);
1990
1991void rb_thread_start_timer_thread(void);
1992void rb_thread_stop_timer_thread(void);
1993void rb_thread_reset_timer_thread(void);
1994void rb_thread_wakeup_timer_thread(int);
1995
/* Reset the VM's thread/ractor bookkeeping lists to empty. */
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}
2005
2006typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
2007rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
2008rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
2009VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
2010int rb_vm_get_sourceline(const rb_control_frame_t *);
2011void rb_vm_stack_to_heap(rb_execution_context_t *ec);
2012void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
2013void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack, size_t stack_size);
2014rb_thread_t * ruby_thread_from_native(void);
2015int ruby_thread_set_native(rb_thread_t *th);
2016int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
2017void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
2018void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
2019VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
2020
2021void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
2022
2023#define rb_vm_register_special_exception(sp, e, m) \
2024 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
2025
2026void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
2027
2028rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
2029
2030const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
2031const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);
2032
#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

/* Raise (via vm_stackoverflow) unless at least `margin` VALUE slots remain
 * between sp and the control-frame area growing down toward it. */
#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
    STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
    if (UNLIKELY((cfp) <= &bound[1])) { \
        vm_stackoverflow(); \
    } \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
2046
2047VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
2048
2049rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
2050
2051/* for thread */
2052
2053#if RUBY_VM_THREAD_MODEL == 2
2054
2055RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
2056RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
2057RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
2058RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags; // only ever added to
2059RUBY_EXTERN unsigned int ruby_vm_iseq_events_enabled;
2060RUBY_EXTERN unsigned int ruby_vm_c_events_enabled;
2061
2062#define GET_VM() rb_current_vm()
2063#define GET_RACTOR() rb_current_ractor()
2064#define GET_THREAD() rb_current_thread()
2065#define GET_EC() rb_current_execution_context(true)
2066
/* The execution context's serial number (always >= 1, asserted). */
static inline rb_serial_t
rb_ec_serial(struct rb_execution_context_struct *ec)
{
    VM_ASSERT(ec->serial >= 1);
    return ec->serial;
}

/* The thread this execution context belongs to. */
static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}
2079
2080static inline rb_ractor_t *
2081rb_ec_ractor_ptr(const rb_execution_context_t *ec)
2082{
2083 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2084 if (th) {
2085 VM_ASSERT(th->ractor != NULL);
2086 return th->ractor;
2087 }
2088 else {
2089 return NULL;
2090 }
2091}
2092
2093static inline rb_serial_t
2094rb_ec_ractor_id(const rb_execution_context_t *ec)
2095{
2096 rb_serial_t ractor_id = ec->ractor_id;
2097 RUBY_ASSERT(ractor_id);
2098 return ractor_id;
2099}
2100
2101static inline rb_vm_t *
2102rb_ec_vm_ptr(const rb_execution_context_t *ec)
2103{
2104 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2105 if (th) {
2106 return th->vm;
2107 }
2108 else {
2109 return NULL;
2110 }
2111}
2112
2113NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
2114
/* The current thread's execution context, read from thread-local storage.
 * When expect_ec is true, a NULL result is an assertion failure. */
static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef RB_THREAD_CURRENT_EC_NOINLINE
    rb_execution_context_t * volatile ec = rb_current_ec();
  #else
    rb_execution_context_t * volatile ec = ruby_current_ec;
  #endif

    /* On the shared objects, `__tls_get_addr()` is used to access the TLS
     * and the address of the `ruby_current_ec` can be stored on a function
     * frame. However, this address can be mis-used after native thread
     * migration of a coroutine.
     * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     * 2) Context switch and resume it on the NT2.
     * 3) `ptr` is used on NT2 but it accesses the TLS of NT1.
     * This assertion checks such misusage.
     *
     * To avoid accidents, `GET_EC()` should be called once on the frame.
     * Note that inlining can produce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
2144
2145static inline rb_thread_t *
2146rb_current_thread(void)
2147{
2148 const rb_execution_context_t *ec = GET_EC();
2149 return rb_ec_thread_ptr(ec);
2150}
2151
2152static inline rb_ractor_t *
2153rb_current_ractor_raw(bool expect)
2154{
2155 if (ruby_single_main_ractor) {
2156 return ruby_single_main_ractor;
2157 }
2158 else {
2159 const rb_execution_context_t *ec = rb_current_execution_context(expect);
2160 return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2161 }
2162}
2163
/* The currently running ractor; asserts that one exists. */
static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}
2169
/* The process-global VM pointer. */
static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}
2183
void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

/* True if `cr` (which must be the current ractor, asserted) holds the VM
 * lock. */
/* This technically is a data race, as it's checked without the lock, however we
 * check against a value only our own thread will write. */
NO_SANITIZE("thread", static inline bool
vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
{
    VM_ASSERT(cr == GET_RACTOR());
    return vm->ractor.sync.lock_owner == cr;
}
2196
2197static inline unsigned int
2198rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2199{
2200 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2201
2202 if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
2203 return 0;
2204 }
2205 else {
2206 return vm->ractor.sync.lock_rec;
2207 }
2208}
2209
2210#else
2211#error "unsupported thread model"
2212#endif
2213
/* Bit flags stored in ec->interrupt_flag.  They are set atomically by the
 * RUBY_VM_SET_*_INTERRUPT macros below and consumed via
 * rb_threadptr_execute_interrupts() / RUBY_VM_INTERRUPTED*(). */
enum {
    TIMER_INTERRUPT_MASK = 0x01,
    PENDING_INTERRUPT_MASK = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK = 0x08,
    TERMINATE_INTERRUPT_MASK = 0x10,
    VM_BARRIER_INTERRUPT_MASK = 0x20,
};
2222
/* Atomically OR one interrupt bit (see the enum above) into ec->interrupt_flag. */
#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2229
2230static inline bool
2231RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
2232{
2233 return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
2234}
2235
/* Returns nonzero when any unmasked interrupt bit is set on `ec`.
 * With USE_VM_CLOCK, a VM clock advance since this ec's last check is first
 * folded in as a timer interrupt (note: mutates ec->checked_clock, which is
 * why `ec` is non-const here). */
static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
}
2249
2250VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2251int rb_signal_buff_size(void);
2252int rb_signal_exec(rb_thread_t *th, int sig);
2253void rb_threadptr_check_signal(rb_thread_t *mth);
2254void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2255void rb_threadptr_signal_exit(rb_thread_t *th);
2256int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2257void rb_threadptr_interrupt(rb_thread_t *th);
2258void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2259void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2260void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2261VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2262void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2263void rb_execution_context_update(rb_execution_context_t *ec);
2264void rb_execution_context_mark(const rb_execution_context_t *ec);
2265void rb_fiber_close(rb_fiber_t *fib);
2266void Init_native_thread(rb_thread_t *th);
2267int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2268
2269// vm_sync.h
2270void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2271void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2272
/* Check for, and execute, any pending unmasked interrupts on `ec`.
 * Must run on the native thread currently owning `ec` (asserted below) and
 * never inside an asserted critical section. */
#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == rb_current_ec_noinline());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
2287
2288/* tracer */
2289
2291 rb_event_flag_t event;
2293 const rb_control_frame_t *cfp;
2294 VALUE self;
2295 ID id;
2296 ID called_id;
2297 VALUE klass;
2298 VALUE data;
2299
2300 int klass_solved;
2301
2302 /* calc from cfp */
2303 int lineno;
2304 VALUE path;
2305};
2306
2307void rb_hook_list_mark(rb_hook_list_t *hooks);
2308void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
2309void rb_hook_list_free(rb_hook_list_t *hooks);
2310void rb_hook_list_connect_local_tracepoint(rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2311bool rb_hook_list_remove_local_tracepoint(rb_hook_list_t *list, VALUE tpval);
2312unsigned int rb_hook_list_count(rb_hook_list_t *list);
2313
2314void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2315
/* Fire the hooks in `hooks_` for event `flag_`, but only when a hook is
 * actually registered for it.  This is a macro (not a function) so that the
 * trailing arguments — which may allocate, e.g. a data_ array — are only
 * evaluated on the rare hook-enabled path. */
#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
2324
2325static inline void
2326rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2327 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2328{
2329 struct rb_trace_arg_struct trace_arg;
2330
2331 VM_ASSERT((hooks->events & flag) != 0);
2332
2333 trace_arg.event = flag;
2334 trace_arg.ec = ec;
2335 trace_arg.cfp = ec->cfp;
2336 trace_arg.self = self;
2337 trace_arg.id = id;
2338 trace_arg.called_id = called_id;
2339 trace_arg.klass = klass;
2340 trace_arg.data = data;
2341 trace_arg.path = Qundef;
2342 trace_arg.klass_solved = 0;
2343
2344 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2345}
2346
2348 VALUE self;
2349 uint32_t id;
2350 rb_hook_list_t hooks;
2351 st_table targeted_hooks; // also called "local hooks". {ISEQ => hook_list, def => hook_list...}
2352 unsigned int targeted_hooks_cnt; // ex: tp.enabled(target: method(:puts))
2353};
2354
2355static inline rb_hook_list_t *
2356rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2357{
2358 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2359 return &cr_pub->hooks;
2360}
2361
2362static inline rb_hook_list_t *
2363rb_vm_global_hooks(const rb_execution_context_t *ec)
2364{
2365 return &rb_ec_vm_ptr(ec)->global_hooks;
2366}
2367
2368static inline rb_hook_list_t *
2369rb_ec_hooks(const rb_execution_context_t *ec, rb_event_flag_t event)
2370{
2371 // Should be a single bit set
2372 VM_ASSERT(event != 0 && ((event - 1) & event) == 0);
2373
2375 return rb_vm_global_hooks(ec);
2376 }
2377 else {
2378 return rb_ec_ractor_hooks(ec);
2379 }
2380}
2381
/* Fire `flag_` hooks against the appropriate (global or ractor-local) hook
 * list.  The _AND_POP_FRAME variant passes pop_p_ = 1 (frame-popping
 * behavior is handled inside rb_exec_event_hooks()). */
#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
  EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 1)
2387
/* Fire RUBY_EVENT_SCRIPT_COMPILED for `iseq` (paired with the source string
 * `eval_script` when that is non-nil).  The data argument is intentionally
 * built inside the macro invocation: EXEC_EVENT_HOOK defers evaluating it,
 * so the array allocation only happens when a matching hook is enabled. */
static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
2395
2396void rb_vm_trap_exit(rb_vm_t *vm);
2397void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2398size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2399
2400RUBY_SYMBOL_EXPORT_BEGIN
2401
2402int rb_thread_check_trap_pending(void);
2403
2404/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2405#define RUBY_EVENT_COVERAGE_LINE 0x010000
2406#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2407
2408extern VALUE rb_get_coverages(void);
2409extern void rb_set_coverages(VALUE, int, VALUE);
2410extern void rb_clear_coverages(void);
2411extern void rb_reset_coverages(void);
2412extern void rb_resume_coverages(void);
2413extern void rb_suspend_coverages(void);
2414
2415void rb_postponed_job_flush(rb_vm_t *vm);
2416
2417// ractor.c
2418RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2419RUBY_EXTERN VALUE rb_eRactorIsolationError;
2420
2421RUBY_SYMBOL_EXPORT_END
2422
2423#endif /* RUBY_VM_CORE_H */
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
Definition atomic.h:69
#define RUBY_ALIGNAS
Wraps (or simulates) alignas.
Definition stdalign.h:27
#define RUBY_EXTERN
Declaration of externally visible global variables.
Definition dllexport.h:45
#define RUBY_EVENT_SCRIPT_COMPILED
Encountered an eval.
Definition event.h:60
#define RUBY_INTERNAL_EVENT_OBJSPACE_MASK
Bitmask of GC events.
Definition event.h:100
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define T_STRING
Old name of RUBY_T_STRING.
Definition value_type.h:78
#define Qundef
Old name of RUBY_Qundef.
#define Qfalse
Old name of RUBY_Qfalse.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
Definition value_type.h:56
#define NIL_P
Old name of RB_NIL_P.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
Definition value_type.h:88
void * rb_check_typeddata(VALUE obj, const rb_data_type_t *data_type)
Identical to rb_typeddata_is_kind_of(), except it raises exceptions instead of returning false.
Definition error.c:1413
#define RBIMPL_ATTR_FORMAT(x, y, z)
Wraps (or simulates) __attribute__((format))
Definition format.h:29
#define RBIMPL_ATTR_NONNULL(list)
Wraps (or simulates) __attribute__((nonnull))
Definition nonnull.h:30
VALUE rb_obj_is_proc(VALUE recv)
Queries if the given object is a proc.
Definition proc.c:122
void rb_unblock_function_t(void *)
This is the type of UBFs.
Definition thread.h:336
VALUE rb_block_call_func(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg))
This is the type of a function that the interpreter expect for C-backended blocks.
Definition iterator.h:83
VALUE type(ANYARGS)
ANYARGS-ed function type.
Functions related to nodes in the AST.
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define RTYPEDDATA_DATA(v)
Convenient getter macro.
Definition rtypeddata.h:106
static bool RB_SPECIAL_CONST_P(VALUE obj)
Checks if the given object is of enum ruby_special_consts.
Defines old _.
C99 shim for <stdbool.h>
Definition vm_core.h:261
const ID * segments
A null-terminated list of ids, used to represent a constant's path idNULL is used to represent the ::...
Definition vm_core.h:285
Definition vm_core.h:293
Definition vm_core.h:288
Definition iseq.h:260
Internal header for Ruby Box.
Definition box.h:14
Definition method.h:63
CREF (Class REFerence)
Definition method.h:45
Definition class.h:37
This is the struct that holds necessary info for a struct.
Definition rtypeddata.h:229
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
Definition method.h:143
Definition st.h:79
IFUNC (Internal FUNCtion)
Definition imemo.h:84
Definition vm_core.h:253
Definition vm_core.h:297
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
#define SIZEOF_VALUE
Identical to sizeof(VALUE), except it is a macro that can also be used inside of preprocessor directi...
Definition value.h:69
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376