#ifndef RUBY_VM_CORE_H
#define RUBY_VM_CORE_H
/**********************************************************************

  vm_core.h -

  $Author$
  created at: 04/01/01 19:41:38 JST

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
 * Enable check mode.
 *   1: enable local assertions.
 */
#ifndef VM_CHECK_MODE

// respect RUBY_DEBUG: if the given n is 0, use RUBY_DEBUG instead
#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)

#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
#endif

#ifndef VMDEBUG
#define VMDEBUG 0
#endif

#if 0
#undef  VMDEBUG
#define VMDEBUG 3
#endif

#include "ruby/internal/config.h"

#include <stddef.h>
#include <signal.h>
#include <stdarg.h>

#include "ruby_assert.h"

#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))

#if VM_CHECK_MODE > 0
#define VM_ASSERT(expr, ...) \
    RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
#define RUBY_ASSERT_CRITICAL_SECTION
#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
#else
#define VM_ASSERT(/*expr, */...) ((void)0)
#define VM_UNREACHABLE(func) UNREACHABLE
#define RUBY_DEBUG_THREAD_SCHEDULE()
#endif
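
/*
 * Illustrative usage (a sketch, not code from this file): VM_ASSERT takes a
 * boolean expression plus optional message arguments, and compiles to a
 * no-op when VM_CHECK_MODE is 0, so it is free in release builds.
 *
 * ```c
 * VM_ASSERT(list != NULL);                  // plain assertion
 * VM_ASSERT(n >= 0, "unexpected n: %d", n); // with an extra message
 * ```
 */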

#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))

#if defined(RUBY_ASSERT_CRITICAL_SECTION)
/*
# Critical Section Assertions

These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.

The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
held by someone else.

These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
is set.

## Example Usage

```c
RUBY_ASSERT_CRITICAL_SECTION_ENTER();
// ... some code which does not invoke rb_vm_check_ints() ...
RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
```

If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
*/
extern int ruby_assert_critical_section_entered;
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
#else
#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
#endif

#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
# include "wasm/setjmp.h"
#else
# include <setjmp.h>
#endif

#if defined(__linux__) || defined(__FreeBSD__)
# define RB_THREAD_T_HAS_NATIVE_ID
#endif

#include "ccan/list/list.h"
#include "id.h"
#include "internal.h"
#include "internal/array.h"
#include "internal/basic_operators.h"
#include "internal/box.h"
#include "internal/sanitizers.h"
#include "internal/serial.h"
#include "internal/set_table.h"
#include "internal/vm.h"
#include "method.h"
#include "node.h"
#include "ruby/ruby.h"
#include "ruby/st.h"
#include "ruby_atomic.h"
#include "vm_opts.h"

#include "ruby/thread_native.h"
/*
 * implementation selector of the get_insn_info algorithm
 *   0: linear search
 *   1: binary search
 *   2: succinct bitvector
 */
#ifndef VM_INSN_INFO_TABLE_IMPL
# define VM_INSN_INFO_TABLE_IMPL 2
#endif
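
/*
 * For intuition, implementation 1 amounts to a predecessor search over the
 * sorted `insns_info.positions` array of rb_iseq_constant_body. A sketch
 * under that assumption (the real lookup lives in iseq.c):
 *
 * ```c
 * // Greatest index i with positions[i] <= pos.
 * // Assumes size > 0, positions sorted ascending, positions[0] <= pos.
 * static int
 * find_insn_info_index(const unsigned int *positions, int size, unsigned int pos)
 * {
 *     int lo = 0, hi = size - 1;
 *     while (lo < hi) {
 *         int mid = lo + (hi - lo + 1) / 2; // bias upward so the loop terminates
 *         if (positions[mid] <= pos) lo = mid;
 *         else hi = mid - 1;
 *     }
 *     return lo;
 * }
 * ```
 */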

#if defined(NSIG_MAX)      /* POSIX issue 8 */
# undef  NSIG
# define NSIG NSIG_MAX
#elif defined(_SIG_MAXSIG) /* FreeBSD */
# undef  NSIG
# define NSIG _SIG_MAXSIG
#elif defined(_SIGMAX)     /* QNX */
# define NSIG (_SIGMAX + 1)
#elif defined(NSIG)        /* 99% of everything else */
# /* take it */
#else                      /* Last resort */
# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
#endif

#define RUBY_NSIG NSIG

#if defined(SIGCLD)
# define RUBY_SIGCHLD (SIGCLD)
#elif defined(SIGCHLD)
# define RUBY_SIGCHLD (SIGCHLD)
#endif

#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
# define USE_SIGALTSTACK
void *rb_allocate_sigaltstack(void);
void *rb_register_sigaltstack(void *);
# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
# define RB_ALTSTACK_FREE(var) free(var)
# define RB_ALTSTACK(var) var
#else /* noop */
# define RB_ALTSTACK_INIT(var, altstack)
# define RB_ALTSTACK_FREE(var)
# define RB_ALTSTACK(var) (0)
#endif

#include THREAD_IMPL_H
#define RUBY_VM_THREAD_MODEL 2

/*****************/
/* configuration */
/*****************/

/* gcc version check */
#if defined(__GNUC__) && __GNUC__ >= 2

#if OPT_TOKEN_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#endif

#else /* defined(__GNUC__) && __GNUC__ >= 2 */

/* disable threaded code options */
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif
#if OPT_TOKEN_THREADED_CODE
#undef OPT_TOKEN_THREADED_CODE
#endif
#endif

/* call threaded code */
#if OPT_CALL_THREADED_CODE
#if OPT_DIRECT_THREADED_CODE
#undef OPT_DIRECT_THREADED_CODE
#endif /* OPT_DIRECT_THREADED_CODE */
#endif /* OPT_CALL_THREADED_CODE */

void rb_vm_encoded_insn_data_table_init(void);
typedef unsigned long rb_num_t;
typedef signed long rb_snum_t;

enum ruby_tag_type {
    RUBY_TAG_NONE   = 0x0,
    RUBY_TAG_RETURN = 0x1,
    RUBY_TAG_BREAK  = 0x2,
    RUBY_TAG_NEXT   = 0x3,
    RUBY_TAG_RETRY  = 0x4,
    RUBY_TAG_REDO   = 0x5,
    RUBY_TAG_RAISE  = 0x6,
    RUBY_TAG_THROW  = 0x7,
    RUBY_TAG_FATAL  = 0x8,
    RUBY_TAG_MASK   = 0xf
};

#define TAG_NONE   RUBY_TAG_NONE
#define TAG_RETURN RUBY_TAG_RETURN
#define TAG_BREAK  RUBY_TAG_BREAK
#define TAG_NEXT   RUBY_TAG_NEXT
#define TAG_RETRY  RUBY_TAG_RETRY
#define TAG_REDO   RUBY_TAG_REDO
#define TAG_RAISE  RUBY_TAG_RAISE
#define TAG_THROW  RUBY_TAG_THROW
#define TAG_FATAL  RUBY_TAG_FATAL
#define TAG_MASK   RUBY_TAG_MASK

enum ruby_vm_throw_flags {
    VM_THROW_NO_ESCAPE_FLAG = 0x8000,
    VM_THROW_STATE_MASK = 0xff
};

/* forward declarations */
struct rb_thread_struct;
struct rb_control_frame_struct;

/* iseq data type */
struct rb_compile_option_struct;

union ic_serial_entry {
    rb_serial_t raw;
    VALUE data[2];
};

#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0

// imemo_constcache
struct iseq_inline_constant_cache_entry {
    VALUE flags;

    VALUE value;
    const rb_cref_t *ic_cref;
};
STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
              (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
               sizeof(const rb_cref_t *)) <= RVALUE_SIZE);

struct iseq_inline_constant_cache {
    struct iseq_inline_constant_cache_entry *entry;

    // null-terminated list of IDs that form the constant's lookup path
    const ID *segments;
};

struct iseq_inline_iv_cache_entry {
    uint64_t value; // dest_shape_id in former half, attr_index in latter half
    ID iv_set_name;
};

struct iseq_inline_cvar_cache_entry {
    struct rb_cvar_class_tbl_entry *entry;
};

union iseq_inline_storage_entry {
    struct {
        struct rb_thread_struct *running_thread;
        VALUE value;
    } once;
    struct iseq_inline_constant_cache ic_cache;
    struct iseq_inline_iv_cache_entry iv_cache;
};

struct rb_calling_info {
    const struct rb_call_data *cd;
    const struct rb_callcache *cc;
    VALUE block_handler;
    VALUE recv;
    int argc;
    bool kw_splat;
    VALUE heap_argv;
};

#ifndef VM_ARGC_STACK_MAX
#define VM_ARGC_STACK_MAX 128
#endif

#define VM_KW_SPECIFIED_BITS_MAX (32-1) /* TODO: 32 -> Fixnum's max bits */

# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)

#if 1
#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
#else
#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
#endif
#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
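
/*
 * Usage sketch (this is exactly the pattern GetVMPtr and GetProcPtr below
 * expand to): extract the typed core pointer from a wrapped T_DATA object.
 *
 * ```c
 * rb_proc_t *ptr;
 * GetCoreDataFromValue(procval, rb_proc_t, ptr); // ptr = (rb_proc_t *)DATA_PTR(procval)
 * ```
 */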

typedef struct rb_iseq_location_struct {
    VALUE pathobj;      /* String (path) or Array [path, realpath]. Frozen. */
    VALUE base_label;   /* String */
    VALUE label;        /* String */
    int first_lineno;
    int node_id;
    rb_code_location_t code_location;
} rb_iseq_location_t;

#define PATHOBJ_PATH     0
#define PATHOBJ_REALPATH 1

static inline VALUE
pathobj_path(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_PATH);
    }
}

static inline VALUE
pathobj_realpath(VALUE pathobj)
{
    if (RB_TYPE_P(pathobj, T_STRING)) {
        return pathobj;
    }
    else {
        VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
        return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
    }
}

/* Forward declarations */
typedef uintptr_t iseq_bits_t;

#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)

/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache)
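
/*
 * Index arithmetic sketch: is_entries is one flat array laid out as
 * [ IVC | ICVARC | ISE | IC ], so the IC region starts after the other
 * three. For example, with ivc_size == 2, icvarc_size == 1, ise_size == 1:
 *
 * ```c
 * // IC index 0 lives at flat index 0 + ise(1) + icvarc(1) + ivc(2) == 4
 * IC ic = &ISEQ_IS_IC_ENTRY(body, 0);
 * ```
 */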

/* instruction sequence type */
enum rb_iseq_type {
    ISEQ_TYPE_TOP,
    ISEQ_TYPE_METHOD,
    ISEQ_TYPE_BLOCK,
    ISEQ_TYPE_CLASS,
    ISEQ_TYPE_RESCUE,
    ISEQ_TYPE_ENSURE,
    ISEQ_TYPE_EVAL,
    ISEQ_TYPE_MAIN,
    ISEQ_TYPE_PLAIN
};

// Attributes specified by Primitive.attr!
enum rb_builtin_attr {
    // The iseq does not call methods.
    BUILTIN_ATTR_LEAF = 0x01,
    // This iseq contains only a single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
    BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
    // This attribute signals the JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
    BUILTIN_ATTR_INLINE_BLOCK = 0x04,
    // The iseq acts like a C method in backtraces.
    BUILTIN_ATTR_C_TRACE = 0x08,
};
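
/*
 * These are bit flags collected into rb_iseq_constant_body::builtin_attrs;
 * query them with rb_iseq_attr_p() (defined later in this header), e.g.:
 *
 * ```c
 * if (rb_iseq_attr_p(iseq, BUILTIN_ATTR_LEAF)) {
 *     // iseq is known not to call methods
 * }
 * ```
 */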

typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
typedef VALUE (*rb_zjit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *, rb_jit_func_t);

struct rb_iseq_constant_body {
    enum rb_iseq_type type;

    unsigned int iseq_size;
    VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */

    /* parameter information */
    struct {
        struct {
            unsigned int has_lead   : 1;
            unsigned int has_opt    : 1;
            unsigned int has_rest   : 1;
            unsigned int has_post   : 1;
            unsigned int has_kw     : 1;
            unsigned int has_kwrest : 1;
            unsigned int has_block  : 1;

            unsigned int ambiguous_param0 : 1; /* {|a|} */
            unsigned int accepts_no_kwarg : 1;
            unsigned int ruby2_keywords: 1;
            unsigned int anon_rest: 1;
            unsigned int anon_kwrest: 1;
            unsigned int use_block: 1;
            unsigned int forwardable: 1;
        } flags;

        unsigned int size;

        int lead_num;
        int opt_num;
        int rest_start;
        int post_start;
        int post_num;
        int block_start;

        const VALUE *opt_table; /* (opt_num + 1) entries. */
        /* opt_num and opt_table:
         *
         * def foo o1=e1, o2=e2, ..., oN=eN
         * #=>
         *   # prologue code
         *   A1: e1
         *   A2: e2
         *   ...
         *   AN: eN
         *   AL: body
         * opt_num = N
         * opt_table = [A1, A2, ..., AN, AL]
         */

        const struct rb_iseq_param_keyword {
            int num;
            int required_num;
            int bits_start;
            int rest_start;
            const ID *table;
            VALUE *default_values;
        } *keyword;
    } param;

    rb_iseq_location_t location;

    /* insn info, must be freed */
    struct {
        const struct iseq_insn_info_entry *body;
        unsigned int *positions;
        unsigned int size;
#if VM_INSN_INFO_TABLE_IMPL == 2
        struct succ_index_table *succ_index_table;
#endif
    } insns_info;

    const ID *local_table; /* must free */

    enum lvar_state {
        lvar_uninitialized,
        lvar_initialized,
        lvar_reassigned,
    } *lvar_states;

    /* catch table */
    struct iseq_catch_table *catch_table;

    /* for child iseq */
    const struct rb_iseq_struct *parent_iseq;
    struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */

    union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
    struct rb_call_data *call_data; // struct rb_call_data calls[ci_size];

    struct {
        rb_snum_t flip_count;
        VALUE script_lines;
        VALUE coverage;
        VALUE pc2branchindex;
        VALUE *original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int ic_size;     // Number of IC caches
    unsigned int ise_size;    // Number of ISE caches
    unsigned int ivc_size;    // Number of IVC caches
    unsigned int icvarc_size; // Number of ICVARC caches
    unsigned int ci_size;
    unsigned int stack_max; /* for stack overflow check */

    unsigned int builtin_attrs; // Union of rb_builtin_attr

    bool prism; // ISEQ was generated from the prism compiler

    union {
        iseq_bits_t *list; /* Find references for GC */
        iseq_bits_t single;
    } mark_bits;

    struct rb_id_table *outer_variables;

    const rb_iseq_t *mandatory_only_iseq;

#if USE_YJIT || USE_ZJIT
    // Function pointer for JIT code on jit_exec()
    rb_jit_func_t jit_entry;
    // Number of calls on jit_exec()
    long unsigned jit_entry_calls;
    // Function pointer for JIT code on jit_exec_exception()
    rb_jit_func_t jit_exception;
    // Number of calls on jit_exec_exception()
    long unsigned jit_exception_calls;
#endif

#if USE_YJIT
    // YJIT stores some data on each iseq.
    void *yjit_payload;
    // Used to estimate how frequently this ISEQ gets called
    uint64_t yjit_calls_at_interv;
#endif

#if USE_ZJIT
    // ZJIT stores some data on each iseq.
    void *zjit_payload;
#endif
};

/* T_IMEMO/iseq */
/* typedef rb_iseq_t is in method.h */
struct rb_iseq_struct {
    VALUE flags; /* 1 */
    VALUE wrapper; /* 2 */

    struct rb_iseq_constant_body *body; /* 3 */

    union { /* 4, 5 words */
        struct iseq_compile_data *compile_data; /* used at compile time */

        struct {
            VALUE obj;
            int index;
        } loader;

        struct {
            unsigned int local_hooks_cnt;
            rb_event_flag_t global_trace_events;
        } exec;
    } aux;
};

#define ISEQ_BODY(iseq) ((iseq)->body)

#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
#define USE_LAZY_LOAD 0
#endif

#if !USE_LAZY_LOAD
static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
#endif
const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);

static inline const rb_iseq_t *
rb_iseq_check(const rb_iseq_t *iseq)
{
    if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
        rb_iseq_complete((rb_iseq_t *)iseq);
    }
    return iseq;
}

static inline bool
rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
{
    return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
}

static inline const rb_iseq_t *
def_iseq_ptr(rb_method_definition_t *def)
{
    // TODO: revisit; to check for the bug, enable this assertion.
#if VM_CHECK_MODE > 0
    if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
#endif
    return rb_iseq_check(def->body.iseq.iseqptr);
}

enum ruby_special_exceptions {
    ruby_error_reenter,
    ruby_error_nomemory,
    ruby_error_sysstack,
    ruby_error_stackfatal,
    ruby_error_stream_closed,
    ruby_special_error_count
};

#define GetVMPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_vm_t, (ptr))

struct rb_vm_struct;
typedef void rb_vm_at_exit_func(struct rb_vm_struct*);

typedef struct rb_at_exit_list {
    rb_vm_at_exit_func *func;
    struct rb_at_exit_list *next;
} rb_at_exit_list;

void *rb_objspace_alloc(void);
void rb_objspace_free(void *objspace);
void rb_objspace_call_finalizer(void);

enum rb_hook_list_type {
    hook_list_type_ractor_local,
    hook_list_type_targeted_iseq,
    hook_list_type_targeted_def, // C function
    hook_list_type_global
};

typedef struct rb_hook_list_struct {
    struct rb_event_hook_struct *hooks;
    rb_event_flag_t events;
    unsigned int running;
    enum rb_hook_list_type type;
    bool need_clean;
} rb_hook_list_t;

// see builtin.h for definition
typedef const struct rb_builtin_function *RB_BUILTIN;

struct global_object_list {
    VALUE *varptr;
    struct global_object_list *next;
};

typedef struct rb_vm_struct {
    VALUE self;

    struct {
        struct ccan_list_head set;
        unsigned int cnt;
        unsigned int blocking_cnt;

        struct rb_ractor_struct *main_ractor;
        struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main

        struct {
            // monitor
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            unsigned int lock_rec;

            // join at exit
            rb_nativethread_cond_t terminate_cond;
            bool terminate_waiting;

#ifndef RUBY_THREAD_PTHREAD_H
            // win32
            bool barrier_waiting;
            unsigned int barrier_cnt;
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
#endif
        } sync;

#ifdef RUBY_THREAD_PTHREAD_H
        // ractor scheduling
        struct {
            rb_nativethread_lock_t lock;
            struct rb_ractor_struct *lock_owner;
            bool locked;

            rb_nativethread_cond_t cond; // GRQ
            unsigned int snt_cnt; // count of shared NTs
            unsigned int dnt_cnt; // count of dedicated NTs

            unsigned int running_cnt;

            unsigned int max_cpu;
            struct ccan_list_head grq; // Global Ready Queue
            unsigned int grq_cnt;

            // running threads
            struct ccan_list_head running_threads;

            // threads which switch context by timeslice
            struct ccan_list_head timeslice_threads;

            struct ccan_list_head zombie_threads;

            // true if the timeslice timer is not enabled
            bool timeslice_wait_inf;

            // barrier
            rb_nativethread_cond_t barrier_complete_cond;
            rb_nativethread_cond_t barrier_release_cond;
            bool barrier_waiting;
            unsigned int barrier_waiting_cnt;
            unsigned int barrier_serial;
            struct rb_ractor_struct *barrier_ractor;
            unsigned int barrier_lock_rec;
        } sched;
#endif
    } ractor;

#ifdef USE_SIGALTSTACK
    void *main_altstack;
#endif

    rb_serial_t fork_gen;

    /* set in single-threaded processes only: */
    volatile int ubf_async_safe;

    unsigned int running: 1;
    unsigned int thread_abort_on_exception: 1;
    unsigned int thread_report_on_exception: 1;
    unsigned int thread_ignore_deadlock: 1;

    /* object management */
    VALUE mark_object_ary;
    struct global_object_list *global_object_list;
    const VALUE special_exceptions[ruby_special_error_count];

    /* Ruby Box */
    rb_box_t *root_box;
    rb_box_t *main_box;

    /* load */
    // For running the init function of statically linked
    // extensions when they are loaded
    struct st_table *static_ext_inits;

    /* signal */
    struct {
        VALUE cmd[RUBY_NSIG];
    } trap_list;

    /* hook (for internal events: NEWOBJ, FREEOBJ, GC events, etc.) */
    rb_hook_list_t global_hooks;

    /* postponed_job (async-signal-safe, and thread-safe) */
    struct rb_postponed_job_queue *postponed_job_queue;

    int src_encoding_index;

    /* workqueue (thread-safe, NOT async-signal-safe) */
    struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
    rb_nativethread_lock_t workqueue_lock;

    VALUE orig_progname, progname;
    VALUE coverages, me2counter;
    int coverage_mode;

    struct {
        struct rb_objspace *objspace;
        struct gc_mark_func_data_struct {
            void *data;
            void (*mark_func)(VALUE v, void *data);
        } *mark_func_data;
    } gc;

    rb_at_exit_list *at_exit;

    const struct rb_builtin_function *builtin_function_table;

    st_table *ci_table;
    struct rb_id_table *negative_cme_table;
    st_table *overloaded_cme_table; // cme -> overloaded_cme
    set_table *unused_block_warning_table;
    set_table *cc_refinement_table;

    // This id table contains a mapping from ID to ICs. It does this with ID
    // keys and nested st_tables as values. The nested tables have ICs as keys
    // and Qtrue as values. It is used when inline constant caches need to be
    // invalidated or ISEQs are being freed.
    struct rb_id_table *constant_cache;
    ID inserting_constant_cache_id;

#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
#endif
    const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c

#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t clock;
#endif

    /* params */
    struct { /* sizes in bytes */
        size_t thread_vm_stack_size;
        size_t thread_machine_stack_size;
        size_t fiber_vm_stack_size;
        size_t fiber_machine_stack_size;
    } default_params;
} rb_vm_t;

extern bool ruby_vm_during_cleanup;

/* default values */

#define RUBY_VM_SIZE_ALIGN 4096

#define RUBY_VM_THREAD_VM_STACK_SIZE          ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN      (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     ( 128 * 1024 * sizeof(VALUE)) /*  512 KB or 1024 KB */
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */

#define RUBY_VM_FIBER_VM_STACK_SIZE           (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN       (   2 * 1024 * sizeof(VALUE)) /*    8 KB or   16 KB */
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      (  64 * 1024 * sizeof(VALUE)) /*  256 KB or  512 KB */
#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  32 * 1024 * sizeof(VALUE)) /*  128 KB or  256 KB */
#else
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  (  16 * 1024 * sizeof(VALUE)) /*   64 KB or  128 KB */
#endif

#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
/* It seems sanitizers consume A LOT of machine stacks */
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE     (1024 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE      ( 256 * 1024 * sizeof(VALUE))
#undef  RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN  ( 128 * 1024 * sizeof(VALUE))
#endif

#ifndef VM_DEBUG_BP_CHECK
#define VM_DEBUG_BP_CHECK 0
#endif

#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
#endif
struct rb_captured_block {
    VALUE self;
    const VALUE *ep;
    union {
        const rb_iseq_t *iseq;
        const struct vm_ifunc *ifunc;
        VALUE val;
    } code;
};

enum rb_block_handler_type {
    block_handler_type_iseq,
    block_handler_type_ifunc,
    block_handler_type_symbol,
    block_handler_type_proc
};

enum rb_block_type {
    block_type_iseq,
    block_type_ifunc,
    block_type_symbol,
    block_type_proc
};

struct rb_block {
    union {
        struct rb_captured_block captured;
        VALUE symbol;
        VALUE proc;
    } as;
    enum rb_block_type type;
};

typedef struct rb_control_frame_struct {
    const VALUE *pc;        // cfp[0]
    VALUE *sp;              // cfp[1]
    const rb_iseq_t *iseq;  // cfp[2]
    VALUE self;             // cfp[3] / block[0]
    const VALUE *ep;        // cfp[4] / block[1]
    const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
    void *jit_return;       // cfp[6] -- return address for JIT code
#if VM_DEBUG_BP_CHECK
    VALUE *bp_check;        // cfp[7]
#endif
} rb_control_frame_t;

extern const rb_data_type_t ruby_threadptr_data_type;

static inline struct rb_thread_struct *
rb_thread_ptr(VALUE thval)
{
    return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
}

enum rb_thread_status {
    THREAD_RUNNABLE,
    THREAD_STOPPED,
    THREAD_STOPPED_FOREVER,
    THREAD_KILLED
};

#ifdef RUBY_JMP_BUF
typedef RUBY_JMP_BUF rb_jmpbuf_t;
#else
typedef void *rb_jmpbuf_t[5];
#endif

/*
  `rb_vm_tag_jmpbuf_t` type represents a buffer used to
  long jump to a C frame associated with `rb_vm_tag`.

  Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
  following functions:
  - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
  - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.

  `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
  `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
*/
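
/*
  Illustrative lifecycle (a sketch, not code from this file): a use-site
  pairs init/deinit around a setjmp on the transformed buffer.

  ```c
  struct rb_vm_tag tag;
  rb_vm_tag_jmpbuf_init(&tag.buf);
  if (rb_setjmp(RB_VM_TAG_JMPBUF_GET(tag.buf)) == 0) {
      // normal path
  }
  else {
      // reached via rb_longjmp
  }
  rb_vm_tag_jmpbuf_deinit(&tag.buf);
  ```
*/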
#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
/*
  The WebAssembly target with Asyncify-based SJLJ needs
  to capture the execution context by unwinding/rewinding
  call frames into a jump buffer. The buffer space tends
  to be considerably large, unlike other architectures'
  register-based buffers.
  Therefore, we allocate the buffer on the heap in such
  environments.
*/
typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    ruby_xfree(*jmpbuf);
}
#else
typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;

#define RB_VM_TAG_JMPBUF_GET(buf) (buf)

static inline void
rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}

static inline void
rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
{
    // no-op
}
#endif

/*
  the members which are written in EC_PUSH_TAG() should be placed at
  the beginning and the end, so that the entire region is accessible.
*/
struct rb_vm_tag {
    VALUE tag;
    VALUE retval;
    rb_vm_tag_jmpbuf_t buf;
    struct rb_vm_tag *prev;
    enum ruby_tag_type state;
    unsigned int lock_rec;
};

STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
STATIC_ASSERT(rb_vm_tag_buf_end,
              offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
              sizeof(struct rb_vm_tag));

struct rb_unblock_callback {
    rb_unblock_function_t *func;
    void *arg;
};

struct rb_mutex_struct;

typedef struct rb_fiber_struct rb_fiber_t;

struct rb_waiting_list {
    struct rb_waiting_list *next;
    struct rb_thread_struct *thread;
    struct rb_fiber_struct *fiber;
};
struct rb_execution_context_struct {
    /* execution information */
    VALUE *vm_stack;      /* must free, must mark */
    size_t vm_stack_size; /* size in words (byte size / sizeof(VALUE)) */
    rb_control_frame_t *cfp;

    struct rb_vm_tag *tag;

    /* interrupt flags */
    rb_atomic_t interrupt_flag;
    rb_atomic_t interrupt_mask; /* size should match flag */
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t checked_clock;
#endif

    rb_fiber_t *fiber_ptr;
    struct rb_thread_struct *thread_ptr;
    rb_serial_t serial;
    rb_serial_t ractor_id;

    /* storage (ec (fiber) local) */
    struct rb_id_table *local_storage;
    VALUE local_storage_recursive_hash;
    VALUE local_storage_recursive_hash_for_trace;

    /* Inheritable fiber storage. */
    VALUE storage;

    /* eval env */
    const VALUE *root_lep;
    VALUE root_svar;

    /* trace information */
    struct rb_trace_arg_struct *trace_arg;

    /* temporary places */
    VALUE errinfo;
    VALUE passed_block_handler; /* for rb_iterate */

    uint8_t raised_flag; /* only 3 bits needed */

    /* n.b. only 7 bits needed, really: */
    BITFIELD(enum method_missing_reason, method_missing_reason, 8);

    VALUE private_const_reference;

    struct {
        VALUE obj;
        VALUE fields_obj;
    } gen_fields_cache;

    /* for GC */
    struct {
        VALUE *stack_start;
        VALUE *stack_end;
        size_t stack_maxsize;
        RUBY_ALIGNAS(SIZEOF_VALUE) jmp_buf regs;

#ifdef RUBY_ASAN_ENABLED
        void *asan_fake_stack_handle;
#endif
    } machine;
};

#ifndef rb_execution_context_t
typedef struct rb_execution_context_struct rb_execution_context_t;
#define rb_execution_context_t rb_execution_context_t
#endif

// for builtin.h
#define VM_CORE_H_EC_DEFINED 1

// Set the vm_stack pointer in the execution context.
void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
// @param ec the execution context to update.
// @param stack a pointer to the stack to use.
// @param size the size of the stack, as in `VALUE stack[size]`.
void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);

// Clear (set to `NULL`) the vm_stack pointer.
// @param ec the execution context to update.
void rb_ec_clear_vm_stack(rb_execution_context_t *ec);

// Close an execution context and free related resources that are no longer needed.
// @param ec the execution context to close.
void rb_ec_close(rb_execution_context_t *ec);
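
/*
 * Illustrative sketch of a hypothetical caller (not code from this file):
 * allocate a stack, install it with an initial frame, and detach it later.
 *
 * ```c
 * size_t size = RUBY_VM_FIBER_VM_STACK_SIZE / sizeof(VALUE); // size in VALUEs
 * VALUE *stack = ALLOC_N(VALUE, size);
 * rb_ec_initialize_vm_stack(ec, stack, size);
 * // ... run code on ec ...
 * rb_ec_clear_vm_stack(ec); // ec->vm_stack becomes NULL; freeing is the caller's job
 * ```
 */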

struct rb_ext_config {
    bool ractor_safe;
};

typedef struct rb_ractor_struct rb_ractor_t;

struct rb_native_thread;
typedef struct rb_thread_struct {
    struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
    VALUE self;
    rb_ractor_t *ractor;
    rb_vm_t *vm;
    struct rb_native_thread *nt;
    rb_execution_context_t *ec;

    struct rb_thread_sched_item sched;
    bool mn_schedulable;
    rb_atomic_t serial; // only for RUBY_DEBUG_LOG()

    VALUE last_status; /* $? */

    /* for cfunc */
    struct rb_calling_info *calling;

    /* for load(true) */
    VALUE top_self;
    VALUE top_wrapper;

    /* thread control */

    BITFIELD(enum rb_thread_status, status, 2);
    /* bit flags */
    unsigned int has_dedicated_nt : 1;
    unsigned int to_kill : 1;
    unsigned int abort_on_exception: 1;
    unsigned int report_on_exception: 1;
    unsigned int pending_interrupt_queue_checked: 1;
    int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
    uint32_t running_time_us; /* 12500..800000 */

    void *blocking_region_buffer;

    VALUE thgroup;
    VALUE value;

    /* temporary place of retval on OPT_CALL_THREADED_CODE */
#if OPT_CALL_THREADED_CODE
    VALUE retval;
#endif

    /* async errinfo queue */
    VALUE pending_interrupt_queue;
    VALUE pending_interrupt_mask_stack;

    /* interrupt management */
    rb_nativethread_lock_t interrupt_lock;
    struct rb_unblock_callback unblock;
    VALUE locking_mutex;
    struct rb_mutex_struct *keeping_mutexes;
    struct ccan_list_head interrupt_exec_tasks;

    struct rb_waiting_list *join_list;

    union {
        struct {
            VALUE proc;
            VALUE args;
            int kw_splat;
        } proc;
        struct {
            VALUE (*func)(void *);
            void *arg;
        } func;
    } invoke_arg;

    enum thread_invoke_type {
        thread_invoke_type_none = 0,
        thread_invoke_type_proc,
        thread_invoke_type_ractor_proc,
        thread_invoke_type_func
    } invoke_type;

    /* fiber */
    rb_fiber_t *root_fiber;

    VALUE scheduler;
    unsigned int blocking;

    /* misc */
    VALUE name;
    void **specific_storage;

    struct rb_ext_config ext_config;
} rb_thread_t;

static inline unsigned int
rb_th_serial(const rb_thread_t *th)
{
    return th ? (unsigned int)th->serial : 0;
}

typedef enum {
    VM_DEFINECLASS_TYPE_CLASS           = 0x00,
    VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
    VM_DEFINECLASS_TYPE_MODULE          = 0x02,
    /* 0x03..0x06 is reserved */
    VM_DEFINECLASS_TYPE_MASK            = 0x07
} rb_vm_defineclass_type_t;

#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
#define VM_DEFINECLASS_FLAG_SCOPED         0x08
#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
    ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
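
/*
 * Flag arithmetic sketch: the low 3 bits carry the type and the higher bits
 * are flags, so e.g. a scoped class definition with an explicit superclass
 * (think `class Foo::Bar < Baz`) would be encoded as 0x00 | 0x08 | 0x10 == 0x18:
 *
 * ```c
 * VALUE flags = VM_DEFINECLASS_TYPE_CLASS
 *             | VM_DEFINECLASS_FLAG_SCOPED
 *             | VM_DEFINECLASS_FLAG_HAS_SUPERCLASS;
 * VM_ASSERT(VM_DEFINECLASS_TYPE(flags) == VM_DEFINECLASS_TYPE_CLASS);
 * VM_ASSERT(VM_DEFINECLASS_SCOPED_P(flags) && VM_DEFINECLASS_HAS_SUPERCLASS_P(flags));
 * ```
 */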

/* iseq.c */
RUBY_SYMBOL_EXPORT_BEGIN

/* node -> iseq */
rb_iseq_t *rb_iseq_new         (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
rb_iseq_t *rb_iseq_new_top     (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
rb_iseq_t *rb_iseq_new_main    (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
rb_iseq_t *rb_iseq_new_eval    (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
rb_iseq_t *rb_iseq_new_with_opt(VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
                                enum rb_iseq_type, const rb_compile_option_t*,
                                VALUE script_lines);

struct iseq_link_anchor;
struct rb_iseq_new_with_callback_callback_func {
    VALUE flags;
    VALUE reserved;
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
    const void *data;
};
static inline struct rb_iseq_new_with_callback_callback_func *
rb_iseq_new_with_callback_new_callback(
    void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
{
    struct rb_iseq_new_with_callback_callback_func *memo =
        IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
    memo->func = func;
    memo->data = ptr;

    return memo;
}
rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func *ifunc,
    VALUE name, VALUE path, VALUE realpath, int first_lineno,
    const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);

VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);

VALUE rb_iseq_coverage(const rb_iseq_t *iseq);

RUBY_EXTERN VALUE rb_cISeq;
RUBY_EXTERN VALUE rb_cRubyVM;
RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
RUBY_EXTERN VALUE rb_block_param_proxy;
RUBY_SYMBOL_EXPORT_END

#define GetProcPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_proc_t, (ptr))

typedef struct {
    const struct rb_block block;
    unsigned int is_from_method: 1; /* bool */
    unsigned int is_lambda: 1;      /* bool */
    unsigned int is_isolated: 1;    /* bool */
} rb_proc_t;

RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_proc_isolate(VALUE self);
VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
RUBY_SYMBOL_EXPORT_END

typedef struct {
    VALUE flags; /* imemo header */
    rb_iseq_t *iseq;
    const VALUE *ep;
    const VALUE *env;
    unsigned int env_size;
} rb_env_t;

extern const rb_data_type_t ruby_binding_data_type;

#define GetBindingPtr(obj, ptr) \
  GetCoreDataFromValue((obj), rb_binding_t, (ptr))

typedef struct {
    const struct rb_block block;
    const VALUE pathobj;
    int first_lineno;
} rb_binding_t;

/* used at compile time and by the send instruction */

enum vm_check_match_type {
    VM_CHECKMATCH_TYPE_WHEN = 1,
    VM_CHECKMATCH_TYPE_CASE = 2,
    VM_CHECKMATCH_TYPE_RESCUE = 3
};

#define VM_CHECKMATCH_TYPE_MASK 0x03
#define VM_CHECKMATCH_ARRAY     0x04

enum vm_opt_newarray_send_type {
    VM_OPT_NEWARRAY_SEND_MAX = 1,
    VM_OPT_NEWARRAY_SEND_MIN = 2,
    VM_OPT_NEWARRAY_SEND_HASH = 3,
    VM_OPT_NEWARRAY_SEND_PACK = 4,
    VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
    VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
};

enum vm_special_object_type {
    VM_SPECIAL_OBJECT_VMCORE = 1,
    VM_SPECIAL_OBJECT_CBASE,
    VM_SPECIAL_OBJECT_CONST_BASE
};

enum vm_svar_index {
    VM_SVAR_LASTLINE = 0, /* $_ */
    VM_SVAR_BACKREF = 1,  /* $~ */

    VM_SVAR_EXTRA_START = 2,
    VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
};

/* inline cache */
typedef struct iseq_inline_constant_cache *IC;
typedef struct iseq_inline_iv_cache_entry *IVC;
typedef struct iseq_inline_cvar_cache_entry *ICVARC;
typedef union iseq_inline_storage_entry *ISE;
typedef const struct rb_callinfo *CALL_INFO;
typedef const struct rb_callcache *CALL_CACHE;
typedef struct rb_call_data *CALL_DATA;

typedef VALUE CDHASH;

#ifndef FUNC_FASTCALL
#define FUNC_FASTCALL(x) x
#endif

typedef rb_control_frame_t *
  (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);

#define VM_TAGGED_PTR_SET(p, tag)  ((VALUE)(p) | (tag))
#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))

#define GC_GUARDED_PTR(p)     VM_TAGGED_PTR_SET((p), 0x01)
#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
#define GC_GUARDED_PTR_P(p)   (((VALUE)(p)) & 0x01)
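
/*
 * Tagging sketch: pointers stored where the GC expects VALUEs are marked
 * with low tag bits (so they look like Fixnums to the GC). For a pointer
 * aligned to at least 4 bytes the round-trip is lossless:
 *
 * ```c
 * VALUE guarded = GC_GUARDED_PTR(ep);   // sets bit 0
 * VM_ASSERT(GC_GUARDED_PTR_P(guarded));
 * VM_ASSERT(GC_GUARDED_PTR_REF(guarded) == (void *)ep); // masks bits 0-1 off
 * ```
 */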

enum vm_frame_env_flags {
    /* Frame/Environment flag bits:
     *   MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
     *
     *   X   : tag for GC marking (it looks like a Fixnum)
     *   EEE : 4 bits Env flags
     *   FF..: 8 bits Frame flags
     *   MM..: 15 bits frame magic (to check frame corruption)
     */

    /* frame types */
    VM_FRAME_MAGIC_METHOD = 0x11110001,
    VM_FRAME_MAGIC_BLOCK  = 0x22220001,
    VM_FRAME_MAGIC_CLASS  = 0x33330001,
    VM_FRAME_MAGIC_TOP    = 0x44440001,
    VM_FRAME_MAGIC_CFUNC  = 0x55550001,
    VM_FRAME_MAGIC_IFUNC  = 0x66660001,
    VM_FRAME_MAGIC_EVAL   = 0x77770001,
    VM_FRAME_MAGIC_RESCUE = 0x78880001,
    VM_FRAME_MAGIC_DUMMY  = 0x79990001,

    VM_FRAME_MAGIC_MASK   = 0x7fff0001,

    /* frame flag */
    VM_FRAME_FLAG_FINISH    = 0x0020,
    VM_FRAME_FLAG_BMETHOD   = 0x0040,
    VM_FRAME_FLAG_CFRAME    = 0x0080,
    VM_FRAME_FLAG_LAMBDA    = 0x0100,
    VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
    VM_FRAME_FLAG_CFRAME_KW = 0x0400,
    VM_FRAME_FLAG_PASSED    = 0x0800,
    VM_FRAME_FLAG_BOX_REQUIRE = 0x1000,

    /* env flag */
    VM_ENV_FLAG_LOCAL       = 0x0002,
    VM_ENV_FLAG_ESCAPED     = 0x0004,
    VM_ENV_FLAG_WB_REQUIRED = 0x0008,
    VM_ENV_FLAG_ISOLATED    = 0x0010,
};

#define VM_ENV_DATA_SIZE          ( 3)

#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
#define VM_ENV_DATA_INDEX_FLAGS   ( 0) /* ep[ 0] */
#define VM_ENV_DATA_INDEX_ENV     ( 1) /* ep[ 1] */

#define VM_ENV_INDEX_LAST_LVAR    (-VM_ENV_DATA_SIZE)

static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);

static inline void
VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
}

static inline void
VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
}

static inline unsigned long
VM_ENV_FLAGS(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    VM_ASSERT(FIXNUM_P(flags));
    return flags & flag;
}

static inline unsigned long
VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
{
    VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
    return flags & flag;
}

static inline unsigned long
VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
{
    return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
}

static inline unsigned long
VM_FRAME_TYPE(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline unsigned long
VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
}

static inline int
VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
}

static inline int
VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
}

static inline int
VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
}

static inline int
VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
}

static inline int
rb_obj_is_iseq(VALUE iseq)
{
    return imemo_type_p(iseq, imemo_iseq);
}

#if VM_CHECK_MODE > 0
#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
#endif

static inline int
VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
{
    int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
    VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
              (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
    return cframe_p;
}

static inline int
VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
}

static inline int
VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P(cfp);
}

static inline int
VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
{
    return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
}

static inline int
VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
{
    return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE) != 0;
}

#define RUBYVM_CFUNC_FRAME_P(cfp) \
  (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)

#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
#define VM_BLOCK_HANDLER_NONE 0
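
/*
 * Block handler encoding sketch (derived from the predicates further down):
 * a block handler VALUE is either 0 (no block), a tagged pointer to a
 * captured block (low bits 01 for an iseq block, 11 for an ifunc block),
 * a Symbol, or a Proc.
 *
 * ```c
 * VALUE bh = VM_ENV_BLOCK_HANDLER(ep);
 * if (bh == VM_BLOCK_HANDLER_NONE) {
 *     // no block given
 * }
 * else {
 *     switch (vm_block_handler_type(bh)) {
 *       case block_handler_type_iseq:   // tagged rb_captured_block (0x01)
 *       case block_handler_type_ifunc:  // tagged rb_captured_block (0x03)
 *       case block_handler_type_symbol: // Symbol object
 *       case block_handler_type_proc:   // Proc object
 *         break;
 *     }
 * }
 * ```
 */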

static inline int
VM_ENV_LOCAL_P(const VALUE *ep)
{
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline int
VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
{
    return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
}

static inline const VALUE *
VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
{
    return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const VALUE *
VM_ENV_PREV_EP(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
    return VM_ENV_PREV_EP_UNCHECKED(ep);
}

static inline bool
VM_ENV_BOXED_P(const VALUE *ep)
{
    return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
}

static inline VALUE
VM_ENV_BLOCK_HANDLER(const VALUE *ep)
{
    if (VM_ENV_BOXED_P(ep)) {
        VM_ASSERT(VM_ENV_LOCAL_P(ep));
        return VM_BLOCK_HANDLER_NONE;
    }

    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return ep[VM_ENV_DATA_INDEX_SPECVAL];
}

static inline const rb_box_t *
VM_ENV_BOX(const VALUE *ep)
{
    VM_ASSERT(VM_ENV_BOXED_P(ep));
    VM_ASSERT(VM_ENV_LOCAL_P(ep));
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

static inline const rb_box_t *
VM_ENV_BOX_UNCHECKED(const VALUE *ep)
{
    return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
}

#if VM_CHECK_MODE > 0
int rb_vm_ep_in_heap_p(const VALUE *ep);
#endif

static inline int
VM_ENV_ESCAPED_P(const VALUE *ep)
{
    VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
    return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
}

RBIMPL_ATTR_NONNULL((1))
static inline VALUE
VM_ENV_ENVVAL(const VALUE *ep)
{
    VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
    VM_ASSERT(VM_ENV_ESCAPED_P(ep));
    VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
    return envval;
}

RBIMPL_ATTR_NONNULL((1))
static inline const rb_env_t *
VM_ENV_ENVVAL_PTR(const VALUE *ep)
{
    return (const rb_env_t *)VM_ENV_ENVVAL(ep);
}

static inline const rb_env_t *
vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
{
    rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
    env->ep = env_ep;
    env->env = env_body;
    env->env_size = env_size;
    env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
    return env;
}

static inline void
VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
{
    *((VALUE *)ptr) = v;
}

static inline void
VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
{
    VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
    VM_FORCE_WRITE(ptr, special_const_value);
}

static inline void
VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
{
    VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
    VM_FORCE_WRITE(&ep[index], v);
}

const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
const VALUE *rb_vm_proc_local_ep(VALUE proc);
void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);

VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);

#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
#define RUBY_VM_NEXT_CONTROL_FRAME(cfp)     ((cfp)-1)

#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
  ((void *)(ecfp) > (void *)(cfp))

static inline const rb_control_frame_t *
RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
{
    return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
}

static inline int
RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
{
    return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
}

static inline int
VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x01) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline int
VM_BH_IFUNC_P(VALUE block_handler)
{
    if ((block_handler & 0x03) == 0x03) {
#if VM_CHECK_MODE > 0
        struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
        VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
#endif
        return 1;
    }
    else {
        return 0;
    }
}

static inline VALUE
VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
{
    VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return block_handler;
}

static inline const struct rb_captured_block *
VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler));
    return captured;
}

static inline const struct rb_captured_block *
VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
{
    struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
    VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
    return captured;
}

static inline enum rb_block_handler_type
vm_block_handler_type(VALUE block_handler)
{
    if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
        return block_handler_type_iseq;
    }
    else if (VM_BH_IFUNC_P(block_handler)) {
        return block_handler_type_ifunc;
    }
    else if (SYMBOL_P(block_handler)) {
        return block_handler_type_symbol;
    }
    else {
        VM_ASSERT(rb_obj_is_proc(block_handler));
        return block_handler_type_proc;
    }
}

static inline void
vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
{
    VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
              (vm_block_handler_type(block_handler), 1));
}

static inline enum rb_block_type
vm_block_type(const struct rb_block *block)
{
#if VM_CHECK_MODE > 0
    switch (block->type) {
      case block_type_iseq:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
        break;
      case block_type_ifunc:
        VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
        break;
      case block_type_symbol:
        VM_ASSERT(SYMBOL_P(block->as.symbol));
        break;
      case block_type_proc:
        VM_ASSERT(rb_obj_is_proc(block->as.proc));
        break;
    }
#endif
    return block->type;
}

static inline void
vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
{
    struct rb_block *mb = (struct rb_block *)block;
    mb->type = type;
}

static inline const struct rb_block *
vm_proc_block(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
}

static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
static inline const VALUE *vm_block_ep(const struct rb_block *block);

static inline const rb_iseq_t *
vm_proc_iseq(VALUE procval)
{
    return vm_block_iseq(vm_proc_block(procval));
}

static inline const VALUE *
vm_proc_ep(VALUE procval)
{
    return vm_block_ep(vm_proc_block(procval));
}

static inline const rb_iseq_t *
vm_block_iseq(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
      case block_type_proc: return vm_proc_iseq(block->as.proc);
      case block_type_ifunc:
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_iseq);
    return NULL;
}

static inline const VALUE *
vm_block_ep(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:  return block->as.captured.ep;
      case block_type_proc:   return vm_proc_ep(block->as.proc);
      case block_type_symbol: return NULL;
    }
    VM_UNREACHABLE(vm_block_ep);
    return NULL;
}

static inline VALUE
vm_block_self(const struct rb_block *block)
{
    switch (vm_block_type(block)) {
      case block_type_iseq:
      case block_type_ifunc:
        return block->as.captured.self;
      case block_type_proc:
        return vm_block_self(vm_proc_block(block->as.proc));
      case block_type_symbol:
        return Qundef;
    }
    VM_UNREACHABLE(vm_block_self);
    return Qundef;
}

static inline VALUE
VM_BH_TO_SYMBOL(VALUE block_handler)
{
    VM_ASSERT(SYMBOL_P(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_SYMBOL(VALUE symbol)
{
    VM_ASSERT(SYMBOL_P(symbol));
    return symbol;
}

static inline VALUE
VM_BH_TO_PROC(VALUE block_handler)
{
    VM_ASSERT(rb_obj_is_proc(block_handler));
    return block_handler;
}

static inline VALUE
VM_BH_FROM_PROC(VALUE procval)
{
    VM_ASSERT(rb_obj_is_proc(procval));
    return procval;
}

/* VM related object allocate functions */
VALUE rb_thread_alloc(VALUE klass);
VALUE rb_binding_alloc(VALUE klass);
VALUE rb_proc_alloc(VALUE klass);
VALUE rb_proc_dup(VALUE self);

/* for debug */
extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);

#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
bool rb_vm_bugreport(const void *, FILE *);
typedef void (*ruby_sighandler_t)(int);
RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));

/* functions about thread/vm execution */
RUBY_SYMBOL_EXPORT_BEGIN
VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box);
VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
VALUE rb_iseq_path(const rb_iseq_t *iseq);
VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
RUBY_SYMBOL_EXPORT_END

VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1943int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1944void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1945
1946VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1947
1948VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1949static inline VALUE
1950rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1951{
1952 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1953}
1954
1955static inline VALUE
1956rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1957{
1958 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1959}
1960
1961VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
1962VALUE rb_vm_env_local_variables(const rb_env_t *env);
1963VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
1964const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
1965const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
1966void rb_vm_inc_const_missing_count(void);
1967VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
1968 const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
1969void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
1970void rb_vm_pop_frame(rb_execution_context_t *ec);
1971
1972void rb_thread_start_timer_thread(void);
1973void rb_thread_stop_timer_thread(void);
1974void rb_thread_reset_timer_thread(void);
1975void rb_thread_wakeup_timer_thread(int);
1976
1977static inline void
1978rb_vm_living_threads_init(rb_vm_t *vm)
1979{
1980 ccan_list_head_init(&vm->workqueue);
1981 ccan_list_head_init(&vm->ractor.set);
1982#ifdef RUBY_THREAD_PTHREAD_H
1983 ccan_list_head_init(&vm->ractor.sched.zombie_threads);
1984#endif
1985}
1986
1987typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
1988rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1989rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1990VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
1991int rb_vm_get_sourceline(const rb_control_frame_t *);
1992void rb_vm_stack_to_heap(rb_execution_context_t *ec);
1993void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
1994void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
1995rb_thread_t * ruby_thread_from_native(void);
1996int ruby_thread_set_native(rb_thread_t *th);
1997int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
1998void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
1999void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
2000VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);
2001
2002void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);
2003
2004#define rb_vm_register_special_exception(sp, e, m) \
2005 rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
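
/* Note that `m` must be a string literal: it is embedded as a static
 * US-ASCII string via rb_strlen_lit()/rb_usascii_str_new_static(). A hedged
 * usage sketch (illustrative; see the actual registration sites in vm.c):
 *
 * ```c
 * rb_vm_register_special_exception(ruby_error_sysstack, rb_eSysStackError,
 *                                  "stack level too deep");
 * ```
 */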
2006
2007void rb_gc_mark_machine_context(const rb_execution_context_t *ec);
2008
2009rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);
2010
2011const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
2012const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);
2013
2014#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]
2015
2016#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do { \
2017 STATIC_ASSERT(sizeof_sp, sizeof(*(sp)) == sizeof(VALUE)); \
2018 STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
2019 const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)]; \
2020 if (UNLIKELY((cfp) <= &bound[1])) { \
2021 vm_stackoverflow(); \
2022 } \
2023} while (0)
2024
2025#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
2026 CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
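
/* The VALUE stack and the control-frame stack share one region and grow
 * toward each other, so overflow is detected by checking whether `sp` plus
 * the requested margin would run into the current control frame. A hedged
 * usage sketch (`n` is a hypothetical number of VALUEs about to be pushed):
 *
 * ```c
 * CHECK_VM_STACK_OVERFLOW(ec->cfp, n); // raises sysstack_error on overflow
 * // ... now safe to push up to `n` VALUEs via cfp->sp ...
 * ```
 */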
2027
2028VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);
2029
2030rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c
2031
2032/* for thread */
2033
2034#if RUBY_VM_THREAD_MODEL == 2
2035
2036RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
2037RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
2038RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
2039RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags; // only ever added to
2040RUBY_EXTERN unsigned int ruby_vm_iseq_events_enabled;
2041RUBY_EXTERN unsigned int ruby_vm_c_events_enabled;
2042
2043#define GET_VM() rb_current_vm()
2044#define GET_RACTOR() rb_current_ractor()
2045#define GET_THREAD() rb_current_thread()
2046#define GET_EC() rb_current_execution_context(true)
2047
2048static inline rb_serial_t
2049rb_ec_serial(struct rb_execution_context_struct *ec)
2050{
2051 VM_ASSERT(ec->serial >= 1);
2052 return ec->serial;
2053}
2054
2055static inline rb_thread_t *
2056rb_ec_thread_ptr(const rb_execution_context_t *ec)
2057{
2058 return ec->thread_ptr;
2059}
2060
2061static inline rb_ractor_t *
2062rb_ec_ractor_ptr(const rb_execution_context_t *ec)
2063{
2064 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2065 if (th) {
2066 VM_ASSERT(th->ractor != NULL);
2067 return th->ractor;
2068 }
2069 else {
2070 return NULL;
2071 }
2072}
2073
2074static inline rb_serial_t
2075rb_ec_ractor_id(const rb_execution_context_t *ec)
2076{
2077 rb_serial_t ractor_id = ec->ractor_id;
2078 RUBY_ASSERT(ractor_id);
2079 return ractor_id;
2080}
2081
2082static inline rb_vm_t *
2083rb_ec_vm_ptr(const rb_execution_context_t *ec)
2084{
2085 const rb_thread_t *th = rb_ec_thread_ptr(ec);
2086 if (th) {
2087 return th->vm;
2088 }
2089 else {
2090 return NULL;
2091 }
2092}
2093
2094NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));
2095
2096static inline rb_execution_context_t *
2097rb_current_execution_context(bool expect_ec)
2098{
2099#ifdef RB_THREAD_LOCAL_SPECIFIER
2100 #ifdef RB_THREAD_CURRENT_EC_NOINLINE
2101 rb_execution_context_t * volatile ec = rb_current_ec();
2102 #else
2103 rb_execution_context_t * volatile ec = ruby_current_ec;
2104 #endif
2105
2106 /* In shared objects, `__tls_get_addr()` is used to access TLS, and
2107 * the address of `ruby_current_ec` may be cached in a function
2108 * frame. That cached address can be misused after a coroutine
2109 * migrates to another native thread:
2110 * 1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
2111 * 2) Context-switch and resume the coroutine on NT2.
2112 * 3) `ptr` is used on NT2, but it still points into NT1's TLS.
2113 * This assertion checks for such misuse.
2114 *
2115 * To avoid accidents, `GET_EC()` should be called only once per frame.
2116 * Note that inlining can reintroduce the problem.
2117 */
2118 VM_ASSERT(ec == rb_current_ec_noinline());
2119#else
2120 rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
2121#endif
2122 VM_ASSERT(!expect_ec || ec != NULL);
2123 return ec;
2124}
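
/* A minimal sketch of the rule stated above (illustrative only; `foo` and
 * `bar` are hypothetical):
 *
 * ```c
 * void foo(void)
 * {
 *     rb_execution_context_t *ec = GET_EC(); // one TLS lookup per frame
 *     bar(ec); // pass `ec` down instead of calling GET_EC() again
 * }
 * ```
 */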
2125
2126static inline rb_thread_t *
2127rb_current_thread(void)
2128{
2129 const rb_execution_context_t *ec = GET_EC();
2130 return rb_ec_thread_ptr(ec);
2131}
2132
2133static inline rb_ractor_t *
2134rb_current_ractor_raw(bool expect)
2135{
2136 if (ruby_single_main_ractor) {
2137 return ruby_single_main_ractor;
2138 }
2139 else {
2140 const rb_execution_context_t *ec = rb_current_execution_context(expect);
2141 return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
2142 }
2143}
2144
2145static inline rb_ractor_t *
2146rb_current_ractor(void)
2147{
2148 return rb_current_ractor_raw(true);
2149}
2150
2151static inline rb_vm_t *
2152rb_current_vm(void)
2153{
2154#if 0 // TODO: reconsider the assertions
2155 VM_ASSERT(ruby_current_vm_ptr == NULL ||
2156 ruby_current_execution_context_ptr == NULL ||
2157 rb_ec_thread_ptr(GET_EC()) == NULL ||
2158 rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
2159 rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
2160#endif
2161
2162 return ruby_current_vm_ptr;
2163}
2164
2165void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
2166 unsigned int recorded_lock_rec,
2167 unsigned int current_lock_rec);
2168
2169/* This is technically a data race, since the owner is read without the lock;
2170 * however, we only compare against a value that our own thread will write. */
2171NO_SANITIZE("thread", static inline bool
2172vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
2173{
2174 VM_ASSERT(cr == GET_RACTOR());
2175 return vm->ractor.sync.lock_owner == cr;
2176}
2177
2178static inline unsigned int
2179rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
2180{
2181 rb_vm_t *vm = rb_ec_vm_ptr(ec);
2182
2183 if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
2184 return 0;
2185 }
2186 else {
2187 return vm->ractor.sync.lock_rec;
2188 }
2189}
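
/* A hedged sketch of how the pair above is intended to be used: record the
 * recursion count before running code that may exit non-locally, then let
 * rb_ec_vm_lock_rec_release() reconcile any imbalance (pattern inferred from
 * the declarations here, not a verbatim caller):
 *
 * ```c
 * unsigned int saved_lock_rec = rb_ec_vm_lock_rec(ec);
 * // ... code that may raise or throw across VM-lock boundaries ...
 * rb_ec_vm_lock_rec_release(ec, saved_lock_rec, rb_ec_vm_lock_rec(ec));
 * ```
 */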
2190
2191#else
2192#error "unsupported thread model"
2193#endif
2194
2195enum {
2196 TIMER_INTERRUPT_MASK = 0x01,
2197 PENDING_INTERRUPT_MASK = 0x02,
2198 POSTPONED_JOB_INTERRUPT_MASK = 0x04,
2199 TRAP_INTERRUPT_MASK = 0x08,
2200 TERMINATE_INTERRUPT_MASK = 0x10,
2201 VM_BARRIER_INTERRUPT_MASK = 0x20,
2202};
2203
2204#define RUBY_VM_SET_TIMER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
2205#define RUBY_VM_SET_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
2206#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
2207#define RUBY_VM_SET_TRAP_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
2208#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
2209#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
2210
2211static inline bool
2212RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
2213{
2214 return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
2215}
2216
2217static inline bool
2218RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
2219{
2220#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
2221 uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;
2222
2223 if (current_clock != ec->checked_clock) {
2224 ec->checked_clock = current_clock;
2225 RUBY_VM_SET_TIMER_INTERRUPT(ec);
2226 }
2227#endif
2228 return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
2229}
2230
2231VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
2232int rb_signal_buff_size(void);
2233int rb_signal_exec(rb_thread_t *th, int sig);
2234void rb_threadptr_check_signal(rb_thread_t *mth);
2235void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
2236void rb_threadptr_signal_exit(rb_thread_t *th);
2237int rb_threadptr_execute_interrupts(rb_thread_t *, int);
2238void rb_threadptr_interrupt(rb_thread_t *th);
2239void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
2240void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
2241void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
2242VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
2243void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
2244void rb_execution_context_update(rb_execution_context_t *ec);
2245void rb_execution_context_mark(const rb_execution_context_t *ec);
2246void rb_fiber_close(rb_fiber_t *fib);
2247void Init_native_thread(rb_thread_t *th);
2248int rb_vm_check_ints_blocking(rb_execution_context_t *ec);
2249
2250// vm_sync.h
2251void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
2252void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);
2253
2254#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
2255static inline void
2256rb_vm_check_ints(rb_execution_context_t *ec)
2257{
2258#ifdef RUBY_ASSERT_CRITICAL_SECTION
2259 VM_ASSERT(ruby_assert_critical_section_entered == 0);
2260#endif
2261
2262 VM_ASSERT(ec == rb_current_ec_noinline());
2263
2264 if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
2265 rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
2266 }
2267}
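
/* A hedged sketch of the interrupt flow: another thread (or a signal
 * handler) sets a bit with one of the RUBY_VM_SET_* macros, and the target
 * execution context notices it at its next interrupt check:
 *
 * ```c
 * // producer side (e.g. the timer thread):
 * RUBY_VM_SET_TIMER_INTERRUPT(target_ec);
 *
 * // consumer side, on the target thread:
 * RUBY_VM_CHECK_INTS(ec); // runs handlers if any unmasked bit is set
 * ```
 */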
2268
2269/* tracer */
2270
2271struct rb_trace_arg_struct {
2272 rb_event_flag_t event;
2273 rb_execution_context_t *ec;
2274 const rb_control_frame_t *cfp;
2275 VALUE self;
2276 ID id;
2277 ID called_id;
2278 VALUE klass;
2279 VALUE data;
2280
2281 int klass_solved;
2282
2283 /* calc from cfp */
2284 int lineno;
2285 VALUE path;
2286};
2287
2288void rb_hook_list_mark(rb_hook_list_t *hooks);
2289void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
2290void rb_hook_list_free(rb_hook_list_t *hooks);
2291void rb_hook_list_connect_local_tracepoint(rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
2292bool rb_hook_list_remove_local_tracepoint(rb_hook_list_t *list, VALUE tpval);
2293unsigned int rb_hook_list_count(rb_hook_list_t *list);
2294
2295void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);
2296
2297#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
2298 const rb_event_flag_t flag_arg_ = (flag_); \
2299 rb_hook_list_t *hooks_arg_ = (hooks_); \
2300 if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
2301 /* defer evaluating the other arguments */ \
2302 rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
2303 } \
2304} while (0)
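
/* The early `events & flag` test keeps the common "no hooks" path cheap:
 * the remaining macro arguments are only evaluated when a matching hook is
 * registered. Roughly, a use such as
 *
 * ```c
 * EXEC_EVENT_HOOK(ec, RUBY_EVENT_CALL, self, id, called_id, klass, data);
 * ```
 *
 * expands to a guarded call to rb_exec_event_hook_orig() below.
 */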
2305
2306static inline void
2307rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
2308 VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
2309{
2310 struct rb_trace_arg_struct trace_arg;
2311
2312 VM_ASSERT((hooks->events & flag) != 0);
2313
2314 trace_arg.event = flag;
2315 trace_arg.ec = ec;
2316 trace_arg.cfp = ec->cfp;
2317 trace_arg.self = self;
2318 trace_arg.id = id;
2319 trace_arg.called_id = called_id;
2320 trace_arg.klass = klass;
2321 trace_arg.data = data;
2322 trace_arg.path = Qundef;
2323 trace_arg.klass_solved = 0;
2324
2325 rb_exec_event_hooks(&trace_arg, hooks, pop_p);
2326}
2327
2328struct rb_ractor_pub {
2329 VALUE self;
2330 uint32_t id;
2331 rb_hook_list_t hooks;
2332 st_table *targeted_hooks; // also called "local hooks". {ISEQ => hook_list, def => hook_list...}
2333 unsigned int targeted_hooks_cnt; // ex: tp.enabled(target: method(:puts))
2334};
2335
2336static inline rb_hook_list_t *
2337rb_ec_ractor_hooks(const rb_execution_context_t *ec)
2338{
2339 struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
2340 return &cr_pub->hooks;
2341}
2342
2343static inline rb_hook_list_t *
2344rb_vm_global_hooks(const rb_execution_context_t *ec)
2345{
2346 return &rb_ec_vm_ptr(ec)->global_hooks;
2347}
2348
2349static inline rb_hook_list_t *
2350rb_ec_hooks(const rb_execution_context_t *ec, rb_event_flag_t event)
2351{
2352 // Should be a single bit set
2353 VM_ASSERT(event != 0 && ((event - 1) & event) == 0);
2354
2355 if (event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK) {
2356 return rb_vm_global_hooks(ec);
2357 }
2358 else {
2359 return rb_ec_ractor_hooks(ec);
2360 }
2361}
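
/* Under the dispatch above, objspace (GC) internal events are serviced by
 * the VM-global hook list, while all other events use the ractor-local
 * list. A hedged sketch:
 *
 * ```c
 * rb_hook_list_t *global = rb_ec_hooks(ec, RUBY_INTERNAL_EVENT_FREEOBJ);
 * rb_hook_list_t *local  = rb_ec_hooks(ec, RUBY_EVENT_CALL);
 * ```
 */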
2362
2363#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2364 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 0)
2365
2366#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
2367 EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 1)
2368
2369static inline void
2370rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
2371{
2372 EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
2373 NIL_P(eval_script) ? (VALUE)iseq :
2374 rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
2375}
2376
2377void rb_vm_trap_exit(rb_vm_t *vm);
2378void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
2379void rb_vm_postponed_job_free(void); /* vm_trace.c */
2380size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
2381void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */
2382
2383RUBY_SYMBOL_EXPORT_BEGIN
2384
2385int rb_thread_check_trap_pending(void);
2386
2387/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
2388#define RUBY_EVENT_COVERAGE_LINE 0x010000
2389#define RUBY_EVENT_COVERAGE_BRANCH 0x020000
2390
2391extern VALUE rb_get_coverages(void);
2392extern void rb_set_coverages(VALUE, int, VALUE);
2393extern void rb_clear_coverages(void);
2394extern void rb_reset_coverages(void);
2395extern void rb_resume_coverages(void);
2396extern void rb_suspend_coverages(void);
2397
2398void rb_postponed_job_flush(rb_vm_t *vm);
2399
2400// ractor.c
2401RUBY_EXTERN VALUE rb_eRactorUnsafeError;
2402RUBY_EXTERN VALUE rb_eRactorIsolationError;
2403
2404RUBY_SYMBOL_EXPORT_END
2405
2406#endif /* RUBY_VM_CORE_H */