Ruby 4.0.0dev (2025-12-19 revision f133ebb2db664801f87efa98aa91d610d194b700)
vm_core.h (f133ebb2db664801f87efa98aa91d610d194b700)
1#ifndef RUBY_VM_CORE_H
2#define RUBY_VM_CORE_H
3/**********************************************************************
4
5 vm_core.h -
6
7 $Author$
8 created at: 04/01/01 19:41:38 JST
9
10 Copyright (C) 2004-2007 Koichi Sasada
11
12**********************************************************************/
13
14/*
15 * Enable check mode.
16 * 1: enable local assertions.
17 */
18#ifndef VM_CHECK_MODE
19
20// respect RUBY_DUBUG: if given n is 0, then use RUBY_DEBUG
21#define N_OR_RUBY_DEBUG(n) (((n) > 0) ? (n) : RUBY_DEBUG)
22
23#define VM_CHECK_MODE N_OR_RUBY_DEBUG(0)
24#endif
25
39#ifndef VMDEBUG
40#define VMDEBUG 0
41#endif
42
43#if 0
44#undef VMDEBUG
45#define VMDEBUG 3
46#endif
47
48#include "ruby/internal/config.h"
49
50#include <stddef.h>
51#include <signal.h>
52#include <stdarg.h>
53
54#include "ruby_assert.h"
55
56#define RVALUE_SIZE (sizeof(struct RBasic) + sizeof(VALUE[RBIMPL_RVALUE_EMBED_LEN_MAX]))
57
58#if VM_CHECK_MODE > 0
59#define VM_ASSERT(expr, ...) \
60 RUBY_ASSERT_MESG_WHEN(VM_CHECK_MODE > 0, expr, #expr RBIMPL_VA_OPT_ARGS(__VA_ARGS__))
61#define VM_UNREACHABLE(func) rb_bug(#func ": unreachable")
62#define RUBY_ASSERT_CRITICAL_SECTION
63#define RUBY_DEBUG_THREAD_SCHEDULE() rb_thread_schedule()
64#else
65#define VM_ASSERT(/*expr, */...) ((void)0)
66#define VM_UNREACHABLE(func) UNREACHABLE
67#define RUBY_DEBUG_THREAD_SCHEDULE()
68#endif
69
70#define RUBY_ASSERT_MUTEX_OWNED(mutex) VM_ASSERT(rb_mutex_owned_p(mutex))
71
72#if defined(RUBY_ASSERT_CRITICAL_SECTION)
73/*
74# Critical Section Assertions
75
76These assertions are used to ensure that context switching does not occur between two points in the code. In theory,
77such code should already be protected by a mutex, but these assertions are used to ensure that the mutex is held.
78
79The specific case where it can be useful is where a mutex is held further up the call stack, and the code in question
80may not directly hold the mutex. In this case, the critical section assertions can be used to ensure that the mutex is
81held by someone else.
82
83These assertions are only enabled when RUBY_ASSERT_CRITICAL_SECTION is defined, which is only defined if VM_CHECK_MODE
84is set.
85
86## Example Usage
87
88```c
89RUBY_ASSERT_CRITICAL_SECTION_ENTER();
90// ... some code which does not invoke rb_vm_check_ints() ...
91RUBY_ASSERT_CRITICAL_SECTION_LEAVE();
92```
93
94If `rb_vm_check_ints()` is called between the `RUBY_ASSERT_CRITICAL_SECTION_ENTER()` and
95`RUBY_ASSERT_CRITICAL_SECTION_LEAVE()`, a failed assertion will result.
96*/
97extern int ruby_assert_critical_section_entered;
98#define RUBY_ASSERT_CRITICAL_SECTION_ENTER() do{ruby_assert_critical_section_entered += 1;}while(false)
99#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE() do{VM_ASSERT(ruby_assert_critical_section_entered > 0);ruby_assert_critical_section_entered -= 1;}while(false)
100#else
101#define RUBY_ASSERT_CRITICAL_SECTION_ENTER()
102#define RUBY_ASSERT_CRITICAL_SECTION_LEAVE()
103#endif
104
105#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
106# include "wasm/setjmp.h"
107#else
108# include <setjmp.h>
109#endif
110
111#if defined(__linux__) || defined(__FreeBSD__)
112# define RB_THREAD_T_HAS_NATIVE_ID
113#endif
114
116#include "ccan/list/list.h"
117#include "id.h"
118#include "internal.h"
119#include "internal/array.h"
120#include "internal/basic_operators.h"
121#include "internal/box.h"
122#include "internal/sanitizers.h"
123#include "internal/serial.h"
124#include "internal/set_table.h"
125#include "internal/vm.h"
126#include "method.h"
127#include "node.h"
128#include "ruby/ruby.h"
129#include "ruby/st.h"
130#include "ruby_atomic.h"
131#include "vm_opts.h"
132
133#include "ruby/thread_native.h"
134/*
135 * implementation selector of get_insn_info algorithm
136 * 0: linear search
137 * 1: binary search
138 * 2: succinct bitvector
139 */
140#ifndef VM_INSN_INFO_TABLE_IMPL
141# define VM_INSN_INFO_TABLE_IMPL 2
142#endif
143
144#if defined(NSIG_MAX) /* POSIX issue 8 */
145# undef NSIG
146# define NSIG NSIG_MAX
147#elif defined(_SIG_MAXSIG) /* FreeBSD */
148# undef NSIG
149# define NSIG _SIG_MAXSIG
150#elif defined(_SIGMAX) /* QNX */
151# define NSIG (_SIGMAX + 1)
152#elif defined(NSIG) /* 99% of everything else */
153# /* take it */
154#else /* Last resort */
155# define NSIG (sizeof(sigset_t) * CHAR_BIT + 1)
156#endif
157
158#define RUBY_NSIG NSIG
159
160#if defined(SIGCLD)
161# define RUBY_SIGCHLD (SIGCLD)
162#elif defined(SIGCHLD)
163# define RUBY_SIGCHLD (SIGCHLD)
164#endif
165
166#if defined(SIGSEGV) && defined(HAVE_SIGALTSTACK) && defined(SA_SIGINFO) && !defined(__NetBSD__)
167# define USE_SIGALTSTACK
168void *rb_allocate_sigaltstack(void);
169void *rb_register_sigaltstack(void *);
170# define RB_ALTSTACK_INIT(var, altstack) var = rb_register_sigaltstack(altstack)
171# define RB_ALTSTACK_FREE(var) free(var)
172# define RB_ALTSTACK(var) var
173#else /* noop */
174# define RB_ALTSTACK_INIT(var, altstack)
175# define RB_ALTSTACK_FREE(var)
176# define RB_ALTSTACK(var) (0)
177#endif
178
179#include THREAD_IMPL_H
180#define RUBY_VM_THREAD_MODEL 2
181
182/*****************/
183/* configuration */
184/*****************/
185
186/* gcc ver. check */
187#if defined(__GNUC__) && __GNUC__ >= 2
188
189#if OPT_TOKEN_THREADED_CODE
190#if OPT_DIRECT_THREADED_CODE
191#undef OPT_DIRECT_THREADED_CODE
192#endif
193#endif
194
195#else /* defined(__GNUC__) && __GNUC__ >= 2 */
196
197/* disable threaded code options */
198#if OPT_DIRECT_THREADED_CODE
199#undef OPT_DIRECT_THREADED_CODE
200#endif
201#if OPT_TOKEN_THREADED_CODE
202#undef OPT_TOKEN_THREADED_CODE
203#endif
204#endif
205
206/* call threaded code */
207#if OPT_CALL_THREADED_CODE
208#if OPT_DIRECT_THREADED_CODE
209#undef OPT_DIRECT_THREADED_CODE
210#endif /* OPT_DIRECT_THREADED_CODE */
211#endif /* OPT_CALL_THREADED_CODE */
212
213void rb_vm_encoded_insn_data_table_init(void);
214typedef unsigned long rb_num_t;
215typedef signed long rb_snum_t;
216
217enum ruby_tag_type {
218 RUBY_TAG_NONE = 0x0,
219 RUBY_TAG_RETURN = 0x1,
220 RUBY_TAG_BREAK = 0x2,
221 RUBY_TAG_NEXT = 0x3,
222 RUBY_TAG_RETRY = 0x4,
223 RUBY_TAG_REDO = 0x5,
224 RUBY_TAG_RAISE = 0x6,
225 RUBY_TAG_THROW = 0x7,
226 RUBY_TAG_FATAL = 0x8,
227 RUBY_TAG_MASK = 0xf
228};
229
230#define TAG_NONE RUBY_TAG_NONE
231#define TAG_RETURN RUBY_TAG_RETURN
232#define TAG_BREAK RUBY_TAG_BREAK
233#define TAG_NEXT RUBY_TAG_NEXT
234#define TAG_RETRY RUBY_TAG_RETRY
235#define TAG_REDO RUBY_TAG_REDO
236#define TAG_RAISE RUBY_TAG_RAISE
237#define TAG_THROW RUBY_TAG_THROW
238#define TAG_FATAL RUBY_TAG_FATAL
239#define TAG_MASK RUBY_TAG_MASK
240
241enum ruby_vm_throw_flags {
242 VM_THROW_NO_ESCAPE_FLAG = 0x8000,
243 VM_THROW_STATE_MASK = 0xff
244};
245
246/* forward declarations */
247struct rb_thread_struct;
249
250/* iseq data type */
252
254 rb_serial_t raw;
255 VALUE data[2];
256};
257
258#define IMEMO_CONST_CACHE_SHAREABLE IMEMO_FL_USER0
259
260// imemo_constcache
262 VALUE flags;
263
264 VALUE value;
265 const rb_cref_t *ic_cref;
266};
267STATIC_ASSERT(sizeof_iseq_inline_constant_cache_entry,
268 (offsetof(struct iseq_inline_constant_cache_entry, ic_cref) +
269 sizeof(const rb_cref_t *)) <= RVALUE_SIZE);
270
287
289 uint64_t value; // dest_shape_id in former half, attr_index in latter half
290 ID iv_set_name;
291};
292
296
298 struct {
299 struct rb_thread_struct *running_thread;
300 VALUE value;
301 } once;
302 struct iseq_inline_constant_cache ic_cache;
303 struct iseq_inline_iv_cache_entry iv_cache;
304};
305
307 const struct rb_call_data *cd;
308 const struct rb_callcache *cc;
309 VALUE block_handler;
310 VALUE recv;
311 int argc;
312 bool kw_splat;
313 VALUE heap_argv;
314};
315
316#ifndef VM_ARGC_STACK_MAX
317#define VM_ARGC_STACK_MAX 128
318#endif
319
320#define VM_KW_SPECIFIED_BITS_MAX (32-1) /* TODO: 32 -> Fixnum's max bits */
321
322# define CALLING_ARGC(calling) ((calling)->heap_argv ? RARRAY_LENINT((calling)->heap_argv) : (calling)->argc)
323
325
326#if 1
327#define CoreDataFromValue(obj, type) (type*)DATA_PTR(obj)
328#else
329#define CoreDataFromValue(obj, type) (type*)rb_data_object_get(obj)
330#endif
331#define GetCoreDataFromValue(obj, type, ptr) ((ptr) = CoreDataFromValue((obj), type))
332
334 VALUE pathobj; /* String (path) or Array [path, realpath]. Frozen. */
335 VALUE base_label; /* String */
336 VALUE label; /* String */
337 int first_lineno;
338 int node_id;
339 rb_code_location_t code_location;
341
342#define PATHOBJ_PATH 0
343#define PATHOBJ_REALPATH 1
344
345static inline VALUE
346pathobj_path(VALUE pathobj)
347{
348 if (RB_TYPE_P(pathobj, T_STRING)) {
349 return pathobj;
350 }
351 else {
352 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
353 return RARRAY_AREF(pathobj, PATHOBJ_PATH);
354 }
355}
356
357static inline VALUE
358pathobj_realpath(VALUE pathobj)
359{
360 if (RB_TYPE_P(pathobj, T_STRING)) {
361 return pathobj;
362 }
363 else {
364 VM_ASSERT(RB_TYPE_P(pathobj, T_ARRAY));
365 return RARRAY_AREF(pathobj, PATHOBJ_REALPATH);
366 }
367}
368
369/* Forward declarations */
370typedef uintptr_t iseq_bits_t;
371
372#define ISEQ_IS_SIZE(body) (body->ic_size + body->ivc_size + body->ise_size + body->icvarc_size)
373
374/* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
375#define ISEQ_IS_IC_ENTRY(body, idx) (body->is_entries[(idx) + body->ise_size + body->icvarc_size + body->ivc_size].ic_cache);
376
377/* instruction sequence type */
378enum rb_iseq_type {
379 ISEQ_TYPE_TOP,
380 ISEQ_TYPE_METHOD,
381 ISEQ_TYPE_BLOCK,
382 ISEQ_TYPE_CLASS,
383 ISEQ_TYPE_RESCUE,
384 ISEQ_TYPE_ENSURE,
385 ISEQ_TYPE_EVAL,
386 ISEQ_TYPE_MAIN,
387 ISEQ_TYPE_PLAIN
388};
389
390// Attributes specified by Primitive.attr!
391enum rb_builtin_attr {
392 // The iseq does not call methods.
393 BUILTIN_ATTR_LEAF = 0x01,
394 // This iseq only contains single `opt_invokebuiltin_delegate_leave` instruction with 0 arguments.
395 BUILTIN_ATTR_SINGLE_NOARG_LEAF = 0x02,
396 // This attribute signals JIT to duplicate the iseq for each block iseq so that its `yield` will be monomorphic.
397 BUILTIN_ATTR_INLINE_BLOCK = 0x04,
398 // The iseq acts like a C method in backtraces.
399 BUILTIN_ATTR_C_TRACE = 0x08,
400};
401
402typedef VALUE (*rb_jit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *);
403typedef VALUE (*rb_zjit_func_t)(struct rb_execution_context_struct *, struct rb_control_frame_struct *, rb_jit_func_t);
404
406 enum rb_iseq_type type;
407
408 unsigned int iseq_size;
409 VALUE *iseq_encoded; /* encoded iseq (insn addr and operands) */
410
435 struct {
436 unsigned int has_lead : 1;
437 unsigned int has_opt : 1;
438 unsigned int has_rest : 1;
439 unsigned int has_post : 1;
440 unsigned int has_kw : 1;
441 unsigned int has_kwrest : 1;
442 unsigned int has_block : 1;
443
444 unsigned int ambiguous_param0 : 1; /* {|a|} */
445 unsigned int accepts_no_kwarg : 1;
446 unsigned int ruby2_keywords: 1;
447 unsigned int anon_rest: 1;
448 unsigned int anon_kwrest: 1;
449 unsigned int use_block: 1;
450 unsigned int forwardable: 1;
451 } flags;
452
453 unsigned int size;
454
455 int lead_num;
456 int opt_num;
457 int rest_start;
458 int post_start;
459 int post_num;
460 int block_start;
461
462 const VALUE *opt_table; /* (opt_num + 1) entries. */
463 /* opt_num and opt_table:
464 *
465 * def foo o1=e1, o2=e2, ..., oN=eN
466 * #=>
467 * # prologue code
468 * A1: e1
469 * A2: e2
470 * ...
471 * AN: eN
472 * AL: body
473 * opt_num = N
474 * opt_table = [A1, A2, ..., AN, AL]
475 */
476
478 int num;
479 int required_num;
480 int bits_start;
481 int rest_start;
482 const ID *table;
483 VALUE *default_values;
484 } *keyword;
485 } param;
486
487 rb_iseq_location_t location;
488
489 /* insn info, must be freed */
491 const struct iseq_insn_info_entry *body;
492 unsigned int *positions;
493 unsigned int size;
494#if VM_INSN_INFO_TABLE_IMPL == 2
495 struct succ_index_table *succ_index_table;
496#endif
497 } insns_info;
498
499 const ID *local_table; /* must free */
500
501 enum lvar_state {
502 lvar_uninitialized,
503 lvar_initialized,
504 lvar_reassigned,
505 } *lvar_states;
506
507 /* catch table */
508 struct iseq_catch_table *catch_table;
509
510 /* for child iseq */
511 const struct rb_iseq_struct *parent_iseq;
512 struct rb_iseq_struct *local_iseq; /* local_iseq->flip_cnt can be modified */
513
514 union iseq_inline_storage_entry *is_entries; /* [ TS_IVC | TS_ICVARC | TS_ISE | TS_IC ] */
515 struct rb_call_data *call_data; //struct rb_call_data calls[ci_size];
516
517 struct {
518 rb_snum_t flip_count;
519 VALUE script_lines;
520 VALUE coverage;
521 VALUE pc2branchindex;
522 VALUE *original_iseq;
523 } variable;
524
525 unsigned int local_table_size;
526 unsigned int ic_size; // Number of IC caches
527 unsigned int ise_size; // Number of ISE caches
528 unsigned int ivc_size; // Number of IVC caches
529 unsigned int icvarc_size; // Number of ICVARC caches
530 unsigned int ci_size;
531 unsigned int stack_max; /* for stack overflow check */
532
533 unsigned int builtin_attrs; // Union of rb_builtin_attr
534
535 bool prism; // ISEQ was generated from prism compiler
536
537 union {
538 iseq_bits_t * list; /* Find references for GC */
539 iseq_bits_t single;
540 } mark_bits;
541
542 struct rb_id_table *outer_variables;
543
544 const rb_iseq_t *mandatory_only_iseq;
545
546#if USE_YJIT || USE_ZJIT
547 // Function pointer for JIT code on jit_exec()
548 rb_jit_func_t jit_entry;
549 // Number of calls on jit_exec()
550 long unsigned jit_entry_calls;
551 // Function pointer for JIT code on jit_exec_exception()
552 rb_jit_func_t jit_exception;
553 // Number of calls on jit_exec_exception()
554 long unsigned jit_exception_calls;
555#endif
556
557#if USE_YJIT
558 // YJIT stores some data on each iseq.
559 void *yjit_payload;
560 // Used to estimate how frequently this ISEQ gets called
561 uint64_t yjit_calls_at_interv;
562#endif
563
564#if USE_ZJIT
565 // ZJIT stores some data on each iseq.
566 void *zjit_payload;
567#endif
568};
569
570/* T_IMEMO/iseq */
571/* typedef rb_iseq_t is in method.h */
573 VALUE flags; /* 1 */
574 VALUE wrapper; /* 2 */
575
576 struct rb_iseq_constant_body *body; /* 3 */
577
578 union { /* 4, 5 words */
579 struct iseq_compile_data *compile_data; /* used at compile time */
580
581 struct {
582 VALUE obj;
583 int index;
584 } loader;
585
586 struct {
587 unsigned int local_hooks_cnt;
588 rb_event_flag_t global_trace_events;
589 } exec;
590 } aux;
591};
592
593#define ISEQ_BODY(iseq) ((iseq)->body)
594
595#if !defined(USE_LAZY_LOAD) || !(USE_LAZY_LOAD+0)
596#define USE_LAZY_LOAD 0
597#endif
598
599#if !USE_LAZY_LOAD
600static inline const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq) {return 0;}
601#endif
602const rb_iseq_t *rb_iseq_complete(const rb_iseq_t *iseq);
603
604static inline const rb_iseq_t *
605rb_iseq_check(const rb_iseq_t *iseq)
606{
607 if (USE_LAZY_LOAD && ISEQ_BODY(iseq) == NULL) {
608 rb_iseq_complete((rb_iseq_t *)iseq);
609 }
610 return iseq;
611}
612
613static inline bool
614rb_iseq_attr_p(const rb_iseq_t *iseq, enum rb_builtin_attr attr)
615{
616 return (ISEQ_BODY(iseq)->builtin_attrs & attr) == attr;
617}
618
619static inline const rb_iseq_t *
620def_iseq_ptr(rb_method_definition_t *def)
621{
622//TODO: re-visit. to check the bug, enable this assertion.
623#if VM_CHECK_MODE > 0
624 if (def->type != VM_METHOD_TYPE_ISEQ) rb_bug("def_iseq_ptr: not iseq (%d)", def->type);
625#endif
626 return rb_iseq_check(def->body.iseq.iseqptr);
627}
628
629enum ruby_special_exceptions {
630 ruby_error_reenter,
631 ruby_error_nomemory,
632 ruby_error_sysstack,
633 ruby_error_stackfatal,
634 ruby_error_stream_closed,
635 ruby_special_error_count
636};
637
638#define GetVMPtr(obj, ptr) \
639 GetCoreDataFromValue((obj), rb_vm_t, (ptr))
640
641struct rb_vm_struct;
642typedef void rb_vm_at_exit_func(struct rb_vm_struct*);
643
644typedef struct rb_at_exit_list {
645 rb_vm_at_exit_func *func;
646 struct rb_at_exit_list *next;
648
649void *rb_objspace_alloc(void);
650void rb_objspace_free(void *objspace);
651void rb_objspace_call_finalizer(void);
652
653enum rb_hook_list_type {
654 hook_list_type_ractor_local,
655 hook_list_type_targeted_iseq,
656 hook_list_type_targeted_def, // C function
657 hook_list_type_global
658};
659
660typedef struct rb_hook_list_struct {
661 struct rb_event_hook_struct *hooks;
662 rb_event_flag_t events;
663 unsigned int running;
664 enum rb_hook_list_type type;
665 bool need_clean;
667
668// see builtin.h for definition
669typedef const struct rb_builtin_function *RB_BUILTIN;
670
672 VALUE *varptr;
673 struct global_object_list *next;
674};
675
676typedef struct rb_vm_struct {
677 VALUE self;
678
679 struct {
680 struct ccan_list_head set;
681 unsigned int cnt;
682 unsigned int blocking_cnt;
683
684 struct rb_ractor_struct *main_ractor;
685 struct rb_thread_struct *main_thread; // == vm->ractor.main_ractor->threads.main
686
687 struct {
688 // monitor
689 rb_nativethread_lock_t lock;
690 struct rb_ractor_struct *lock_owner;
691 unsigned int lock_rec;
692
693 // join at exit
694 rb_nativethread_cond_t terminate_cond;
695 bool terminate_waiting;
696
697#ifndef RUBY_THREAD_PTHREAD_H
698 // win32
699 bool barrier_waiting;
700 unsigned int barrier_cnt;
701 rb_nativethread_cond_t barrier_complete_cond;
702 rb_nativethread_cond_t barrier_release_cond;
703#endif
704 } sync;
705
706#ifdef RUBY_THREAD_PTHREAD_H
707 // ractor scheduling
708 struct {
709 rb_nativethread_lock_t lock;
710 struct rb_ractor_struct *lock_owner;
711 bool locked;
712
713 rb_nativethread_cond_t cond; // GRQ
714 unsigned int snt_cnt; // count of shared NTs
715 unsigned int dnt_cnt; // count of dedicated NTs
716
717 unsigned int running_cnt;
718
719 unsigned int max_cpu;
720 struct ccan_list_head grq; // // Global Ready Queue
721 unsigned int grq_cnt;
722
723 // running threads
724 struct ccan_list_head running_threads;
725
726 // threads which switch context by timeslice
727 struct ccan_list_head timeslice_threads;
728
729 struct ccan_list_head zombie_threads;
730
731 // true if timeslice timer is not enable
732 bool timeslice_wait_inf;
733
734 // barrier
735 rb_nativethread_cond_t barrier_complete_cond;
736 rb_nativethread_cond_t barrier_release_cond;
737 bool barrier_waiting;
738 unsigned int barrier_waiting_cnt;
739 unsigned int barrier_serial;
740 struct rb_ractor_struct *barrier_ractor;
741 unsigned int barrier_lock_rec;
742 } sched;
743#endif
744 } ractor;
745
746#ifdef USE_SIGALTSTACK
747 void *main_altstack;
748#endif
749
750 rb_serial_t fork_gen;
751
752 /* set in single-threaded processes only: */
753 volatile int ubf_async_safe;
754
755 unsigned int running: 1;
756 unsigned int thread_abort_on_exception: 1;
757 unsigned int thread_report_on_exception: 1;
758 unsigned int thread_ignore_deadlock: 1;
759
760 /* object management */
761 VALUE mark_object_ary;
763 const VALUE special_exceptions[ruby_special_error_count];
764
765 /* Ruby Box */
766 rb_box_t *root_box;
767 rb_box_t *main_box;
768
769 /* load */
770 // For running the init function of statically linked
771 // extensions when they are loaded
772 struct st_table *static_ext_inits;
773
774 /* signal */
775 struct {
776 VALUE cmd[RUBY_NSIG];
777 } trap_list;
778
779 /* hook (for internal events: NEWOBJ, FREEOBJ, GC events, etc.) */
780 rb_hook_list_t global_hooks;
781
782 /* postponed_job (async-signal-safe, and thread-safe) */
783 struct rb_postponed_job_queue *postponed_job_queue;
784
785 int src_encoding_index;
786
787 /* workqueue (thread-safe, NOT async-signal-safe) */
788 struct ccan_list_head workqueue; /* <=> rb_workqueue_job.jnode */
789 rb_nativethread_lock_t workqueue_lock;
790
791 VALUE orig_progname, progname;
792 VALUE coverages, me2counter;
793 int coverage_mode;
794
795 struct {
796 struct rb_objspace *objspace;
797 struct gc_mark_func_data_struct {
798 void *data;
799 void (*mark_func)(VALUE v, void *data);
800 } *mark_func_data;
801 } gc;
802
803 rb_at_exit_list *at_exit;
804
805 const struct rb_builtin_function *builtin_function_table;
806
807 st_table *ci_table;
808 struct rb_id_table *negative_cme_table;
809 st_table *overloaded_cme_table; // cme -> overloaded_cme
810 set_table *unused_block_warning_table;
811 set_table *cc_refinement_table;
812
813 // This id table contains a mapping from ID to ICs. It does this with ID
814 // keys and nested st_tables as values. The nested tables have ICs as keys
815 // and Qtrue as values. It is used when inline constant caches need to be
816 // invalidated or ISEQs are being freed.
817 struct rb_id_table *constant_cache;
818 ID inserting_constant_cache_id;
819
820#ifndef VM_GLOBAL_CC_CACHE_TABLE_SIZE
821#define VM_GLOBAL_CC_CACHE_TABLE_SIZE 1023
822#endif
823 const struct rb_callcache *global_cc_cache_table[VM_GLOBAL_CC_CACHE_TABLE_SIZE]; // vm_eval.c
824
825#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
826 uint32_t clock;
827#endif
828
829 /* params */
830 struct { /* size in byte */
831 size_t thread_vm_stack_size;
832 size_t thread_machine_stack_size;
833 size_t fiber_vm_stack_size;
834 size_t fiber_machine_stack_size;
835 } default_params;
836} rb_vm_t;
837
838extern bool ruby_vm_during_cleanup;
839
840/* default values */
841
842#define RUBY_VM_SIZE_ALIGN 4096
843
844#define RUBY_VM_THREAD_VM_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
845#define RUBY_VM_THREAD_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
846#define RUBY_VM_THREAD_MACHINE_STACK_SIZE ( 128 * 1024 * sizeof(VALUE)) /* 512 KB or 1024 KB */
847#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
848
849#define RUBY_VM_FIBER_VM_STACK_SIZE ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
850#define RUBY_VM_FIBER_VM_STACK_SIZE_MIN ( 2 * 1024 * sizeof(VALUE)) /* 8 KB or 16 KB */
851#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 64 * 1024 * sizeof(VALUE)) /* 256 KB or 512 KB */
852#if defined(__powerpc64__) || defined(__ppc64__) // macOS has __ppc64__
853#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 32 * 1024 * sizeof(VALUE)) /* 128 KB or 256 KB */
854#else
855#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 16 * 1024 * sizeof(VALUE)) /* 64 KB or 128 KB */
856#endif
857
858#if __has_feature(memory_sanitizer) || __has_feature(address_sanitizer) || __has_feature(leak_sanitizer)
859/* It seems sanitizers consume A LOT of machine stacks */
860#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE
861#define RUBY_VM_THREAD_MACHINE_STACK_SIZE (1024 * 1024 * sizeof(VALUE))
862#undef RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN
863#define RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN ( 512 * 1024 * sizeof(VALUE))
864#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE
865#define RUBY_VM_FIBER_MACHINE_STACK_SIZE ( 256 * 1024 * sizeof(VALUE))
866#undef RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN
867#define RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN ( 128 * 1024 * sizeof(VALUE))
868#endif
869
870#ifndef VM_DEBUG_BP_CHECK
871#define VM_DEBUG_BP_CHECK 0
872#endif
873
874#ifndef VM_DEBUG_VERIFY_METHOD_CACHE
875#define VM_DEBUG_VERIFY_METHOD_CACHE (VMDEBUG != 0)
876#endif
877
879 VALUE self;
880 const VALUE *ep;
881 union {
882 const rb_iseq_t *iseq;
883 const struct vm_ifunc *ifunc;
884 VALUE val;
885 } code;
886};
887
888enum rb_block_handler_type {
889 block_handler_type_iseq,
890 block_handler_type_ifunc,
891 block_handler_type_symbol,
892 block_handler_type_proc
893};
894
895enum rb_block_type {
896 block_type_iseq,
897 block_type_ifunc,
898 block_type_symbol,
899 block_type_proc
900};
901
902struct rb_block {
903 union {
904 struct rb_captured_block captured;
905 VALUE symbol;
906 VALUE proc;
907 } as;
908 enum rb_block_type type;
909};
910
912 const VALUE *pc; // cfp[0]
913 VALUE *sp; // cfp[1]
914 const rb_iseq_t *iseq; // cfp[2]
915 VALUE self; // cfp[3] / block[0]
916 const VALUE *ep; // cfp[4] / block[1]
917 const void *block_code; // cfp[5] / block[2] -- iseq, ifunc, or forwarded block handler
918 void *jit_return; // cfp[6] -- return address for JIT code
919#if VM_DEBUG_BP_CHECK
920 VALUE *bp_check; // cfp[7]
921#endif
923
924extern const rb_data_type_t ruby_threadptr_data_type;
925
926static inline struct rb_thread_struct *
927rb_thread_ptr(VALUE thval)
928{
929 return (struct rb_thread_struct *)rb_check_typeddata(thval, &ruby_threadptr_data_type);
930}
931
932enum rb_thread_status {
933 THREAD_RUNNABLE,
934 THREAD_STOPPED,
935 THREAD_STOPPED_FOREVER,
936 THREAD_KILLED
937};
938
939#ifdef RUBY_JMP_BUF
940typedef RUBY_JMP_BUF rb_jmpbuf_t;
941#else
942typedef void *rb_jmpbuf_t[5];
943#endif
944
945/*
946 `rb_vm_tag_jmpbuf_t` type represents a buffer used to
947 long jump to a C frame associated with `rb_vm_tag`.
948
949 Use-site of `rb_vm_tag_jmpbuf_t` is responsible for calling the
950 following functions:
951 - `rb_vm_tag_jmpbuf_init` once `rb_vm_tag_jmpbuf_t` is allocated.
952 - `rb_vm_tag_jmpbuf_deinit` once `rb_vm_tag_jmpbuf_t` is no longer necessary.
953
954 `RB_VM_TAG_JMPBUF_GET` transforms a `rb_vm_tag_jmpbuf_t` into a
955 `rb_jmpbuf_t` to be passed to `rb_setjmp/rb_longjmp`.
956*/
957#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
958/*
959 WebAssembly target with Asyncify-based SJLJ needs
960 to capture the execution context by unwind/rewind-ing
961 call frames into a jump buffer. The buffer space tends
962 to be considerably large unlike other architectures'
963 register-based buffers.
964 Therefore, we allocates the buffer on the heap on such
965 environments.
966*/
967typedef rb_jmpbuf_t *rb_vm_tag_jmpbuf_t;
968
969#define RB_VM_TAG_JMPBUF_GET(buf) (*buf)
970
971static inline void
972rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
973{
974 *jmpbuf = ruby_xmalloc(sizeof(rb_jmpbuf_t));
975}
976
977static inline void
978rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
979{
980 ruby_xfree(*jmpbuf);
981}
982#else
983typedef rb_jmpbuf_t rb_vm_tag_jmpbuf_t;
984
985#define RB_VM_TAG_JMPBUF_GET(buf) (buf)
986
987static inline void
988rb_vm_tag_jmpbuf_init(rb_vm_tag_jmpbuf_t *jmpbuf)
989{
990 // no-op
991}
992
993static inline void
994rb_vm_tag_jmpbuf_deinit(const rb_vm_tag_jmpbuf_t *jmpbuf)
995{
996 // no-op
997}
998#endif
999
1000/*
1001 the members which are written in EC_PUSH_TAG() should be placed at
1002 the beginning and the end, so that entire region is accessible.
1003*/
1005 VALUE tag;
1006 VALUE retval;
1007 rb_vm_tag_jmpbuf_t buf;
1008 struct rb_vm_tag *prev;
1009 enum ruby_tag_type state;
1010 unsigned int lock_rec;
1011};
1012
1013STATIC_ASSERT(rb_vm_tag_buf_offset, offsetof(struct rb_vm_tag, buf) > 0);
1014STATIC_ASSERT(rb_vm_tag_buf_end,
1015 offsetof(struct rb_vm_tag, buf) + sizeof(rb_vm_tag_jmpbuf_t) <
1016 sizeof(struct rb_vm_tag));
1017
1020 void *arg;
1021};
1022
1023struct rb_mutex_struct;
1024
1025typedef struct rb_fiber_struct rb_fiber_t;
1026
1028 struct rb_waiting_list *next;
1029 struct rb_thread_struct *thread;
1030 struct rb_fiber_struct *fiber;
1031};
1032
1034 /* execution information */
1035 VALUE *vm_stack; /* must free, must mark */
1036 size_t vm_stack_size; /* size in word (byte size / sizeof(VALUE)) */
1037 rb_control_frame_t *cfp;
1038
1039 struct rb_vm_tag *tag;
1040
1041 /* interrupt flags */
1042 rb_atomic_t interrupt_flag;
1043 rb_atomic_t interrupt_mask; /* size should match flag */
1044#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
1045 uint32_t checked_clock;
1046#endif
1047
1048 rb_fiber_t *fiber_ptr;
1049 struct rb_thread_struct *thread_ptr;
1050 rb_serial_t serial;
1051
1052 /* storage (ec (fiber) local) */
1053 struct rb_id_table *local_storage;
1054 VALUE local_storage_recursive_hash;
1055 VALUE local_storage_recursive_hash_for_trace;
1056
1057 /* Inheritable fiber storage. */
1058 VALUE storage;
1059
1060 /* eval env */
1061 const VALUE *root_lep;
1062 VALUE root_svar;
1063
1064 /* trace information */
1065 struct rb_trace_arg_struct *trace_arg;
1066
1067 /* temporary places */
1068 VALUE errinfo;
1069 VALUE passed_block_handler; /* for rb_iterate */
1070
1071 uint8_t raised_flag; /* only 3 bits needed */
1072
1073 /* n.b. only 7 bits needed, really: */
1074 BITFIELD(enum method_missing_reason, method_missing_reason, 8);
1075
1076 VALUE private_const_reference;
1077
1078 struct {
1079 VALUE obj;
1080 VALUE fields_obj;
1081 } gen_fields_cache;
1082
1083 /* for GC */
1084 struct {
1085 VALUE *stack_start;
1086 VALUE *stack_end;
1087 size_t stack_maxsize;
1089
1090#ifdef RUBY_ASAN_ENABLED
1091 void *asan_fake_stack_handle;
1092#endif
1093 } machine;
1094};
1095
1096#ifndef rb_execution_context_t
1098#define rb_execution_context_t rb_execution_context_t
1099#endif
1100
1101// for builtin.h
1102#define VM_CORE_H_EC_DEFINED 1
1103
1104// Set the vm_stack pointer in the execution context.
1105void rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1106
1107// Initialize the vm_stack pointer in the execution context and push the initial stack frame.
1108// @param ec the execution context to update.
1109// @param stack a pointer to the stack to use.
1110// @param size the size of the stack, as in `VALUE stack[size]`.
1111void rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size);
1112
1113// Clear (set to `NULL`) the vm_stack pointer.
1114// @param ec the execution context to update.
1115void rb_ec_clear_vm_stack(rb_execution_context_t *ec);
1116
1117// Close an execution context and free related resources that are no longer needed.
1118// @param ec the execution context to close.
1119void rb_ec_close(rb_execution_context_t *ec);
1120
1122 bool ractor_safe;
1123};
1124
1125typedef struct rb_ractor_struct rb_ractor_t;
1126
1127struct rb_native_thread;
1128
1129typedef struct rb_thread_struct {
1130 struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
1131 VALUE self;
1132 rb_ractor_t *ractor;
1133 rb_vm_t *vm;
1134 struct rb_native_thread *nt;
1136
1137 struct rb_thread_sched_item sched;
1138 bool mn_schedulable;
1139 rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
1140
1141 VALUE last_status; /* $? */
1142
1143 /* for cfunc */
1144 struct rb_calling_info *calling;
1145
1146 /* for load(true) */
1147 VALUE top_self;
1148 VALUE top_wrapper;
1149
1150 /* thread control */
1151
1152 BITFIELD(enum rb_thread_status, status, 2);
1153 /* bit flags */
1154 unsigned int has_dedicated_nt : 1;
1155 unsigned int to_kill : 1;
1156 unsigned int abort_on_exception: 1;
1157 unsigned int report_on_exception: 1;
1158 unsigned int pending_interrupt_queue_checked: 1;
1159 int8_t priority; /* -3 .. 3 (RUBY_THREAD_PRIORITY_{MIN,MAX}) */
1160 uint32_t running_time_us; /* 12500..800000 */
1161
1162 void *blocking_region_buffer;
1163
1164 VALUE thgroup;
1165 VALUE value;
1166
1167 /* temporary place of retval on OPT_CALL_THREADED_CODE */
1168#if OPT_CALL_THREADED_CODE
1169 VALUE retval;
1170#endif
1171
1172 /* async errinfo queue */
1173 VALUE pending_interrupt_queue;
1174 VALUE pending_interrupt_mask_stack;
1175
1176 /* interrupt management */
1177 rb_nativethread_lock_t interrupt_lock;
1178 struct rb_unblock_callback unblock;
1179 VALUE locking_mutex;
1180 struct rb_mutex_struct *keeping_mutexes;
1181 struct ccan_list_head interrupt_exec_tasks;
1182
1183 struct rb_waiting_list *join_list;
1184
1185 union {
1186 struct {
1187 VALUE proc;
1188 VALUE args;
1189 int kw_splat;
1190 } proc;
1191 struct {
1192 VALUE (*func)(void *);
1193 void *arg;
1194 } func;
1195 } invoke_arg;
1196
1197 enum thread_invoke_type {
1198 thread_invoke_type_none = 0,
1199 thread_invoke_type_proc,
1200 thread_invoke_type_ractor_proc,
1201 thread_invoke_type_func
1202 } invoke_type;
1203
1204 /* fiber */
1205 rb_fiber_t *root_fiber;
1206
1207 VALUE scheduler;
1208 unsigned int blocking;
1209
1210 /* misc */
1211 VALUE name;
1212 void **specific_storage;
1213
1214 struct rb_ext_config ext_config;
1215} rb_thread_t;
1216
1217static inline unsigned int
1218rb_th_serial(const rb_thread_t *th)
1219{
1220 return th ? (unsigned int)th->serial : 0;
1221}
1222
1223typedef enum {
1224 VM_DEFINECLASS_TYPE_CLASS = 0x00,
1225 VM_DEFINECLASS_TYPE_SINGLETON_CLASS = 0x01,
1226 VM_DEFINECLASS_TYPE_MODULE = 0x02,
1227 /* 0x03..0x06 is reserved */
1228 VM_DEFINECLASS_TYPE_MASK = 0x07
1229} rb_vm_defineclass_type_t;
1230
1231#define VM_DEFINECLASS_TYPE(x) ((rb_vm_defineclass_type_t)(x) & VM_DEFINECLASS_TYPE_MASK)
1232#define VM_DEFINECLASS_FLAG_SCOPED 0x08
1233#define VM_DEFINECLASS_FLAG_HAS_SUPERCLASS 0x10
1234#define VM_DEFINECLASS_SCOPED_P(x) ((x) & VM_DEFINECLASS_FLAG_SCOPED)
1235#define VM_DEFINECLASS_HAS_SUPERCLASS_P(x) \
1236 ((x) & VM_DEFINECLASS_FLAG_HAS_SUPERCLASS)
1237
1238/* iseq.c */
1239RUBY_SYMBOL_EXPORT_BEGIN
1240
1241/* node -> iseq */
1242rb_iseq_t *rb_iseq_new (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent, enum rb_iseq_type);
1243rb_iseq_t *rb_iseq_new_top (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, const rb_iseq_t *parent);
1244rb_iseq_t *rb_iseq_new_main (const VALUE ast_value, VALUE path, VALUE realpath, const rb_iseq_t *parent, int opt);
1245rb_iseq_t *rb_iseq_new_eval (const VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth);
1246rb_iseq_t *rb_iseq_new_with_opt( VALUE ast_value, VALUE name, VALUE path, VALUE realpath, int first_lineno, const rb_iseq_t *parent, int isolated_depth,
1247 enum rb_iseq_type, const rb_compile_option_t*,
1248 VALUE script_lines);
1249
1250struct iseq_link_anchor;
1252 VALUE flags;
1253 VALUE reserved;
1254 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *);
1255 const void *data;
1256};
1257static inline struct rb_iseq_new_with_callback_callback_func *
1258rb_iseq_new_with_callback_new_callback(
1259 void (*func)(rb_iseq_t *, struct iseq_link_anchor *, const void *), const void *ptr)
1260{
1262 IMEMO_NEW(struct rb_iseq_new_with_callback_callback_func, imemo_ifunc, Qfalse);
1263 memo->func = func;
1264 memo->data = ptr;
1265
1266 return memo;
1267}
1268rb_iseq_t *rb_iseq_new_with_callback(const struct rb_iseq_new_with_callback_callback_func * ifunc,
1269 VALUE name, VALUE path, VALUE realpath, int first_lineno,
1270 const rb_iseq_t *parent, enum rb_iseq_type, const rb_compile_option_t*);
1271
1272VALUE rb_iseq_disasm(const rb_iseq_t *iseq);
1273int rb_iseq_disasm_insn(VALUE str, const VALUE *iseqval, size_t pos, const rb_iseq_t *iseq, VALUE child);
1274
1275VALUE rb_iseq_coverage(const rb_iseq_t *iseq);
1276
1277RUBY_EXTERN VALUE rb_cISeq;
1278RUBY_EXTERN VALUE rb_cRubyVM;
1279RUBY_EXTERN VALUE rb_mRubyVMFrozenCore;
1280RUBY_EXTERN VALUE rb_block_param_proxy;
1281RUBY_SYMBOL_EXPORT_END
1282
1283#define GetProcPtr(obj, ptr) \
1284 GetCoreDataFromValue((obj), rb_proc_t, (ptr))
1285
1286typedef struct {
1287 const struct rb_block block;
1288 unsigned int is_from_method: 1; /* bool */
1289 unsigned int is_lambda: 1; /* bool */
1290 unsigned int is_isolated: 1; /* bool */
1291} rb_proc_t;
1292
1293RUBY_SYMBOL_EXPORT_BEGIN
1294VALUE rb_proc_isolate(VALUE self);
1295VALUE rb_proc_isolate_bang(VALUE self, VALUE replace_self);
1296VALUE rb_proc_ractor_make_shareable(VALUE proc, VALUE replace_self);
1297RUBY_SYMBOL_EXPORT_END
1298
1299typedef struct {
1300 VALUE flags; /* imemo header */
1301 rb_iseq_t *iseq;
1302 const VALUE *ep;
1303 const VALUE *env;
1304 unsigned int env_size;
1305} rb_env_t;
1306
1307extern const rb_data_type_t ruby_binding_data_type;
1308
1309#define GetBindingPtr(obj, ptr) \
1310 GetCoreDataFromValue((obj), rb_binding_t, (ptr))
1311
1312typedef struct {
1313 const struct rb_block block;
1314 const VALUE pathobj;
1315 int first_lineno;
1316} rb_binding_t;
1317
1318/* used by compile time and send insn */
1319
1320enum vm_check_match_type {
1321 VM_CHECKMATCH_TYPE_WHEN = 1,
1322 VM_CHECKMATCH_TYPE_CASE = 2,
1323 VM_CHECKMATCH_TYPE_RESCUE = 3
1324};
1325
1326#define VM_CHECKMATCH_TYPE_MASK 0x03
1327#define VM_CHECKMATCH_ARRAY 0x04
1328
1329enum vm_opt_newarray_send_type {
1330 VM_OPT_NEWARRAY_SEND_MAX = 1,
1331 VM_OPT_NEWARRAY_SEND_MIN = 2,
1332 VM_OPT_NEWARRAY_SEND_HASH = 3,
1333 VM_OPT_NEWARRAY_SEND_PACK = 4,
1334 VM_OPT_NEWARRAY_SEND_PACK_BUFFER = 5,
1335 VM_OPT_NEWARRAY_SEND_INCLUDE_P = 6,
1336};
1337
1338enum vm_special_object_type {
1339 VM_SPECIAL_OBJECT_VMCORE = 1,
1340 VM_SPECIAL_OBJECT_CBASE,
1341 VM_SPECIAL_OBJECT_CONST_BASE
1342};
1343
1344enum vm_svar_index {
1345 VM_SVAR_LASTLINE = 0, /* $_ */
1346 VM_SVAR_BACKREF = 1, /* $~ */
1347
1348 VM_SVAR_EXTRA_START = 2,
1349 VM_SVAR_FLIPFLOP_START = 2 /* flipflop */
1350};
1351
1352/* inline cache */
1353typedef struct iseq_inline_constant_cache *IC;
1354typedef struct iseq_inline_iv_cache_entry *IVC;
1355typedef struct iseq_inline_cvar_cache_entry *ICVARC;
1356typedef union iseq_inline_storage_entry *ISE;
1357typedef const struct rb_callinfo *CALL_INFO;
1358typedef const struct rb_callcache *CALL_CACHE;
1359typedef struct rb_call_data *CALL_DATA;
1360
1361typedef VALUE CDHASH;
1362
1363#ifndef FUNC_FASTCALL
1364#define FUNC_FASTCALL(x) x
1365#endif
1366
1367typedef rb_control_frame_t *
1368 (FUNC_FASTCALL(*rb_insn_func_t))(rb_execution_context_t *, rb_control_frame_t *);
1369
1370#define VM_TAGGED_PTR_SET(p, tag) ((VALUE)(p) | (tag))
1371#define VM_TAGGED_PTR_REF(v, mask) ((void *)((v) & ~mask))
1372
1373#define GC_GUARDED_PTR(p) VM_TAGGED_PTR_SET((p), 0x01)
1374#define GC_GUARDED_PTR_REF(p) VM_TAGGED_PTR_REF((p), 0x03)
1375#define GC_GUARDED_PTR_P(p) (((VALUE)(p)) & 0x01)
1376
1377enum vm_frame_env_flags {
1378 /* Frame/Environment flag bits:
1379 * MMMM MMMM MMMM MMMM ___F FFFF FFFE EEEX (LSB)
1380 *
1381 * X : tag for GC marking (It seems as Fixnum)
1382 * EEE : 4 bits Env flags
1383 * FF..: 8 bits Frame flags
1384 * MM..: 15 bits frame magic (to check frame corruption)
1385 */
1386
1387 /* frame types */
1388 VM_FRAME_MAGIC_METHOD = 0x11110001,
1389 VM_FRAME_MAGIC_BLOCK = 0x22220001,
1390 VM_FRAME_MAGIC_CLASS = 0x33330001,
1391 VM_FRAME_MAGIC_TOP = 0x44440001,
1392 VM_FRAME_MAGIC_CFUNC = 0x55550001,
1393 VM_FRAME_MAGIC_IFUNC = 0x66660001,
1394 VM_FRAME_MAGIC_EVAL = 0x77770001,
1395 VM_FRAME_MAGIC_RESCUE = 0x78880001,
1396 VM_FRAME_MAGIC_DUMMY = 0x79990001,
1397
1398 VM_FRAME_MAGIC_MASK = 0x7fff0001,
1399
1400 /* frame flag */
1401 VM_FRAME_FLAG_FINISH = 0x0020,
1402 VM_FRAME_FLAG_BMETHOD = 0x0040,
1403 VM_FRAME_FLAG_CFRAME = 0x0080,
1404 VM_FRAME_FLAG_LAMBDA = 0x0100,
1405 VM_FRAME_FLAG_MODIFIED_BLOCK_PARAM = 0x0200,
1406 VM_FRAME_FLAG_CFRAME_KW = 0x0400,
1407 VM_FRAME_FLAG_PASSED = 0x0800,
1408 VM_FRAME_FLAG_BOX_REQUIRE = 0x1000,
1409
1410 /* env flag */
1411 VM_ENV_FLAG_LOCAL = 0x0002,
1412 VM_ENV_FLAG_ESCAPED = 0x0004,
1413 VM_ENV_FLAG_WB_REQUIRED = 0x0008,
1414 VM_ENV_FLAG_ISOLATED = 0x0010,
1415};
1416
1417#define VM_ENV_DATA_SIZE ( 3)
1418
1419#define VM_ENV_DATA_INDEX_ME_CREF (-2) /* ep[-2] */
1420#define VM_ENV_DATA_INDEX_SPECVAL (-1) /* ep[-1] */
1421#define VM_ENV_DATA_INDEX_FLAGS ( 0) /* ep[ 0] */
1422#define VM_ENV_DATA_INDEX_ENV ( 1) /* ep[ 1] */
1423
1424#define VM_ENV_INDEX_LAST_LVAR (-VM_ENV_DATA_SIZE)
1425
1426static inline void VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value);
1427
1428static inline void
1429VM_ENV_FLAGS_SET(const VALUE *ep, VALUE flag)
1430{
1431 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1432 VM_ASSERT(FIXNUM_P(flags));
1433 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags | flag);
1434}
1435
1436static inline void
1437VM_ENV_FLAGS_UNSET(const VALUE *ep, VALUE flag)
1438{
1439 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1440 VM_ASSERT(FIXNUM_P(flags));
1441 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_FLAGS], flags & ~flag);
1442}
1443
1444static inline unsigned long
1445VM_ENV_FLAGS(const VALUE *ep, long flag)
1446{
1447 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1448 VM_ASSERT(FIXNUM_P(flags));
1449 return flags & flag;
1450}
1451
1452static inline unsigned long
1453VM_ENV_FLAGS_UNCHECKED(const VALUE *ep, long flag)
1454{
1455 VALUE flags = ep[VM_ENV_DATA_INDEX_FLAGS];
1456 return flags & flag;
1457}
1458
1459static inline unsigned long
1460VM_ENV_FRAME_TYPE_P(const VALUE *ep, unsigned long frame_type)
1461{
1462 return VM_ENV_FLAGS(ep, VM_FRAME_MAGIC_MASK) == frame_type;
1463}
1464
1465static inline unsigned long
1466VM_FRAME_TYPE(const rb_control_frame_t *cfp)
1467{
1468 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_MAGIC_MASK);
1469}
1470
1471static inline unsigned long
1472VM_FRAME_TYPE_UNCHECKED(const rb_control_frame_t *cfp)
1473{
1474 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_MAGIC_MASK);
1475}
1476
1477static inline int
1478VM_FRAME_LAMBDA_P(const rb_control_frame_t *cfp)
1479{
1480 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_LAMBDA) != 0;
1481}
1482
1483static inline int
1484VM_FRAME_CFRAME_KW_P(const rb_control_frame_t *cfp)
1485{
1486 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME_KW) != 0;
1487}
1488
1489static inline int
1490VM_FRAME_FINISHED_P(const rb_control_frame_t *cfp)
1491{
1492 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1493}
1494
1495static inline int
1496VM_FRAME_FINISHED_P_UNCHECKED(const rb_control_frame_t *cfp)
1497{
1498 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_FINISH) != 0;
1499}
1500
1501static inline int
1502VM_FRAME_BMETHOD_P(const rb_control_frame_t *cfp)
1503{
1504 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BMETHOD) != 0;
1505}
1506
1507static inline int
1508rb_obj_is_iseq(VALUE iseq)
1509{
1510 return imemo_type_p(iseq, imemo_iseq);
1511}
1512
1513#if VM_CHECK_MODE > 0
1514#define RUBY_VM_NORMAL_ISEQ_P(iseq) rb_obj_is_iseq((VALUE)iseq)
1515#endif
1516
1517static inline int
1518VM_FRAME_CFRAME_P(const rb_control_frame_t *cfp)
1519{
1520 int cframe_p = VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1521 VM_ASSERT(RUBY_VM_NORMAL_ISEQ_P(cfp->iseq) != cframe_p ||
1522 (VM_FRAME_TYPE(cfp) & VM_FRAME_MAGIC_MASK) == VM_FRAME_MAGIC_DUMMY);
1523 return cframe_p;
1524}
1525
1526static inline int
1527VM_FRAME_CFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
1528{
1529 return VM_ENV_FLAGS_UNCHECKED(cfp->ep, VM_FRAME_FLAG_CFRAME) != 0;
1530}
1531
1532static inline int
1533VM_FRAME_RUBYFRAME_P(const rb_control_frame_t *cfp)
1534{
1535 return !VM_FRAME_CFRAME_P(cfp);
1536}
1537
1538static inline int
1539VM_FRAME_RUBYFRAME_P_UNCHECKED(const rb_control_frame_t *cfp)
1540{
1541 return !VM_FRAME_CFRAME_P_UNCHECKED(cfp);
1542}
1543
1544static inline int
1545VM_FRAME_NS_REQUIRE_P(const rb_control_frame_t *cfp)
1546{
1547 return VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE) != 0;
1548}
1549
1550#define RUBYVM_CFUNC_FRAME_P(cfp) \
1551 (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_CFUNC)
1552
1553#define VM_GUARDED_PREV_EP(ep) GC_GUARDED_PTR(ep)
1554#define VM_BLOCK_HANDLER_NONE 0
1555
1556static inline int
1557VM_ENV_LOCAL_P(const VALUE *ep)
1558{
1559 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1560}
1561
1562static inline int
1563VM_ENV_LOCAL_P_UNCHECKED(const VALUE *ep)
1564{
1565 return VM_ENV_FLAGS_UNCHECKED(ep, VM_ENV_FLAG_LOCAL) ? 1 : 0;
1566}
1567
1568static inline const VALUE *
1569VM_ENV_PREV_EP_UNCHECKED(const VALUE *ep)
1570{
1571 return GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1572}
1573
1574static inline const VALUE *
1575VM_ENV_PREV_EP(const VALUE *ep)
1576{
1577 VM_ASSERT(VM_ENV_LOCAL_P(ep) == 0);
1578 return VM_ENV_PREV_EP_UNCHECKED(ep);
1579}
1580
1581static inline bool
1582VM_ENV_BOXED_P(const VALUE *ep)
1583{
1584 return VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CLASS) || VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_TOP);
1585}
1586
1587static inline VALUE
1588VM_ENV_BLOCK_HANDLER(const VALUE *ep)
1589{
1590 if (VM_ENV_BOXED_P(ep)) {
1591 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1592 return VM_BLOCK_HANDLER_NONE;
1593 }
1594
1595 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1596 return ep[VM_ENV_DATA_INDEX_SPECVAL];
1597}
1598
1599static inline const rb_box_t *
1600VM_ENV_BOX(const VALUE *ep)
1601{
1602 VM_ASSERT(VM_ENV_BOXED_P(ep));
1603 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1604 return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1605}
1606
1607static inline const rb_box_t *
1608VM_ENV_BOX_UNCHECKED(const VALUE *ep)
1609{
1610 return (const rb_box_t *)GC_GUARDED_PTR_REF(ep[VM_ENV_DATA_INDEX_SPECVAL]);
1611}
1612
1613#if VM_CHECK_MODE > 0
1614int rb_vm_ep_in_heap_p(const VALUE *ep);
1615#endif
1616
1617static inline int
1618VM_ENV_ESCAPED_P(const VALUE *ep)
1619{
1620 VM_ASSERT(rb_vm_ep_in_heap_p(ep) == !!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
1621 return VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) ? 1 : 0;
1622}
1623
1625static inline VALUE
1626VM_ENV_ENVVAL(const VALUE *ep)
1627{
1628 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV];
1629 VM_ASSERT(VM_ENV_ESCAPED_P(ep));
1630 VM_ASSERT(envval == Qundef || imemo_type_p(envval, imemo_env));
1631 return envval;
1632}
1633
1635static inline const rb_env_t *
1636VM_ENV_ENVVAL_PTR(const VALUE *ep)
1637{
1638 return (const rb_env_t *)VM_ENV_ENVVAL(ep);
1639}
1640
1641static inline const rb_env_t *
1642vm_env_new(VALUE *env_ep, VALUE *env_body, unsigned int env_size, const rb_iseq_t *iseq)
1643{
1644 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, (VALUE)iseq);
1645 env->ep = env_ep;
1646 env->env = env_body;
1647 env->env_size = env_size;
1648 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1649 return env;
1650}
1651
1652static inline void
1653VM_FORCE_WRITE(const VALUE *ptr, VALUE v)
1654{
1655 *((VALUE *)ptr) = v;
1656}
1657
1658static inline void
1659VM_FORCE_WRITE_SPECIAL_CONST(const VALUE *ptr, VALUE special_const_value)
1660{
1661 VM_ASSERT(RB_SPECIAL_CONST_P(special_const_value));
1662 VM_FORCE_WRITE(ptr, special_const_value);
1663}
1664
1665static inline void
1666VM_STACK_ENV_WRITE(const VALUE *ep, int index, VALUE v)
1667{
1668 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_WB_REQUIRED) == 0);
1669 VM_FORCE_WRITE(&ep[index], v);
1670}
1671
1672const VALUE *rb_vm_ep_local_ep(const VALUE *ep);
1673const VALUE *rb_vm_proc_local_ep(VALUE proc);
1674void rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep);
1675void rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src);
1676
1677VALUE rb_vm_frame_block_handler(const rb_control_frame_t *cfp);
1678
1679#define RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp) ((cfp)+1)
1680#define RUBY_VM_NEXT_CONTROL_FRAME(cfp) ((cfp)-1)
1681
1682#define RUBY_VM_VALID_CONTROL_FRAME_P(cfp, ecfp) \
1683 ((void *)(ecfp) > (void *)(cfp))
1684
1685static inline const rb_control_frame_t *
1686RUBY_VM_END_CONTROL_FRAME(const rb_execution_context_t *ec)
1687{
1688 return (rb_control_frame_t *)(ec->vm_stack + ec->vm_stack_size);
1689}
1690
1691static inline int
1692RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1693{
1694 return !RUBY_VM_VALID_CONTROL_FRAME_P(cfp, RUBY_VM_END_CONTROL_FRAME(ec));
1695}
1696
1697static inline int
1698VM_BH_ISEQ_BLOCK_P(VALUE block_handler)
1699{
1700 if ((block_handler & 0x03) == 0x01) {
1701#if VM_CHECK_MODE > 0
1702 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1703 VM_ASSERT(imemo_type_p(captured->code.val, imemo_iseq));
1704#endif
1705 return 1;
1706 }
1707 else {
1708 return 0;
1709 }
1710}
1711
1712static inline VALUE
1713VM_BH_FROM_ISEQ_BLOCK(const struct rb_captured_block *captured)
1714{
1715 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x01);
1716 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1717 return block_handler;
1718}
1719
1720static inline const struct rb_captured_block *
1721VM_BH_TO_ISEQ_BLOCK(VALUE block_handler)
1722{
1723 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1724 VM_ASSERT(VM_BH_ISEQ_BLOCK_P(block_handler));
1725 return captured;
1726}
1727
1728static inline int
1729VM_BH_IFUNC_P(VALUE block_handler)
1730{
1731 if ((block_handler & 0x03) == 0x03) {
1732#if VM_CHECK_MODE > 0
1733 struct rb_captured_block *captured = (void *)(block_handler & ~0x03);
1734 VM_ASSERT(imemo_type_p(captured->code.val, imemo_ifunc));
1735#endif
1736 return 1;
1737 }
1738 else {
1739 return 0;
1740 }
1741}
1742
1743static inline VALUE
1744VM_BH_FROM_IFUNC_BLOCK(const struct rb_captured_block *captured)
1745{
1746 VALUE block_handler = VM_TAGGED_PTR_SET(captured, 0x03);
1747 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1748 return block_handler;
1749}
1750
1751static inline const struct rb_captured_block *
1752VM_BH_TO_IFUNC_BLOCK(VALUE block_handler)
1753{
1754 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1755 VM_ASSERT(VM_BH_IFUNC_P(block_handler));
1756 return captured;
1757}
1758
1759static inline const struct rb_captured_block *
1760VM_BH_TO_CAPT_BLOCK(VALUE block_handler)
1761{
1762 struct rb_captured_block *captured = VM_TAGGED_PTR_REF(block_handler, 0x03);
1763 VM_ASSERT(VM_BH_IFUNC_P(block_handler) || VM_BH_ISEQ_BLOCK_P(block_handler));
1764 return captured;
1765}
1766
1767static inline enum rb_block_handler_type
1768vm_block_handler_type(VALUE block_handler)
1769{
1770 if (VM_BH_ISEQ_BLOCK_P(block_handler)) {
1771 return block_handler_type_iseq;
1772 }
1773 else if (VM_BH_IFUNC_P(block_handler)) {
1774 return block_handler_type_ifunc;
1775 }
1776 else if (SYMBOL_P(block_handler)) {
1777 return block_handler_type_symbol;
1778 }
1779 else {
1780 VM_ASSERT(rb_obj_is_proc(block_handler));
1781 return block_handler_type_proc;
1782 }
1783}
1784
1785static inline void
1786vm_block_handler_verify(MAYBE_UNUSED(VALUE block_handler))
1787{
1788 VM_ASSERT(block_handler == VM_BLOCK_HANDLER_NONE ||
1789 (vm_block_handler_type(block_handler), 1));
1790}
1791
1792static inline enum rb_block_type
1793vm_block_type(const struct rb_block *block)
1794{
1795#if VM_CHECK_MODE > 0
1796 switch (block->type) {
1797 case block_type_iseq:
1798 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_iseq));
1799 break;
1800 case block_type_ifunc:
1801 VM_ASSERT(imemo_type_p(block->as.captured.code.val, imemo_ifunc));
1802 break;
1803 case block_type_symbol:
1804 VM_ASSERT(SYMBOL_P(block->as.symbol));
1805 break;
1806 case block_type_proc:
1807 VM_ASSERT(rb_obj_is_proc(block->as.proc));
1808 break;
1809 }
1810#endif
1811 return block->type;
1812}
1813
1814static inline void
1815vm_block_type_set(const struct rb_block *block, enum rb_block_type type)
1816{
1817 struct rb_block *mb = (struct rb_block *)block;
1818 mb->type = type;
1819}
1820
1821static inline const struct rb_block *
1822vm_proc_block(VALUE procval)
1823{
1824 VM_ASSERT(rb_obj_is_proc(procval));
1825 return &((rb_proc_t *)RTYPEDDATA_DATA(procval))->block;
1826}
1827
1828static inline const rb_iseq_t *vm_block_iseq(const struct rb_block *block);
1829static inline const VALUE *vm_block_ep(const struct rb_block *block);
1830
1831static inline const rb_iseq_t *
1832vm_proc_iseq(VALUE procval)
1833{
1834 return vm_block_iseq(vm_proc_block(procval));
1835}
1836
1837static inline const VALUE *
1838vm_proc_ep(VALUE procval)
1839{
1840 return vm_block_ep(vm_proc_block(procval));
1841}
1842
1843static inline const rb_iseq_t *
1844vm_block_iseq(const struct rb_block *block)
1845{
1846 switch (vm_block_type(block)) {
1847 case block_type_iseq: return rb_iseq_check(block->as.captured.code.iseq);
1848 case block_type_proc: return vm_proc_iseq(block->as.proc);
1849 case block_type_ifunc:
1850 case block_type_symbol: return NULL;
1851 }
1852 VM_UNREACHABLE(vm_block_iseq);
1853 return NULL;
1854}
1855
1856static inline const VALUE *
1857vm_block_ep(const struct rb_block *block)
1858{
1859 switch (vm_block_type(block)) {
1860 case block_type_iseq:
1861 case block_type_ifunc: return block->as.captured.ep;
1862 case block_type_proc: return vm_proc_ep(block->as.proc);
1863 case block_type_symbol: return NULL;
1864 }
1865 VM_UNREACHABLE(vm_block_ep);
1866 return NULL;
1867}
1868
1869static inline VALUE
1870vm_block_self(const struct rb_block *block)
1871{
1872 switch (vm_block_type(block)) {
1873 case block_type_iseq:
1874 case block_type_ifunc:
1875 return block->as.captured.self;
1876 case block_type_proc:
1877 return vm_block_self(vm_proc_block(block->as.proc));
1878 case block_type_symbol:
1879 return Qundef;
1880 }
1881 VM_UNREACHABLE(vm_block_self);
1882 return Qundef;
1883}
1884
1885static inline VALUE
1886VM_BH_TO_SYMBOL(VALUE block_handler)
1887{
1888 VM_ASSERT(SYMBOL_P(block_handler));
1889 return block_handler;
1890}
1891
1892static inline VALUE
1893VM_BH_FROM_SYMBOL(VALUE symbol)
1894{
1895 VM_ASSERT(SYMBOL_P(symbol));
1896 return symbol;
1897}
1898
1899static inline VALUE
1900VM_BH_TO_PROC(VALUE block_handler)
1901{
1902 VM_ASSERT(rb_obj_is_proc(block_handler));
1903 return block_handler;
1904}
1905
1906static inline VALUE
1907VM_BH_FROM_PROC(VALUE procval)
1908{
1909 VM_ASSERT(rb_obj_is_proc(procval));
1910 return procval;
1911}
1912
1913/* VM related object allocate functions */
1914VALUE rb_thread_alloc(VALUE klass);
1915VALUE rb_binding_alloc(VALUE klass);
1916VALUE rb_proc_alloc(VALUE klass);
1917VALUE rb_proc_dup(VALUE self);
1918
1919/* for debug */
1920extern bool rb_vmdebug_stack_dump_raw(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1921extern bool rb_vmdebug_debug_print_pre(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE *_pc, FILE *);
1922extern bool rb_vmdebug_debug_print_post(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, FILE *);
1923
1924#define SDR() rb_vmdebug_stack_dump_raw(GET_EC(), GET_EC()->cfp, stderr)
1925#define SDR2(cfp) rb_vmdebug_stack_dump_raw(GET_EC(), (cfp), stderr)
1926bool rb_vm_bugreport(const void *, FILE *);
1927typedef void (*ruby_sighandler_t)(int);
1928RBIMPL_ATTR_FORMAT(RBIMPL_PRINTF_FORMAT, 4, 5)
1929NORETURN(void rb_bug_for_fatal_signal(ruby_sighandler_t default_sighandler, int sig, const void *, const char *fmt, ...));
1930
1931/* functions about thread/vm execution */
1932RUBY_SYMBOL_EXPORT_BEGIN
1933VALUE rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box);
1934VALUE rb_iseq_eval_main(const rb_iseq_t *iseq);
1935VALUE rb_iseq_path(const rb_iseq_t *iseq);
1936VALUE rb_iseq_realpath(const rb_iseq_t *iseq);
1937RUBY_SYMBOL_EXPORT_END
1938
1939VALUE rb_iseq_pathobj_new(VALUE path, VALUE realpath);
1940void rb_iseq_pathobj_set(const rb_iseq_t *iseq, VALUE path, VALUE realpath);
1941
1942int rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp);
1943void rb_ec_setup_exception(const rb_execution_context_t *ec, VALUE mesg, VALUE cause);
1944
1945VALUE rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
1946
1947VALUE rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda);
1948static inline VALUE
1949rb_vm_make_proc(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1950{
1951 return rb_vm_make_proc_lambda(ec, captured, klass, 0);
1952}
1953
1954static inline VALUE
1955rb_vm_make_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass)
1956{
1957 return rb_vm_make_proc_lambda(ec, captured, klass, 1);
1958}

VALUE rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp);
VALUE rb_vm_env_local_variables(const rb_env_t *env);
VALUE rb_vm_env_numbered_parameters(const rb_env_t *env);
const rb_env_t *rb_vm_env_prev_env(const rb_env_t *env);
const VALUE *rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars);
void rb_vm_inc_const_missing_count(void);
VALUE rb_vm_call_kw(rb_execution_context_t *ec, VALUE recv, VALUE id, int argc,
                    const VALUE *argv, const rb_callable_method_entry_t *me, int kw_splat);
void rb_vm_pop_frame_no_int(rb_execution_context_t *ec);
void rb_vm_pop_frame(rb_execution_context_t *ec);

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);

static inline void
rb_vm_living_threads_init(rb_vm_t *vm)
{
    ccan_list_head_init(&vm->workqueue);
    ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
    ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}

typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);
rb_control_frame_t *rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
rb_control_frame_t *rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
VALUE *rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp);
int rb_vm_get_sourceline(const rb_control_frame_t *);
void rb_vm_stack_to_heap(rb_execution_context_t *ec);
void ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame);
void rb_thread_malloc_stack_set(rb_thread_t *th, void *stack);
rb_thread_t * ruby_thread_from_native(void);
int ruby_thread_set_native(rb_thread_t *th);
int rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp);
void rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp);
void rb_vm_env_write(const VALUE *ep, int index, VALUE v);
VALUE rb_vm_bh_to_procval(const rb_execution_context_t *ec, VALUE block_handler);

void rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE exception_class, VALUE mesg);

#define rb_vm_register_special_exception(sp, e, m) \
    rb_vm_register_special_exception_str(sp, e, rb_usascii_str_new_static((m), (long)rb_strlen_lit(m)))
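
/*
The macro above wraps the message literal in a static US-ASCII Ruby string,
so special exceptions carry preallocated messages that cost nothing at raise
time. A sketch of a registration call at VM boot, assuming the
`ruby_error_nomemory` slot and the `rb_eNoMemError` class:

```c
rb_vm_register_special_exception(ruby_error_nomemory, rb_eNoMemError,
                                 "failed to allocate memory");
```
*/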

void rb_gc_mark_machine_context(const rb_execution_context_t *ec);

rb_cref_t *rb_vm_rewrite_cref(rb_cref_t *node, VALUE old_klass, VALUE new_klass);

const rb_callable_method_entry_t *rb_vm_frame_method_entry(const rb_control_frame_t *cfp);
const rb_callable_method_entry_t *rb_vm_frame_method_entry_unchecked(const rb_control_frame_t *cfp);

#define sysstack_error GET_VM()->special_exceptions[ruby_error_sysstack]

#define CHECK_VM_STACK_OVERFLOW0(cfp, sp, margin) do {                       \
    STATIC_ASSERT(sizeof_sp,  sizeof(*(sp))  == sizeof(VALUE));              \
    STATIC_ASSERT(sizeof_cfp, sizeof(*(cfp)) == sizeof(rb_control_frame_t)); \
    const struct rb_control_frame_struct *bound = (void *)&(sp)[(margin)];   \
    if (UNLIKELY((cfp) <= &bound[1])) {                                      \
        vm_stackoverflow();                                                  \
    }                                                                        \
} while (0)

#define CHECK_VM_STACK_OVERFLOW(cfp, margin) \
    CHECK_VM_STACK_OVERFLOW0((cfp), (cfp)->sp, (margin))
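
/*
How the check works: the value stack (`sp`) grows upward while control
frames (`cfp`) are allocated downward from the opposite end of the same
region. `bound` is where the value stack would end after pushing `margin`
more VALUEs; if the current frame sits at or below one frame past that
bound, the two regions would collide, so `vm_stackoverflow()` raises the
preallocated stack-overflow error. In units of VALUE slots, the condition is
equivalent to:

```c
// overflow is detected when:
// ((VALUE *)cfp - sp) <= margin + sizeof(rb_control_frame_t) / sizeof(VALUE)
```
*/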

VALUE rb_catch_protect(VALUE t, rb_block_call_func *func, VALUE data, enum ruby_tag_type *stateptr);

rb_execution_context_t *rb_vm_main_ractor_ec(rb_vm_t *vm); // ractor.c

/* for thread */

#if RUBY_VM_THREAD_MODEL == 2

RUBY_EXTERN struct rb_ractor_struct *ruby_single_main_ractor; // ractor.c
RUBY_EXTERN rb_vm_t *ruby_current_vm_ptr;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_flags;
RUBY_EXTERN rb_event_flag_t ruby_vm_event_enabled_global_flags; // only ever added to
RUBY_EXTERN unsigned int ruby_vm_iseq_events_enabled;
RUBY_EXTERN unsigned int ruby_vm_c_events_enabled;

#define GET_VM()     rb_current_vm()
#define GET_RACTOR() rb_current_ractor()
#define GET_THREAD() rb_current_thread()
#define GET_EC()     rb_current_execution_context(true)

static inline rb_serial_t
rb_ec_serial(struct rb_execution_context_struct *ec)
{
    VM_ASSERT(ec->serial >= 1);
    return ec->serial;
}

static inline rb_thread_t *
rb_ec_thread_ptr(const rb_execution_context_t *ec)
{
    return ec->thread_ptr;
}

static inline rb_ractor_t *
rb_ec_ractor_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        VM_ASSERT(th->ractor != NULL);
        return th->ractor;
    }
    else {
        return NULL;
    }
}

static inline rb_vm_t *
rb_ec_vm_ptr(const rb_execution_context_t *ec)
{
    const rb_thread_t *th = rb_ec_thread_ptr(ec);
    if (th) {
        return th->vm;
    }
    else {
        return NULL;
    }
}

NOINLINE(struct rb_execution_context_struct *rb_current_ec_noinline(void));

static inline rb_execution_context_t *
rb_current_execution_context(bool expect_ec)
{
#ifdef RB_THREAD_LOCAL_SPECIFIER
  #ifdef RB_THREAD_CURRENT_EC_NOINLINE
    rb_execution_context_t * volatile ec = rb_current_ec();
  #else
    rb_execution_context_t * volatile ec = ruby_current_ec;
  #endif

    /* On shared objects, `__tls_get_addr()` is used to access TLS, and the
     * address of `ruby_current_ec` can end up cached in a function frame.
     * That cached address is misused after a coroutine migrates to another
     * native thread:
     *   1) Get `ptr = &ruby_current_ec` on NT1 and store it on the frame.
     *   2) Context-switch, and resume the coroutine on NT2.
     *   3) `ptr` is used on NT2, but it still points into NT1's TLS.
     * This assertion catches such misuse.
     *
     * To avoid accidents, `GET_EC()` should be called once per frame.
     * Note that inlining can reintroduce the problem.
     */
    VM_ASSERT(ec == rb_current_ec_noinline());
#else
    rb_execution_context_t * volatile ec = native_tls_get(ruby_current_ec_key);
#endif
    VM_ASSERT(!expect_ec || ec != NULL);
    return ec;
}
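
/*
A sketch of the pitfall described in the comment above; `do_something()` is
a hypothetical callee that may switch fibers and resume on another native
thread:

```c
void
bad_example(void)
{
    rb_execution_context_t *ec1 = GET_EC(); // resolves NT1's TLS slot
    do_something();                         // coroutine may resume on NT2 here
    rb_execution_context_t *ec2 = GET_EC(); // the compiler may reuse the TLS
                                            // address cached for NT1: wrong
    (void)ec1; (void)ec2;
}
```
*/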

static inline rb_thread_t *
rb_current_thread(void)
{
    const rb_execution_context_t *ec = GET_EC();
    return rb_ec_thread_ptr(ec);
}

static inline rb_ractor_t *
rb_current_ractor_raw(bool expect)
{
    if (ruby_single_main_ractor) {
        return ruby_single_main_ractor;
    }
    else {
        const rb_execution_context_t *ec = rb_current_execution_context(expect);
        return (expect || ec) ? rb_ec_ractor_ptr(ec) : NULL;
    }
}

static inline rb_ractor_t *
rb_current_ractor(void)
{
    return rb_current_ractor_raw(true);
}

static inline rb_vm_t *
rb_current_vm(void)
{
#if 0 // TODO: reconsider the assertions
    VM_ASSERT(ruby_current_vm_ptr == NULL ||
              ruby_current_execution_context_ptr == NULL ||
              rb_ec_thread_ptr(GET_EC()) == NULL ||
              rb_ec_thread_ptr(GET_EC())->status == THREAD_KILLED ||
              rb_ec_vm_ptr(GET_EC()) == ruby_current_vm_ptr);
#endif

    return ruby_current_vm_ptr;
}
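
/*
All of the `GET_*()` macros bottom out in the execution context: the thread
is reached via `ec->thread_ptr`, and the ractor and VM hang off the thread.
A minimal sketch of the derivation chain, using the accessors declared
above:

```c
rb_execution_context_t *ec = GET_EC();
rb_thread_t *th = rb_ec_thread_ptr(ec); // ec->thread_ptr
rb_ractor_t *cr = rb_ec_ractor_ptr(ec); // th->ractor (asserted non-NULL)
rb_vm_t     *vm = rb_ec_vm_ptr(ec);     // th->vm
```
*/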

void rb_ec_vm_lock_rec_release(const rb_execution_context_t *ec,
                               unsigned int recorded_lock_rec,
                               unsigned int current_lock_rec);

/* This is technically a data race, since the owner field is read without
 * holding the lock; however, we only compare it against a value that our
 * own thread writes. */
NO_SANITIZE("thread", static inline bool
vm_locked_by_ractor_p(rb_vm_t *vm, rb_ractor_t *cr))
{
    VM_ASSERT(cr == GET_RACTOR());
    return vm->ractor.sync.lock_owner == cr;
}

static inline unsigned int
rb_ec_vm_lock_rec(const rb_execution_context_t *ec)
{
    rb_vm_t *vm = rb_ec_vm_ptr(ec);

    if (!vm_locked_by_ractor_p(vm, rb_ec_ractor_ptr(ec))) {
        return 0;
    }
    else {
        return vm->ractor.sync.lock_rec;
    }
}
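
/*
`rb_ec_vm_lock_rec()` reports the VM-lock recursion depth as observed by the
calling ractor: zero when this ractor does not own `vm->ractor.sync.lock`,
otherwise the recorded recursion count. A minimal sketch of reading it:

```c
unsigned int rec = rb_ec_vm_lock_rec(ec);
if (rec > 0) {
    // this ractor holds the VM lock, `rec` levels deep
}
```
*/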

#else
#error "unsupported thread model"
#endif

enum {
    TIMER_INTERRUPT_MASK         = 0x01,
    PENDING_INTERRUPT_MASK       = 0x02,
    POSTPONED_JOB_INTERRUPT_MASK = 0x04,
    TRAP_INTERRUPT_MASK          = 0x08,
    TERMINATE_INTERRUPT_MASK     = 0x10,
    VM_BARRIER_INTERRUPT_MASK    = 0x20,
};

#define RUBY_VM_SET_TIMER_INTERRUPT(ec)         ATOMIC_OR((ec)->interrupt_flag, TIMER_INTERRUPT_MASK)
#define RUBY_VM_SET_INTERRUPT(ec)               ATOMIC_OR((ec)->interrupt_flag, PENDING_INTERRUPT_MASK)
#define RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(ec) ATOMIC_OR((ec)->interrupt_flag, POSTPONED_JOB_INTERRUPT_MASK)
#define RUBY_VM_SET_TRAP_INTERRUPT(ec)          ATOMIC_OR((ec)->interrupt_flag, TRAP_INTERRUPT_MASK)
#define RUBY_VM_SET_TERMINATE_INTERRUPT(ec)     ATOMIC_OR((ec)->interrupt_flag, TERMINATE_INTERRUPT_MASK)
#define RUBY_VM_SET_VM_BARRIER_INTERRUPT(ec)    ATOMIC_OR((ec)->interrupt_flag, VM_BARRIER_INTERRUPT_MASK)
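
/*
Each interrupt source owns one bit of `ec->interrupt_flag`, so sources can
be raised independently and tested with a single load; bits also present in
`ec->interrupt_mask` are temporarily ignored. A worked example with the
values from the enum above:

```c
// flag = TRAP | TIMER = 0x08 | 0x01 = 0x09
// mask = TRAP         = 0x08   (trap handling currently deferred)
// flag & ~mask        = 0x01   -> only the timer interrupt is pending
```
*/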

static inline bool
RUBY_VM_INTERRUPTED(rb_execution_context_t *ec)
{
    return (ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec->interrupt_mask) & (PENDING_INTERRUPT_MASK|TRAP_INTERRUPT_MASK));
}

static inline bool
RUBY_VM_INTERRUPTED_ANY(rb_execution_context_t *ec)
{
#if defined(USE_VM_CLOCK) && USE_VM_CLOCK
    uint32_t current_clock = rb_ec_vm_ptr(ec)->clock;

    if (current_clock != ec->checked_clock) {
        ec->checked_clock = current_clock;
        RUBY_VM_SET_TIMER_INTERRUPT(ec);
    }
#endif
    return ATOMIC_LOAD_RELAXED(ec->interrupt_flag) & ~(ec)->interrupt_mask;
}

VALUE rb_exc_set_backtrace(VALUE exc, VALUE bt);
int rb_signal_buff_size(void);
int rb_signal_exec(rb_thread_t *th, int sig);
void rb_threadptr_check_signal(rb_thread_t *mth);
void rb_threadptr_signal_raise(rb_thread_t *th, int sig);
void rb_threadptr_signal_exit(rb_thread_t *th);
int rb_threadptr_execute_interrupts(rb_thread_t *, int);
void rb_threadptr_interrupt(rb_thread_t *th);
void rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th);
void rb_threadptr_pending_interrupt_clear(rb_thread_t *th);
void rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v);
VALUE rb_ec_get_errinfo(const rb_execution_context_t *ec);
void rb_ec_error_print(rb_execution_context_t * volatile ec, volatile VALUE errinfo);
void rb_execution_context_update(rb_execution_context_t *ec);
void rb_execution_context_mark(const rb_execution_context_t *ec);
void rb_fiber_close(rb_fiber_t *fib);
void Init_native_thread(rb_thread_t *th);
int rb_vm_check_ints_blocking(rb_execution_context_t *ec);

// vm_sync.h
void rb_vm_cond_wait(rb_vm_t *vm, rb_nativethread_cond_t *cond);
void rb_vm_cond_timedwait(rb_vm_t *vm, rb_nativethread_cond_t *cond, unsigned long msec);

#define RUBY_VM_CHECK_INTS(ec) rb_vm_check_ints(ec)
static inline void
rb_vm_check_ints(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(ec == rb_current_ec_noinline());

    if (UNLIKELY(RUBY_VM_INTERRUPTED_ANY(ec))) {
        rb_threadptr_execute_interrupts(rb_ec_thread_ptr(ec), 0);
    }
}
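
/*
`RUBY_VM_CHECK_INTS()` is the polling point that long-running VM code is
expected to hit so that pending signals, termination requests, and postponed
jobs get serviced. A minimal sketch of a hypothetical loop (`heavy_step()`
is illustrative, not a real function):

```c
static void
churn(rb_execution_context_t *ec, long n)
{
    for (long i = 0; i < n; i++) {
        heavy_step(i);          // hypothetical unit of work
        RUBY_VM_CHECK_INTS(ec); // service interrupts between steps
    }
}
```
*/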

/* tracer */

struct rb_trace_arg_struct {
    rb_event_flag_t event;
    rb_execution_context_t *ec;
    const rb_control_frame_t *cfp;
    VALUE self;
    ID id;
    ID called_id;
    VALUE klass;
    VALUE data;

    int klass_solved;

    /* calc from cfp */
    int lineno;
    VALUE path;
};

void rb_hook_list_mark(rb_hook_list_t *hooks);
void rb_hook_list_mark_and_move(rb_hook_list_t *hooks);
void rb_hook_list_free(rb_hook_list_t *hooks);
void rb_hook_list_connect_local_tracepoint(rb_hook_list_t *list, VALUE tpval, unsigned int target_line);
bool rb_hook_list_remove_local_tracepoint(rb_hook_list_t *list, VALUE tpval);
unsigned int rb_hook_list_count(rb_hook_list_t *list);

void rb_exec_event_hooks(struct rb_trace_arg_struct *trace_arg, rb_hook_list_t *hooks, int pop_p);

#define EXEC_EVENT_HOOK_ORIG(ec_, hooks_, flag_, self_, id_, called_id_, klass_, data_, pop_p_) do { \
    const rb_event_flag_t flag_arg_ = (flag_); \
    rb_hook_list_t *hooks_arg_ = (hooks_); \
    if (UNLIKELY((hooks_arg_)->events & (flag_arg_))) { \
        /* defer evaluating the other arguments */ \
        rb_exec_event_hook_orig(ec_, hooks_arg_, flag_arg_, self_, id_, called_id_, klass_, data_, pop_p_); \
    } \
} while (0)
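
/*
Because the macro tests the event bit before expanding into the call, the
remaining arguments are evaluated only when a matching hook is enabled,
which matters when an argument is expensive to build. A sketch using
`EXEC_EVENT_HOOK` (defined below); the `script` value is illustrative:

```c
// rb_ary_new_from_args(...) runs only if a SCRIPT_COMPILED hook is
// registered; with no hook enabled, the statement is just a bit test.
EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                rb_ary_new_from_args(2, script, (VALUE)iseq));
```
*/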

static inline void
rb_exec_event_hook_orig(rb_execution_context_t *ec, rb_hook_list_t *hooks, rb_event_flag_t flag,
                        VALUE self, ID id, ID called_id, VALUE klass, VALUE data, int pop_p)
{
    struct rb_trace_arg_struct trace_arg;

    VM_ASSERT((hooks->events & flag) != 0);

    trace_arg.event = flag;
    trace_arg.ec = ec;
    trace_arg.cfp = ec->cfp;
    trace_arg.self = self;
    trace_arg.id = id;
    trace_arg.called_id = called_id;
    trace_arg.klass = klass;
    trace_arg.data = data;
    trace_arg.path = Qundef;
    trace_arg.klass_solved = 0;

    rb_exec_event_hooks(&trace_arg, hooks, pop_p);
}

struct rb_ractor_pub {
    VALUE self;
    uint32_t id;
    rb_hook_list_t hooks;
    st_table *targeted_hooks; // also called "local hooks". {ISEQ => hook_list, def => hook_list...}
    unsigned int targeted_hooks_cnt; // ex: tp.enabled(target: method(:puts))
};

static inline rb_hook_list_t *
rb_ec_ractor_hooks(const rb_execution_context_t *ec)
{
    struct rb_ractor_pub *cr_pub = (struct rb_ractor_pub *)rb_ec_ractor_ptr(ec);
    return &cr_pub->hooks;
}

static inline rb_hook_list_t *
rb_vm_global_hooks(const rb_execution_context_t *ec)
{
    return &rb_ec_vm_ptr(ec)->global_hooks;
}

static inline rb_hook_list_t *
rb_ec_hooks(const rb_execution_context_t *ec, rb_event_flag_t event)
{
    // Should be a single bit set
    VM_ASSERT(event != 0 && ((event - 1) & event) == 0);

    if (event & RUBY_INTERNAL_EVENT_OBJSPACE_MASK) {
        return rb_vm_global_hooks(ec);
    }
    else {
        return rb_ec_ractor_hooks(ec);
    }
}
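
/*
The assertion above uses the classic power-of-two identity: for nonzero `x`,
`((x - 1) & x) == 0` exactly when `x` has a single bit set. Worked example:

```c
// x = 0x08: (0x08 - 1) & 0x08 = 0x07 & 0x08 = 0x00 -> one bit, OK
// x = 0x0a: (0x0a - 1) & 0x0a = 0x09 & 0x0a = 0x08 -> two bits, assert fires
```
*/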

#define EXEC_EVENT_HOOK(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 0)

#define EXEC_EVENT_HOOK_AND_POP_FRAME(ec_, flag_, self_, id_, called_id_, klass_, data_) \
    EXEC_EVENT_HOOK_ORIG(ec_, rb_ec_hooks(ec_, flag_), flag_, self_, id_, called_id_, klass_, data_, 1)

static inline void
rb_exec_event_hook_script_compiled(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE eval_script)
{
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_SCRIPT_COMPILED, ec->cfp->self, 0, 0, 0,
                    NIL_P(eval_script) ? (VALUE)iseq :
                    rb_ary_new_from_args(2, eval_script, (VALUE)iseq));
}
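
/*
For RUBY_EVENT_SCRIPT_COMPILED the hook data is therefore either the iseq
itself (when no eval source string is available) or the two-element array
[eval_script, iseq]; presumably this is the payload that
TracePoint#eval_script and TracePoint#instruction_sequence unpack on the
Ruby side.
*/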

void rb_vm_trap_exit(rb_vm_t *vm);
void rb_vm_postponed_job_atfork(void); /* vm_trace.c */
void rb_vm_postponed_job_free(void); /* vm_trace.c */
size_t rb_vm_memsize_postponed_job_queue(void); /* vm_trace.c */
void rb_vm_postponed_job_queue_init(rb_vm_t *vm); /* vm_trace.c */

RUBY_SYMBOL_EXPORT_BEGIN

int rb_thread_check_trap_pending(void);

/* #define RUBY_EVENT_RESERVED_FOR_INTERNAL_USE 0x030000 */ /* from vm_core.h */
#define RUBY_EVENT_COVERAGE_LINE 0x010000
#define RUBY_EVENT_COVERAGE_BRANCH 0x020000

extern VALUE rb_get_coverages(void);
extern void rb_set_coverages(VALUE, int, VALUE);
extern void rb_clear_coverages(void);
extern void rb_reset_coverages(void);
extern void rb_resume_coverages(void);
extern void rb_suspend_coverages(void);

void rb_postponed_job_flush(rb_vm_t *vm);

// ractor.c
RUBY_EXTERN VALUE rb_eRactorUnsafeError;
RUBY_EXTERN VALUE rb_eRactorIsolationError;

RUBY_SYMBOL_EXPORT_END

#endif /* RUBY_VM_CORE_H */