Ruby 3.5.0dev (2025-05-01 revision 994dadfbf4d090e73f996bafcc9d3d64892a57c6)
zjit.c (994dadfbf4d090e73f996bafcc9d3d64892a57c6)
#include "internal.h"
#include "internal/sanitizers.h"
#include "internal/string.h"
#include "internal/hash.h"
#include "internal/variable.h"
#include "internal/compile.h"
#include "internal/class.h"
#include "internal/fixnum.h"
#include "internal/numeric.h"
#include "internal/gc.h"
#include "internal/vm.h"
#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "insns.inc"
#include "insns_info.inc"
#include "vm_sync.h"
#include "vm_insnhelper.h"
#include "probes.h"
#include "probes_helper.h"
#include "iseq.h"
#include "ruby/debug.h"
#include "internal/cont.h"
#include "zjit.h"

// For mmap(), sysconf()
#ifndef _WIN32
#include <unistd.h>
#include <sys/mman.h>
#endif

#include <errno.h>

uint32_t
rb_zjit_get_page_size(void)
{
#if defined(_SC_PAGESIZE)
    long page_size = sysconf(_SC_PAGESIZE);
    if (page_size <= 0) rb_bug("zjit: failed to get page size");

    // 1 GiB limit. x86 CPUs with PDPE1GB can do this and anything larger is unexpected.
    // Though our design sort of assumes we have fine-grained control over memory
    // protection, which requires small page sizes.
    if (page_size > 0x40000000l) rb_bug("zjit page size too large");

    return (uint32_t)page_size;
#else
#error "ZJIT supports POSIX only for now"
#endif
}

#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
// Align the current write position to a multiple of bytes
static uint8_t *
align_ptr(uint8_t *ptr, uint32_t multiple)
{
    // Compute the pointer modulo the given alignment boundary
    uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;

    // If the pointer is already aligned, stop
    if (rem == 0)
        return ptr;

    // Pad the pointer by the necessary amount to align it
    uint32_t pad = multiple - rem;

    return ptr + pad;
}
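
// A quick worked example (added for illustration, not part of the original code):
// with a 4 KiB page size, align_ptr((uint8_t *)0x1003, 0x1000) computes rem = 3
// and pad = 0xffd, returning (uint8_t *)0x2000; a pointer that is already
// page-aligned comes back unchanged.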
#endif

// Address space reservation. Memory pages are mapped on an as needed basis.
// See the Rust mm module for details.
uint8_t *
rb_zjit_reserve_addr_space(uint32_t mem_size)
{
#ifndef _WIN32
    uint8_t *mem_block;

    // On Linux
    #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
        uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
        uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rb_zjit_reserve_addr_space;
        uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX;
        // Align the requested address to page size
        uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);

        // Probe for addresses close to this function using MAP_FIXED_NOREPLACE
        // to improve odds of being in range for 32-bit relative call instructions.
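        // (Background note, added here: x86-64 near calls encode a signed 32-bit
        // displacement, so a callee is only reachable within roughly +/-2 GiB of
        // the call site; hence the preference for addresses near this function
        // and the INT32_MAX bound on the probe region above.)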
        do {
            mem_block = mmap(
                req_addr,
                mem_size,
                PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                -1,
                0
            );

            // If we succeeded, stop
            if (mem_block != MAP_FAILED) {
                ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_zjit_reserve_addr_space");
                break;
            }

            // -4MiB. Downwards to probe away from the heap. (On x86/A64 Linux
            // main_code_addr < heap_addr, and in case we are in a shared
            // library mapped higher than the heap, downwards is still better
            // since it's towards the end of the heap rather than the stack.)
            req_addr -= 4 * 1024 * 1024;
        } while (req_addr < probe_region_end);

    // On MacOS and other platforms
    #else
        // Try to map a chunk of memory as executable
        mem_block = mmap(
            (void *)rb_zjit_reserve_addr_space,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );
    #endif

    // Fallback
    if (mem_block == MAP_FAILED) {
        // Try again without the address hint (e.g., valgrind)
        mem_block = mmap(
            NULL,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );

        if (mem_block != MAP_FAILED) {
            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_zjit_reserve_addr_space:fallback");
        }
    }

    // Check that the memory mapping was successful
    if (mem_block == MAP_FAILED) {
        perror("ruby: zjit: mmap:");
        if (errno == ENOMEM) {
            // No crash report if it's only insufficient memory
            exit(EXIT_FAILURE);
        }
        rb_bug("mmap failed");
    }

    return mem_block;
#else
    // Windows not supported for now
    return NULL;
#endif
}

unsigned long
rb_RSTRING_LEN(VALUE str)
{
    return RSTRING_LEN(str);
}

char *
rb_RSTRING_PTR(VALUE str)
{
    return RSTRING_PTR(str);
}

void rb_zjit_profile_disable(const rb_iseq_t *iseq);

void
rb_zjit_compile_iseq(const rb_iseq_t *iseq, rb_execution_context_t *ec, bool jit_exception)
{
    RB_VM_LOCK_ENTER();
    rb_vm_barrier();

    // Convert ZJIT instructions back to bare instructions
    rb_zjit_profile_disable(iseq);

    // Compile a block version starting at the current instruction
    uint8_t *rb_zjit_iseq_gen_entry_point(const rb_iseq_t *iseq, rb_execution_context_t *ec); // defined in Rust
    uintptr_t code_ptr = (uintptr_t)rb_zjit_iseq_gen_entry_point(iseq, ec);

    // TODO: support jit_exception
    iseq->body->jit_entry = (rb_jit_func_t)code_ptr;

    RB_VM_LOCK_LEAVE();
}
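
// Note (an orientation comment added here, not in the original file): once
// body->jit_entry is populated, the VM's execution loop can dispatch this ISEQ
// into the generated code on subsequent calls; the dispatch itself lives in the
// VM, not in this file.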

unsigned int
rb_iseq_encoded_size(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_size;
}

// Get the opcode given a program counter. Can return trace opcode variants.
int
rb_iseq_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
{
    // ZJIT should only use iseqs after AST to bytecode compilation
    RUBY_ASSERT_ALWAYS(FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED));

    const VALUE at_pc = *pc;
    return rb_vm_insn_addr2opcode((const void *)at_pc);
}

// Get the PC for a given index in an iseq
VALUE *
rb_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(insn_idx < iseq->body->iseq_size);
    VALUE *encoded = iseq->body->iseq_encoded;
    VALUE *pc = &encoded[insn_idx];
    return pc;
}

const char *
rb_insn_name(VALUE insn)
{
    return insn_name(insn);
}

struct rb_control_frame_struct *
rb_get_ec_cfp(const rb_execution_context_t *ec)
{
    return ec->cfp;
}

const rb_iseq_t *
rb_get_cfp_iseq(struct rb_control_frame_struct *cfp)
{
    return cfp->iseq;
}

VALUE *
rb_get_cfp_pc(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->pc;
}

VALUE *
rb_get_cfp_sp(struct rb_control_frame_struct *cfp)
{
    return cfp->sp;
}

VALUE
rb_get_cfp_self(struct rb_control_frame_struct *cfp)
{
    return cfp->self;
}

VALUE *
rb_get_cfp_ep(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->ep;
}

const VALUE *
rb_get_cfp_ep_level(struct rb_control_frame_struct *cfp, uint32_t lv)
{
    uint32_t i;
    const VALUE *ep = (VALUE*)cfp->ep;
    for (i = 0; i < lv; i++) {
        ep = VM_ENV_PREV_EP(ep);
    }
    return ep;
}
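
// For example, rb_get_cfp_ep_level(cfp, 0) returns cfp->ep itself, while
// lv == 1 follows one VM_ENV_PREV_EP link to the environment of the enclosing
// scope.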

extern VALUE *rb_vm_base_ptr(struct rb_control_frame_struct *cfp);

rb_method_type_t
rb_get_cme_def_type(const rb_callable_method_entry_t *cme)
{
    if (UNDEFINED_METHOD_ENTRY_P(cme)) {
        return VM_METHOD_TYPE_UNDEF;
    }
    else {
        return cme->def->type;
    }
}

ID
rb_get_cme_def_body_attr_id(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.attr.id;
}

enum method_optimized_type
rb_get_cme_def_body_optimized_type(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.type;
}

unsigned int
rb_get_cme_def_body_optimized_index(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.index;
}

const rb_method_cfunc_t *
rb_get_cme_def_body_cfunc(const rb_callable_method_entry_t *cme)
{
    return UNALIGNED_MEMBER_PTR(cme->def, body.cfunc);
}

uintptr_t
rb_get_def_method_serial(const rb_method_definition_t *def)
{
    return def->method_serial;
}

ID
rb_get_def_original_id(const rb_method_definition_t *def)
{
    return def->original_id;
}

int
rb_get_mct_argc(const rb_method_cfunc_t *mct)
{
    return mct->argc;
}

void *
rb_get_mct_func(const rb_method_cfunc_t *mct)
{
    return (void*)(uintptr_t)mct->func; // this field is defined as type VALUE (*func)(ANYARGS)
}

const rb_iseq_t *
rb_get_def_iseq_ptr(rb_method_definition_t *def)
{
    return def_iseq_ptr(def);
}

const rb_iseq_t *
rb_get_iseq_body_local_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->local_iseq;
}

VALUE *
rb_get_iseq_body_iseq_encoded(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_encoded;
}

unsigned
rb_get_iseq_body_stack_max(const rb_iseq_t *iseq)
{
    return iseq->body->stack_max;
}

enum rb_iseq_type
rb_get_iseq_body_type(const rb_iseq_t *iseq)
{
    return iseq->body->type;
}

bool
rb_get_iseq_flags_has_lead(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_lead;
}

bool
rb_get_iseq_flags_has_opt(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_opt;
}

bool
rb_get_iseq_flags_has_kw(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kw;
}

bool
rb_get_iseq_flags_has_post(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_post;
}

bool
rb_get_iseq_flags_has_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kwrest;
}

bool
rb_get_iseq_flags_anon_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.anon_kwrest;
}

bool
rb_get_iseq_flags_has_rest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_rest;
}

bool
rb_get_iseq_flags_ruby2_keywords(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ruby2_keywords;
}

bool
rb_get_iseq_flags_has_block(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_block;
}

bool
rb_get_iseq_flags_ambiguous_param0(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ambiguous_param0;
}

bool
rb_get_iseq_flags_accepts_no_kwarg(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.accepts_no_kwarg;
}

bool
rb_get_iseq_flags_forwardable(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.forwardable;
}

// This is defined only as a named struct inside rb_iseq_constant_body.
// By giving it a separate typedef, we make it nameable by rust-bindgen.
// Bindgen's temp/anon name isn't guaranteed stable.
typedef struct rb_iseq_param_keyword rb_iseq_param_keyword_struct;

const rb_iseq_param_keyword_struct *
rb_get_iseq_body_param_keyword(const rb_iseq_t *iseq)
{
    return iseq->body->param.keyword;
}

unsigned
rb_get_iseq_body_param_size(const rb_iseq_t *iseq)
{
    return iseq->body->param.size;
}

int
rb_get_iseq_body_param_lead_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.lead_num;
}

int
rb_get_iseq_body_param_opt_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_num;
}

const VALUE *
rb_get_iseq_body_param_opt_table(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_table;
}

unsigned int
rb_get_iseq_body_local_table_size(const rb_iseq_t *iseq)
{
    return iseq->body->local_table_size;
}

int
rb_get_cikw_keyword_len(const struct rb_callinfo_kwarg *cikw)
{
    return cikw->keyword_len;
}

VALUE
rb_get_cikw_keywords_idx(const struct rb_callinfo_kwarg *cikw, int idx)
{
    return cikw->keywords[idx];
}

const struct rb_callinfo *
rb_get_call_data_ci(const struct rb_call_data *cd)
{
    return cd->ci;
}

// The FL_TEST() macro
VALUE
rb_FL_TEST(VALUE obj, VALUE flags)
{
    return RB_FL_TEST(obj, flags);
}

// The FL_TEST_RAW() macro, normally an internal implementation detail
VALUE
rb_FL_TEST_RAW(VALUE obj, VALUE flags)
{
    return FL_TEST_RAW(obj, flags);
}

// The RB_TYPE_P macro
bool
rb_RB_TYPE_P(VALUE obj, enum ruby_value_type t)
{
    return RB_TYPE_P(obj, t);
}

long
rb_RSTRUCT_LEN(VALUE st)
{
    return RSTRUCT_LEN(st);
}

bool
rb_BASIC_OP_UNREDEFINED_P(enum ruby_basic_operators bop, uint32_t klass)
{
    return BASIC_OP_UNREDEFINED_P(bop, klass);
}

bool
rb_zjit_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

// For debug builds
void
rb_assert_iseq_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_iseq));
}

bool
rb_zjit_constcache_shareable(const struct iseq_inline_constant_cache_entry *ice)
{
    return (ice->flags & IMEMO_CONST_CACHE_SHAREABLE) != 0;
}

void
rb_assert_cme_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(handle));
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_ment));
}

int
rb_IMEMO_TYPE_P(VALUE imemo, enum imemo_type imemo_type)
{
    return IMEMO_TYPE_P(imemo, imemo_type);
}

// Release the VM lock. The lock level must point to the same integer used to
// acquire the lock.
void
rb_zjit_vm_unlock(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_leave(recursive_lock_level, file, line);
}

bool
rb_zjit_mark_writable(void *mem_block, uint32_t mem_size)
{
    return mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0;
}

void
rb_zjit_mark_executable(void *mem_block, uint32_t mem_size)
{
    // Do not call mprotect when mem_size is zero. Some platforms may return
    // an error for it. https://github.com/Shopify/ruby/issues/450
    if (mem_size == 0) {
        return;
    }
    if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
        rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
               mem_block, (unsigned long)mem_size, strerror(errno));
    }
}

// Free the specified memory block.
bool
rb_zjit_mark_unused(void *mem_block, uint32_t mem_size)
{
    // On Linux, you need to use madvise MADV_DONTNEED to free memory.
    // We might not need to call this on macOS, but it's not really documented.
    // We generally prefer to do the same thing on both to ease testing too.
    madvise(mem_block, mem_size, MADV_DONTNEED);

    // On macOS, mprotect PROT_NONE seems to reduce RSS.
    // We also call this on Linux to avoid executing unused pages.
    return mprotect(mem_block, mem_size, PROT_NONE) == 0;
}

// Invalidate icache for arm64.
// `start` is inclusive and `end` is exclusive.
void
rb_zjit_icache_invalidate(void *start, void *end)
{
    // Clear/invalidate the instruction cache. Compiles to nothing on x86_64
    // but required on ARM before running freshly written code.
    // On Darwin it's the same as calling sys_icache_invalidate().
#ifdef __GNUC__
    __builtin___clear_cache(start, end);
#elif defined(__aarch64__)
#error No instruction cache clear available with this compiler on Aarch64!
#endif
}
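
// A sketch of how the protection helpers above are intended to compose when
// publishing freshly generated code (illustrative only; the actual sequencing
// is driven from the Rust side of ZJIT):
//
//     rb_zjit_mark_writable(page, size);                  // RW- while patching
//     /* ... write machine code into the page ... */
//     rb_zjit_mark_executable(page, size);                // R-X before running it
//     rb_zjit_icache_invalidate(page, (uint8_t *)page + size); // required on arm64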

unsigned int
rb_vm_ci_argc(const struct rb_callinfo *ci)
{
    return vm_ci_argc(ci);
}

ID
rb_vm_ci_mid(const struct rb_callinfo *ci)
{
    return vm_ci_mid(ci);
}

unsigned int
rb_vm_ci_flag(const struct rb_callinfo *ci)
{
    return vm_ci_flag(ci);
}

const struct rb_callinfo_kwarg *
rb_vm_ci_kwarg(const struct rb_callinfo *ci)
{
    return vm_ci_kwarg(ci);
}

rb_method_visibility_t
rb_METHOD_ENTRY_VISI(const rb_callable_method_entry_t *me)
{
    return METHOD_ENTRY_VISI(me);
}

VALUE
rb_yarv_class_of(VALUE obj)
{
    return rb_class_of(obj);
}

// Acquire the VM lock and then signal all other Ruby threads (ractors) to
// contend for the VM lock, putting them to sleep. ZJIT uses this to evict
// threads running inside generated code so among other things, it can
// safely change memory protection of regions housing generated code.
void
rb_zjit_vm_lock_then_barrier(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_enter(recursive_lock_level, file, line);
    rb_vm_barrier();
}
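
// Usage sketch (illustrative only; real callers live on the Rust side): the
// same integer must later be handed to rb_zjit_vm_unlock(), defined above.
//
//     unsigned int lock_lev = 0;
//     rb_zjit_vm_lock_then_barrier(&lock_lev, __FILE__, __LINE__);
//     /* ... mutate code memory while other ractors are stopped ... */
//     rb_zjit_vm_unlock(&lock_lev, __FILE__, __LINE__);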

VALUE
rb_RCLASS_ORIGIN(VALUE c)
{
    return RCLASS_ORIGIN(c);
}

// Convert a given ISEQ's instructions to zjit_* instructions
void
rb_zjit_profile_enable(const rb_iseq_t *iseq)
{
    // This table encodes an opcode into the instruction's address
    const void *const *insn_table = rb_vm_get_insns_address_table();

    unsigned int insn_idx = 0;
    while (insn_idx < iseq->body->iseq_size) {
        int insn = rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[insn_idx]);
        int zjit_insn = vm_bare_insn_to_zjit_insn(insn);
        if (insn != zjit_insn) {
            iseq->body->iseq_encoded[insn_idx] = (VALUE)insn_table[zjit_insn];
        }
        insn_idx += insn_len(insn);
    }
}

// Convert a given ISEQ's ZJIT instructions to bare instructions
void
rb_zjit_profile_disable(const rb_iseq_t *iseq)
{
    // This table encodes an opcode into the instruction's address
    const void *const *insn_table = rb_vm_get_insns_address_table();

    unsigned int insn_idx = 0;
    while (insn_idx < iseq->body->iseq_size) {
        int insn = rb_vm_insn_addr2opcode((void *)iseq->body->iseq_encoded[insn_idx]);
        int bare_insn = vm_zjit_insn_to_bare_insn(insn);
        if (insn != bare_insn) {
            iseq->body->iseq_encoded[insn_idx] = (VALUE)insn_table[bare_insn];
        }
        insn_idx += insn_len(insn);
    }
}

// Get profiling information for ISEQ
void *
rb_iseq_get_zjit_payload(const rb_iseq_t *iseq)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    if (iseq->body) {
        return iseq->body->zjit_payload;
    }
    else {
        // Body is NULL when constructing the iseq.
        return NULL;
    }
}

// Set profiling information for ISEQ
void
rb_iseq_set_zjit_payload(const rb_iseq_t *iseq, void *payload)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(iseq->body);
    RUBY_ASSERT_ALWAYS(NULL == iseq->body->zjit_payload);
    iseq->body->zjit_payload = payload;
}

// Primitives used by zjit.rb
VALUE rb_zjit_assert_compiles(rb_execution_context_t *ec, VALUE self);

void
rb_zjit_print_exception(void)
{
    VALUE exception = rb_errinfo();
    rb_set_errinfo(Qnil);
    assert(RTEST(exception));
    rb_warn("Ruby error: %"PRIsVALUE"", rb_funcall(exception, rb_intern("full_message"), 0));
}

// Preprocessed zjit.rb generated during build
#include "zjit.rbinc"