// Glue code shared between YJIT and ZJIT for use from Rust.
// For FFI safety and bindgen compatibility reasons, certain types of C
// functions require wrapping before they can be called from Rust. Those show
// up here.
//
// Code specific to YJIT and ZJIT should go to yjit.c and zjit.c respectively.

#include "internal.h"
#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "insns.inc"
#include "insns_info.inc"
#include "iseq.h"
#include "internal/gc.h"
#include "vm_sync.h"
#include "internal/fixnum.h"

enum jit_bindgen_constants {
    // Field offsets for the RObject struct
    ROBJECT_OFFSET_AS_HEAP_FIELDS = offsetof(struct RObject, as.heap.fields),
    ROBJECT_OFFSET_AS_ARY = offsetof(struct RObject, as.ary),

    // Field offsets for the RString struct
    RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len)
};
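
/* Illustrative only (not part of the upstream file): a consumer of these
 * offsets, such as the Rust backends via bindgen, could reach an object's
 * instance-variable storage roughly like this, where `obj` is a hypothetical
 * T_OBJECT VALUE. The first line applies when the ivars live in a separate
 * heap buffer, the second when they are embedded in the object slot.
 *
 *     VALUE *heap_fields = *(VALUE **)((char *)obj + ROBJECT_OFFSET_AS_HEAP_FIELDS);
 *     VALUE *embedded_fields = (VALUE *)((char *)obj + ROBJECT_OFFSET_AS_ARY);
 */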

// Manually bound in Rust since this value is out of range for `int`,
// so it can't go in an `enum`, and we avoid `static const`
// to avoid allocating storage for the constant.
const shape_id_t rb_invalid_shape_id = INVALID_SHAPE_ID;

unsigned int
rb_iseq_encoded_size(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_size;
}

// Get the PC for a given index in an iseq
VALUE *
rb_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(insn_idx < iseq->body->iseq_size);
    VALUE *encoded = iseq->body->iseq_encoded;
    VALUE *pc = &encoded[insn_idx];
    return pc;
}

// Get the opcode given a program counter. Can return trace opcode variants.
int
rb_iseq_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
{
    // YJIT should only use iseqs after AST to bytecode compilation
    RUBY_ASSERT_ALWAYS(FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED));

    const VALUE at_pc = *pc;
    return rb_vm_insn_addr2opcode((const void *)at_pc);
}

unsigned long
rb_RSTRING_LEN(VALUE str)
{
    return RSTRING_LEN(str);
}

char *
rb_RSTRING_PTR(VALUE str)
{
    return RSTRING_PTR(str);
}

const char *
rb_insn_name(VALUE insn)
{
    return insn_name(insn);
}

unsigned int
rb_vm_ci_argc(const struct rb_callinfo *ci)
{
    return vm_ci_argc(ci);
}

ID
rb_vm_ci_mid(const struct rb_callinfo *ci)
{
    return vm_ci_mid(ci);
}

unsigned int
rb_vm_ci_flag(const struct rb_callinfo *ci)
{
    return vm_ci_flag(ci);
}

const struct rb_callinfo_kwarg *
rb_vm_ci_kwarg(const struct rb_callinfo *ci)
{
    return vm_ci_kwarg(ci);
}

int
rb_get_cikw_keyword_len(const struct rb_callinfo_kwarg *cikw)
{
    return cikw->keyword_len;
}

VALUE
rb_get_cikw_keywords_idx(const struct rb_callinfo_kwarg *cikw, int idx)
{
    return cikw->keywords[idx];
}

rb_method_visibility_t
rb_METHOD_ENTRY_VISI(const rb_callable_method_entry_t *me)
{
    return METHOD_ENTRY_VISI(me);
}

rb_method_type_t
rb_get_cme_def_type(const rb_callable_method_entry_t *cme)
{
    if (UNDEFINED_METHOD_ENTRY_P(cme)) {
        return VM_METHOD_TYPE_UNDEF;
    }
    else {
        return cme->def->type;
    }
}

ID
rb_get_cme_def_body_attr_id(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.attr.id;
}

enum method_optimized_type
rb_get_cme_def_body_optimized_type(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.type;
}

unsigned int
rb_get_cme_def_body_optimized_index(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.index;
}

const rb_method_cfunc_t *
rb_get_cme_def_body_cfunc(const rb_callable_method_entry_t *cme)
{
    return UNALIGNED_MEMBER_PTR(cme->def, body.cfunc);
}

uintptr_t
rb_get_def_method_serial(const rb_method_definition_t *def)
{
    return def->method_serial;
}

ID
rb_get_def_original_id(const rb_method_definition_t *def)
{
    return def->original_id;
}

VALUE
rb_get_def_bmethod_proc(rb_method_definition_t *def)
{
    RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
    return def->body.bmethod.proc;
}

rb_proc_t *
rb_jit_get_proc_ptr(VALUE procv)
{
    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    return proc;
}

int
rb_get_mct_argc(const rb_method_cfunc_t *mct)
{
    return mct->argc;
}

void *
rb_get_mct_func(const rb_method_cfunc_t *mct)
{
    return (void*)(uintptr_t)mct->func; // this field is defined as type VALUE (*func)(ANYARGS)
}

const rb_iseq_t *
rb_get_def_iseq_ptr(rb_method_definition_t *def)
{
    return def_iseq_ptr(def);
}

const rb_iseq_t *
rb_get_iseq_body_local_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->local_iseq;
}

const rb_iseq_t *
rb_get_iseq_body_parent_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->parent_iseq;
}

unsigned int
rb_get_iseq_body_local_table_size(const rb_iseq_t *iseq)
{
    return iseq->body->local_table_size;
}

VALUE *
rb_get_iseq_body_iseq_encoded(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_encoded;
}

unsigned
rb_get_iseq_body_stack_max(const rb_iseq_t *iseq)
{
    return iseq->body->stack_max;
}

enum rb_iseq_type
rb_get_iseq_body_type(const rb_iseq_t *iseq)
{
    return iseq->body->type;
}

bool
rb_get_iseq_flags_has_lead(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_lead;
}

bool
rb_get_iseq_flags_has_opt(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_opt;
}

bool
rb_get_iseq_flags_has_kw(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kw;
}

bool
rb_get_iseq_flags_has_post(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_post;
}

bool
rb_get_iseq_flags_has_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kwrest;
}

bool
rb_get_iseq_flags_anon_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.anon_kwrest;
}

bool
rb_get_iseq_flags_has_rest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_rest;
}

bool
rb_get_iseq_flags_ruby2_keywords(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ruby2_keywords;
}

bool
rb_get_iseq_flags_has_block(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_block;
}

bool
rb_get_iseq_flags_ambiguous_param0(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ambiguous_param0;
}

bool
rb_get_iseq_flags_accepts_no_kwarg(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.accepts_no_kwarg;
}

bool
rb_get_iseq_flags_forwardable(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.forwardable;
}

// This is defined only as a named struct inside rb_iseq_constant_body.
// By giving it a separate typedef, we make it nameable by rust-bindgen.
// Bindgen's temp/anon name isn't guaranteed stable.
typedef struct rb_iseq_param_keyword rb_iseq_param_keyword_struct;

const rb_iseq_param_keyword_struct *
rb_get_iseq_body_param_keyword(const rb_iseq_t *iseq)
{
    return iseq->body->param.keyword;
}

unsigned
rb_get_iseq_body_param_size(const rb_iseq_t *iseq)
{
    return iseq->body->param.size;
}

int
rb_get_iseq_body_param_lead_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.lead_num;
}

int
rb_get_iseq_body_param_opt_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_num;
}

const VALUE *
rb_get_iseq_body_param_opt_table(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_table;
}

struct rb_control_frame_struct *
rb_get_ec_cfp(const rb_execution_context_t *ec)
{
    return ec->cfp;
}

const rb_iseq_t *
rb_get_cfp_iseq(struct rb_control_frame_struct *cfp)
{
    return cfp->iseq;
}

VALUE *
rb_get_cfp_pc(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->pc;
}

VALUE *
rb_get_cfp_sp(struct rb_control_frame_struct *cfp)
{
    return cfp->sp;
}

VALUE
rb_get_cfp_self(struct rb_control_frame_struct *cfp)
{
    return cfp->self;
}

VALUE *
rb_get_cfp_ep(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->ep;
}

const VALUE *
rb_get_cfp_ep_level(struct rb_control_frame_struct *cfp, uint32_t lv)
{
    uint32_t i;
    const VALUE *ep = (VALUE*)cfp->ep;
    for (i = 0; i < lv; i++) {
        ep = VM_ENV_PREV_EP(ep);
    }
    return ep;
}

VALUE
rb_yarv_class_of(VALUE obj)
{
    return rb_class_of(obj);
}

// The FL_TEST() macro
VALUE
rb_FL_TEST(VALUE obj, VALUE flags)
{
    return RB_FL_TEST(obj, flags);
}

// The FL_TEST_RAW() macro, normally an internal implementation detail
VALUE
rb_FL_TEST_RAW(VALUE obj, VALUE flags)
{
    return FL_TEST_RAW(obj, flags);
}

// The RB_TYPE_P macro
bool
rb_RB_TYPE_P(VALUE obj, enum ruby_value_type t)
{
    return RB_TYPE_P(obj, t);
}

long
rb_RSTRUCT_LEN(VALUE st)
{
    return RSTRUCT_LEN(st);
}

const struct rb_callinfo *
rb_get_call_data_ci(const struct rb_call_data *cd)
{
    return cd->ci;
}

bool
rb_BASIC_OP_UNREDEFINED_P(enum ruby_basic_operators bop, uint32_t klass)
{
    return BASIC_OP_UNREDEFINED_P(bop, klass);
}

VALUE
rb_RCLASS_ORIGIN(VALUE c)
{
    return RCLASS_ORIGIN(c);
}

// For debug builds
void
rb_assert_iseq_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_iseq));
}

// Assert that we have the VM lock. Relevant mostly for multi-ractor situations.
// The GC takes the lock before calling us, and this asserts that it has indeed happened.
void
rb_assert_holding_vm_lock(void)
{
    ASSERT_vm_locking();
}

int
rb_IMEMO_TYPE_P(VALUE imemo, enum imemo_type imemo_type)
{
    return IMEMO_TYPE_P(imemo, imemo_type);
}

void
rb_assert_cme_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(handle));
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_ment));
}

// YJIT and ZJIT need this function to never allocate and never raise
VALUE
rb_yarv_ary_entry_internal(VALUE ary, long offset)
{
    return rb_ary_entry_internal(ary, offset);
}

long
rb_jit_array_len(VALUE a)
{
    return rb_array_len(a);
}

void
rb_set_cfp_pc(struct rb_control_frame_struct *cfp, const VALUE *pc)
{
    cfp->pc = pc;
}

void
rb_set_cfp_sp(struct rb_control_frame_struct *cfp, VALUE *sp)
{
    cfp->sp = sp;
}

bool
rb_jit_shape_too_complex_p(shape_id_t shape_id)
{
    return rb_shape_too_complex_p(shape_id);
}

bool
rb_jit_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

// Acquire the VM lock and then signal all other Ruby threads (ractors) to
// contend for the VM lock, putting them to sleep. ZJIT and YJIT use this to
// evict threads running inside generated code so that, among other things,
// they can safely change the memory protection of regions housing generated code.
void
rb_jit_vm_lock_then_barrier(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_enter(recursive_lock_level, file, line);
    rb_vm_barrier();
}

// Release the VM lock. The lock level must point to the same integer used to
// acquire the lock.
void
rb_jit_vm_unlock(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_leave(recursive_lock_level, file, line);
}
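
/* Usage sketch (illustrative, not part of the upstream file): the two calls
 * above are paired, the same counter must be passed to both, and any code
 * patching or page-permission change happens in between.
 *
 *     unsigned int lock_lev = 0;
 *     rb_jit_vm_lock_then_barrier(&lock_lev, __FILE__, __LINE__);
 *     // ... all other ractors are now stopped; safe to patch generated code ...
 *     rb_jit_vm_unlock(&lock_lev, __FILE__, __LINE__);
 */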

void
rb_iseq_reset_jit_func(const rb_iseq_t *iseq)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    iseq->body->jit_entry = NULL;
    iseq->body->jit_exception = NULL;
    // Enable re-compiling this ISEQ. Even when it's invalidated for TracePoint,
    // we'd like to re-compile ISEQs that haven't been converted to trace_* insns.
    iseq->body->jit_entry_calls = 0;
    iseq->body->jit_exception_calls = 0;
}

// Callback data for rb_jit_for_each_iseq
struct iseq_callback_data {
    rb_iseq_callback callback;
    void *data;
};

// Heap-walking callback for rb_jit_for_each_iseq
static int
for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
{
    const struct iseq_callback_data *callback_data = (struct iseq_callback_data *)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        void *ptr = rb_asan_poisoned_object_p(v);
        rb_asan_unpoison_object(v, false);

        if (rb_obj_is_iseq(v)) {
            rb_iseq_t *iseq = (rb_iseq_t *)v;
            callback_data->callback(iseq, callback_data->data);
        }

        if (ptr) {
            rb_asan_poison_object(v);
        }
    }
    return 0;
}

uint32_t
rb_jit_get_page_size(void)
{
#if defined(_SC_PAGESIZE)
    long page_size = sysconf(_SC_PAGESIZE);
    if (page_size <= 0) rb_bug("jit: failed to get page size");

    // 1 GiB limit. x86 CPUs with PDPE1GB can do this and anything larger is unexpected.
    // Though our design sort of assumes we have fine-grained control over memory protection,
    // which requires small page sizes.
    if (page_size > 0x40000000l) rb_bug("jit page size too large");

    return (uint32_t)page_size;
#else
#error "JIT supports POSIX only for now"
#endif
}

#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
// Align a pointer up to a multiple of the given number of bytes
static uint8_t *
align_ptr(uint8_t *ptr, uint32_t multiple)
{
    // Compute the pointer modulo the given alignment boundary
    uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;

    // If the pointer is already aligned, stop
    if (rem == 0)
        return ptr;

    // Pad the pointer by the necessary amount to align it
    uint32_t pad = multiple - rem;

    return ptr + pad;
}
#endif
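
/* Worked example (illustrative): with ptr == (uint8_t *)0x1003 and
 * multiple == 0x1000, rem == 3 and pad == 0xffd, so align_ptr() returns
 * (uint8_t *)0x2000, the next 0x1000-aligned address at or above ptr. */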

// Address space reservation. Memory pages are mapped on an as-needed basis.
// See the Rust mm module for details.
uint8_t *
rb_jit_reserve_addr_space(uint32_t mem_size)
{
#ifndef _WIN32
    uint8_t *mem_block;

    // On Linux
    #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
    uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
    uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rb_jit_reserve_addr_space;
    uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX;
    // Align the requested address to page size
    uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);

    // Probe for addresses close to this function using MAP_FIXED_NOREPLACE
    // to improve odds of being in range for 32-bit relative call instructions.
    do {
        mem_block = mmap(
            req_addr,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
            -1,
            0
        );

        // If we succeeded, stop
        if (mem_block != MAP_FAILED) {
            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_jit_reserve_addr_space");
            break;
        }

        // -4MiB. Downwards to probe away from the heap. (On x86/A64 Linux
        // main_code_addr < heap_addr, and in case we are in a shared
        // library mapped higher than the heap, downwards is still better
        // since it's towards the end of the heap rather than the stack.)
        req_addr -= 4 * 1024 * 1024;
    } while (req_addr < probe_region_end);

    // On MacOS and other platforms
    #else
    // Try to reserve a chunk of address space near this function
    mem_block = mmap(
        (void *)rb_jit_reserve_addr_space,
        mem_size,
        PROT_NONE,
        MAP_PRIVATE | MAP_ANONYMOUS,
        -1,
        0
    );
    #endif

    // Fallback
    if (mem_block == MAP_FAILED) {
        // Try again without the address hint (e.g., valgrind)
        mem_block = mmap(
            NULL,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );

        if (mem_block != MAP_FAILED) {
            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_jit_reserve_addr_space:fallback");
        }
    }

    // Check that the memory mapping was successful
    if (mem_block == MAP_FAILED) {
        perror("ruby: jit: mmap:");
        if (errno == ENOMEM) {
            // No crash report if it's only insufficient memory
            exit(EXIT_FAILURE);
        }
        rb_bug("mmap failed");
    }

    return mem_block;
#else
    // Windows not supported for now
    return NULL;
#endif
}

// Walk all ISEQs in the heap and invoke the callback - shared between YJIT and ZJIT
void
rb_jit_for_each_iseq(rb_iseq_callback callback, void *data)
{
    struct iseq_callback_data callback_data = { .callback = callback, .data = data };
    rb_objspace_each_objects(for_each_iseq_i, (void *)&callback_data);
}
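
/* Usage sketch (illustrative, not part of the upstream file): counting every
 * ISEQ currently on the heap. The callback receives the same (iseq, data)
 * pair that for_each_iseq_i passes above.
 *
 *     static void count_iseq(rb_iseq_t *iseq, void *data) { ++*(size_t *)data; }
 *
 *     size_t count = 0;
 *     rb_jit_for_each_iseq(count_iseq, &count);
 */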

bool
rb_jit_mark_writable(void *mem_block, uint32_t mem_size)
{
    return mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0;
}

void
rb_jit_mark_executable(void *mem_block, uint32_t mem_size)
{
    // Do not call mprotect when mem_size is zero. Some platforms may return
    // an error for it. https://github.com/Shopify/ruby/issues/450
    if (mem_size == 0) {
        return;
    }
    if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
        rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
            mem_block, (unsigned long)mem_size, strerror(errno));
    }
}

// Free the OS pages backing the specified memory block, keeping the address range reserved.
bool
rb_jit_mark_unused(void *mem_block, uint32_t mem_size)
{
    // On Linux, you need to use madvise MADV_DONTNEED to free memory.
    // We might not need to call this on macOS, but it's not really documented.
    // We generally prefer to do the same thing on both to ease testing too.
    madvise(mem_block, mem_size, MADV_DONTNEED);

    // On macOS, mprotect PROT_NONE seems to reduce RSS.
    // We also call this on Linux to avoid executing unused pages.
    return mprotect(mem_block, mem_size, PROT_NONE) == 0;
}

// Invalidate icache for arm64.
// `start` is inclusive and `end` is exclusive.
void
rb_jit_icache_invalidate(void *start, void *end)
{
    // Clear/invalidate the instruction cache. Compiles to nothing on x86_64
    // but required on ARM before running freshly written code.
    // On Darwin it's the same as calling sys_icache_invalidate().
#ifdef __GNUC__
    __builtin___clear_cache(start, end);
#elif defined(__aarch64__)
#error No instruction cache clear available with this compiler on Aarch64!
#endif
}
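
/* Putting the pieces together (illustrative sketch only; the real flow lives
 * in the Rust mm module). The region size and write length below are made up.
 * Reserve address space once, then for each write: make the page writable,
 * emit code, flip it back to executable, and invalidate the icache on arm64.
 *
 *     uint32_t page_size = rb_jit_get_page_size();
 *     uint8_t *mem = rb_jit_reserve_addr_space(64 * 1024 * 1024);
 *     uint32_t code_len = 128; // hypothetical amount of emitted machine code
 *
 *     rb_jit_mark_writable(mem, page_size);
 *     // ... write code_len bytes of machine code into [mem, mem + code_len) ...
 *     rb_jit_mark_executable(mem, page_size);
 *     rb_jit_icache_invalidate(mem, mem + code_len);
 */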

VALUE
rb_jit_fix_mod_fix(VALUE recv, VALUE obj)
{
    return rb_fix_mod_fix(recv, obj);
}