Ruby 4.0.0dev (2025-12-24 revision e2cf92eddc5403316b0d449b02ba403a27610d7b)
jit.c (e2cf92eddc5403316b0d449b02ba403a27610d7b)
// Glue code shared between YJIT and ZJIT for use from Rust.
// For FFI safety and bindgen compatibility reasons, certain types of C
// functions require wrapping before they can be called from Rust. Those show
// up here.
//
// Code specific to YJIT and ZJIT should go to yjit.c and zjit.c respectively.

#include "internal.h"
#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "insns.inc"
#include "insns_info.inc"
#include "iseq.h"
#include "internal/gc.h"
#include "vm_sync.h"
#include "internal/fixnum.h"
#include "internal/string.h"

enum jit_bindgen_constants {
    // Field offsets for the RObject struct
    ROBJECT_OFFSET_AS_HEAP_FIELDS = offsetof(struct RObject, as.heap.fields),
    ROBJECT_OFFSET_AS_ARY = offsetof(struct RObject, as.ary),

    // Field offsets for the RString struct
    RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len),

    // Field offsets for rb_execution_context_t
    RUBY_OFFSET_EC_CFP = offsetof(rb_execution_context_t, cfp),
    RUBY_OFFSET_EC_INTERRUPT_FLAG = offsetof(rb_execution_context_t, interrupt_flag),
    RUBY_OFFSET_EC_INTERRUPT_MASK = offsetof(rb_execution_context_t, interrupt_mask),
    RUBY_OFFSET_EC_THREAD_PTR = offsetof(rb_execution_context_t, thread_ptr),
    RUBY_OFFSET_EC_RACTOR_ID = offsetof(rb_execution_context_t, ractor_id),
};

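// The JIT reads VM structs from generated code using these offsets. As an
// illustrative sketch only (hypothetical caller, not part of the build),
// loading ec->cfp through the offset amounts to this pointer arithmetic:
//
//     rb_control_frame_t *cfp =
//         *(rb_control_frame_t **)((char *)ec + RUBY_OFFSET_EC_CFP);
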
// Manually bound in Rust since this is out of range for `int`,
// so it can't go in an `enum`; we avoid `static const`
// to avoid allocating storage for the constant.
const shape_id_t rb_invalid_shape_id = INVALID_SHAPE_ID;

unsigned int
rb_iseq_encoded_size(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_size;
}

// Get the PC for a given index in an iseq
VALUE *
rb_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    RUBY_ASSERT_ALWAYS(insn_idx < iseq->body->iseq_size);
    VALUE *encoded = iseq->body->iseq_encoded;
    VALUE *pc = &encoded[insn_idx];
    return pc;
}

// Get the opcode given a program counter. Can return trace opcode variants.
int
rb_iseq_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
{
    // YJIT should only use iseqs after AST to bytecode compilation.
    // (Certain non-default interpreter configurations never set ISEQ_TRANSLATED.)
    if (OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE) {
        RUBY_ASSERT_ALWAYS(FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED));
    }

    const VALUE at_pc = *pc;
    return rb_vm_insn_addr2opcode((const void *)at_pc);
}

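// As an illustrative sketch (hypothetical caller), decoding the instruction
// at a given index composes the wrappers in this file:
//
//     VALUE *pc = rb_iseq_pc_at_idx(iseq, insn_idx);
//     int opcode = rb_iseq_opcode_at_pc(iseq, pc);
//     const char *name = rb_insn_name((VALUE)opcode); // e.g. "opt_plus"
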
unsigned long
rb_RSTRING_LEN(VALUE str)
{
    return RSTRING_LEN(str);
}

char *
rb_RSTRING_PTR(VALUE str)
{
    return RSTRING_PTR(str);
}

const char *
rb_insn_name(VALUE insn)
{
    return insn_name(insn);
}

unsigned int
rb_vm_ci_argc(const struct rb_callinfo *ci)
{
    return vm_ci_argc(ci);
}

ID
rb_vm_ci_mid(const struct rb_callinfo *ci)
{
    return vm_ci_mid(ci);
}

unsigned int
rb_vm_ci_flag(const struct rb_callinfo *ci)
{
    return vm_ci_flag(ci);
}

const struct rb_callinfo_kwarg *
rb_vm_ci_kwarg(const struct rb_callinfo *ci)
{
    return vm_ci_kwarg(ci);
}

int
rb_get_cikw_keyword_len(const struct rb_callinfo_kwarg *cikw)
{
    return cikw->keyword_len;
}

VALUE
rb_get_cikw_keywords_idx(const struct rb_callinfo_kwarg *cikw, int idx)
{
    return cikw->keywords[idx];
}

rb_method_visibility_t
rb_METHOD_ENTRY_VISI(const rb_callable_method_entry_t *me)
{
    return METHOD_ENTRY_VISI(me);
}

rb_method_type_t
rb_get_cme_def_type(const rb_callable_method_entry_t *cme)
{
    if (UNDEFINED_METHOD_ENTRY_P(cme)) {
        return VM_METHOD_TYPE_UNDEF;
    }
    else {
        return cme->def->type;
    }
}

ID
rb_get_cme_def_body_attr_id(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.attr.id;
}

enum method_optimized_type
rb_get_cme_def_body_optimized_type(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.type;
}

unsigned int
rb_get_cme_def_body_optimized_index(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.index;
}

const rb_method_cfunc_t *
rb_get_cme_def_body_cfunc(const rb_callable_method_entry_t *cme)
{
    return UNALIGNED_MEMBER_PTR(cme->def, body.cfunc);
}

uintptr_t
rb_get_def_method_serial(const rb_method_definition_t *def)
{
    return def->method_serial;
}

ID
rb_get_def_original_id(const rb_method_definition_t *def)
{
    return def->original_id;
}

VALUE
rb_get_def_bmethod_proc(rb_method_definition_t *def)
{
    RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
    return def->body.bmethod.proc;
}

rb_proc_t *
rb_jit_get_proc_ptr(VALUE procv)
{
    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    return proc;
}

unsigned int
rb_jit_iseq_builtin_attrs(const rb_iseq_t *iseq)
{
    return iseq->body->builtin_attrs;
}

int
rb_get_mct_argc(const rb_method_cfunc_t *mct)
{
    return mct->argc;
}

void *
rb_get_mct_func(const rb_method_cfunc_t *mct)
{
    return (void*)(uintptr_t)mct->func; // this field is defined as type VALUE (*func)(ANYARGS)
}

const rb_iseq_t *
rb_get_def_iseq_ptr(rb_method_definition_t *def)
{
    return def_iseq_ptr(def);
}

const rb_iseq_t *
rb_get_iseq_body_local_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->local_iseq;
}

const rb_iseq_t *
rb_get_iseq_body_parent_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->parent_iseq;
}

unsigned int
rb_get_iseq_body_local_table_size(const rb_iseq_t *iseq)
{
    return iseq->body->local_table_size;
}

VALUE *
rb_get_iseq_body_iseq_encoded(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_encoded;
}

unsigned
rb_get_iseq_body_stack_max(const rb_iseq_t *iseq)
{
    return iseq->body->stack_max;
}

enum rb_iseq_type
rb_get_iseq_body_type(const rb_iseq_t *iseq)
{
    return iseq->body->type;
}

bool
rb_get_iseq_flags_has_lead(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_lead;
}

bool
rb_get_iseq_flags_has_opt(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_opt;
}

bool
rb_get_iseq_flags_has_kw(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kw;
}

bool
rb_get_iseq_flags_has_post(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_post;
}

bool
rb_get_iseq_flags_has_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kwrest;
}

bool
rb_get_iseq_flags_anon_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.anon_kwrest;
}

bool
rb_get_iseq_flags_has_rest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_rest;
}

bool
rb_get_iseq_flags_ruby2_keywords(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ruby2_keywords;
}

bool
rb_get_iseq_flags_has_block(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_block;
}

bool
rb_get_iseq_flags_ambiguous_param0(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ambiguous_param0;
}

bool
rb_get_iseq_flags_accepts_no_kwarg(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.accepts_no_kwarg;
}

bool
rb_get_iseq_flags_forwardable(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.forwardable;
}

// This is defined only as a named struct inside rb_iseq_constant_body.
// By giving it a separate typedef, we make it nameable by rust-bindgen.
// Bindgen's temp/anon name isn't guaranteed stable.
typedef struct rb_iseq_param_keyword rb_iseq_param_keyword_struct;

const rb_iseq_param_keyword_struct *
rb_get_iseq_body_param_keyword(const rb_iseq_t *iseq)
{
    return iseq->body->param.keyword;
}

unsigned
rb_get_iseq_body_param_size(const rb_iseq_t *iseq)
{
    return iseq->body->param.size;
}

int
rb_get_iseq_body_param_lead_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.lead_num;
}

int
rb_get_iseq_body_param_opt_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_num;
}

const VALUE *
rb_get_iseq_body_param_opt_table(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_table;
}

rb_control_frame_t *
rb_get_ec_cfp(const rb_execution_context_t *ec)
{
    return ec->cfp;
}

const rb_iseq_t *
rb_get_cfp_iseq(struct rb_control_frame_struct *cfp)
{
    return cfp->iseq;
}

VALUE *
rb_get_cfp_pc(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->pc;
}

VALUE *
rb_get_cfp_sp(struct rb_control_frame_struct *cfp)
{
    return cfp->sp;
}

VALUE
rb_get_cfp_self(struct rb_control_frame_struct *cfp)
{
    return cfp->self;
}

VALUE *
rb_get_cfp_ep(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->ep;
}

const VALUE *
rb_get_cfp_ep_level(struct rb_control_frame_struct *cfp, uint32_t lv)
{
    uint32_t i;
    const VALUE *ep = (VALUE*)cfp->ep;
    for (i = 0; i < lv; i++) {
        ep = VM_ENV_PREV_EP(ep);
    }
    return ep;
}

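// rb_get_cfp_ep_level() walks the lexical scope chain: lv == 0 yields the
// frame's own EP, lv == 1 the enclosing scope's EP, and so on. Sketch of a
// hypothetical caller (the -3 slot index is illustrative only; locals live
// at negative offsets from the EP):
//
//     const VALUE *outer_ep = rb_get_cfp_ep_level(cfp, 1);
//     VALUE outer_local = outer_ep[-3];
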
VALUE
rb_yarv_class_of(VALUE obj)
{
    return rb_class_of(obj);
}

// The FL_TEST() macro
VALUE
rb_FL_TEST(VALUE obj, VALUE flags)
{
    return RB_FL_TEST(obj, flags);
}

// The FL_TEST_RAW() macro, normally an internal implementation detail
VALUE
rb_FL_TEST_RAW(VALUE obj, VALUE flags)
{
    return FL_TEST_RAW(obj, flags);
}

// The RB_TYPE_P macro
bool
rb_RB_TYPE_P(VALUE obj, enum ruby_value_type t)
{
    return RB_TYPE_P(obj, t);
}

long
rb_RSTRUCT_LEN(VALUE st)
{
    return RSTRUCT_LEN(st);
}

const struct rb_callinfo *
rb_get_call_data_ci(const struct rb_call_data *cd)
{
    return cd->ci;
}

bool
rb_BASIC_OP_UNREDEFINED_P(enum ruby_basic_operators bop, uint32_t klass)
{
    return BASIC_OP_UNREDEFINED_P(bop, klass);
}

VALUE
rb_RCLASS_ORIGIN(VALUE c)
{
    return RCLASS_ORIGIN(c);
}

// For debug builds
void
rb_assert_iseq_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_iseq));
}

// Assert that we have the VM lock. Relevant mostly for multi-ractor situations.
// The GC takes the lock before calling us, and this asserts that it indeed happened.
void
rb_assert_holding_vm_lock(void)
{
    ASSERT_vm_locking();
}

int
rb_IMEMO_TYPE_P(VALUE imemo, enum imemo_type imemo_type)
{
    return IMEMO_TYPE_P(imemo, imemo_type);
}

void
rb_assert_cme_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(handle));
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_ment));
}

// YJIT and ZJIT need this function to never allocate and never raise
VALUE
rb_yarv_ary_entry_internal(VALUE ary, long offset)
{
    return rb_ary_entry_internal(ary, offset);
}

long
rb_jit_array_len(VALUE a)
{
    return rb_array_len(a);
}

void
rb_set_cfp_pc(struct rb_control_frame_struct *cfp, const VALUE *pc)
{
    cfp->pc = pc;
}

void
rb_set_cfp_sp(struct rb_control_frame_struct *cfp, VALUE *sp)
{
    cfp->sp = sp;
}

bool
rb_jit_shape_too_complex_p(shape_id_t shape_id)
{
    return rb_shape_too_complex_p(shape_id);
}

bool
rb_jit_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

// Acquire the VM lock and then signal all other Ruby threads (ractors) to
// contend for the VM lock, putting them to sleep. ZJIT and YJIT use this to
// evict threads running inside generated code so that, among other things,
// they can safely change the memory protection of regions housing generated code.
void
rb_jit_vm_lock_then_barrier(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_enter(recursive_lock_level, file, line);
    rb_vm_barrier();
}

// Release the VM lock. The lock level must point to the same integer used to
// acquire the lock.
void
rb_jit_vm_unlock(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_leave(recursive_lock_level, file, line);
}

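// Sketch of how a caller pairs the two functions above; the same lock level
// variable must be shared between them (the patching step is illustrative):
//
//     unsigned int lock_lev = 0;
//     rb_jit_vm_lock_then_barrier(&lock_lev, __FILE__, __LINE__);
//     // ... mutate code pages while all other threads are parked ...
//     rb_jit_vm_unlock(&lock_lev, __FILE__, __LINE__);
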
void
rb_iseq_reset_jit_func(const rb_iseq_t *iseq)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    iseq->body->jit_entry = NULL;
    iseq->body->jit_exception = NULL;
    // Enable re-compiling this ISEQ. Even when it's invalidated for TracePoint,
    // we'd like to re-compile ISEQs that haven't been converted to trace_* insns.
    iseq->body->jit_entry_calls = 0;
    iseq->body->jit_exception_calls = 0;
}

// Callback data for rb_jit_for_each_iseq
struct iseq_callback_data {
    rb_iseq_callback callback;
    void *data;
};

// Heap-walking callback for rb_jit_for_each_iseq
static int
for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
{
    const struct iseq_callback_data *callback_data = (struct iseq_callback_data *)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        // Remember the slot's poison status and unpoison it so we can inspect it under ASAN
        void *ptr = rb_asan_poisoned_object_p(v);
        rb_asan_unpoison_object(v, false);

        if (rb_obj_is_iseq(v)) {
            rb_iseq_t *iseq = (rb_iseq_t *)v;
            callback_data->callback(iseq, callback_data->data);
        }

        // Re-poison the slot if it was poisoned when we found it
        if (ptr) {
            rb_asan_poison_object(v);
        }
    }
    return 0;
}

uint32_t
rb_jit_get_page_size(void)
{
#if defined(_SC_PAGESIZE)
    long page_size = sysconf(_SC_PAGESIZE);
    if (page_size <= 0) rb_bug("jit: failed to get page size");

    // 1 GiB limit. x86 CPUs with PDPE1GB can do this and anything larger is unexpected.
    // Though our design sort of assumes we have fine-grained control over memory
    // protection, which requires small page sizes.
    if (page_size > 0x40000000l) rb_bug("jit page size too large");

    return (uint32_t)page_size;
#else
#error "JIT supports POSIX only for now"
#endif
}

#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
// Round the given pointer up to the next multiple of `multiple` bytes
static uint8_t *
align_ptr(uint8_t *ptr, uint32_t multiple)
{
    // Compute the pointer modulo the given alignment boundary
    uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;

    // If the pointer is already aligned, stop
    if (rem == 0)
        return ptr;

    // Pad the pointer by the necessary amount to align it
    uint32_t pad = multiple - rem;

    return ptr + pad;
}
#endif

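// Worked example with made-up numbers: given multiple == 0x1000 (a 4 KiB
// page), a pointer at 0x7f0000001234 has rem == 0x234, so pad == 0xdcc and
// align_ptr() returns 0x7f0000002000. An already-aligned pointer is returned
// unchanged.
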
// Address space reservation. Memory pages are mapped on an as-needed basis.
// See the Rust mm module for details.
uint8_t *
rb_jit_reserve_addr_space(uint32_t mem_size)
{
#ifndef _WIN32
    uint8_t *mem_block;

    // On Linux
    #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
        uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
        uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rb_jit_reserve_addr_space;
        uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX;
        // Align the requested address to page size
        uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);

        // Probe for addresses close to this function using MAP_FIXED_NOREPLACE
        // to improve odds of being in range for 32-bit relative call instructions.
        do {
            mem_block = mmap(
                req_addr,
                mem_size,
                PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                -1,
                0
            );

            // If we succeeded, stop
            if (mem_block != MAP_FAILED) {
                ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_jit_reserve_addr_space");
                break;
            }

            // -4MiB. Downwards to probe away from the heap. (On x86/A64 Linux
            // main_code_addr < heap_addr, and in case we are in a shared
            // library mapped higher than the heap, downwards is still better
            // since it's towards the end of the heap rather than the stack.)
            req_addr -= 4 * 1024 * 1024;
        } while (req_addr < probe_region_end);

    // On macOS and other platforms
    #else
        // Reserve a chunk of memory, hinting at an address near this function
        mem_block = mmap(
            (void *)rb_jit_reserve_addr_space,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );
    #endif

    // Fallback
    if (mem_block == MAP_FAILED) {
        // Try again without the address hint (e.g., valgrind)
        mem_block = mmap(
            NULL,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );

        if (mem_block != MAP_FAILED) {
            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_jit_reserve_addr_space:fallback");
        }
    }

    // Check that the memory mapping was successful
    if (mem_block == MAP_FAILED) {
        perror("ruby: jit: mmap:");
        if (errno == ENOMEM) {
            // No crash report if it's only insufficient memory
            exit(EXIT_FAILURE);
        }
        rb_bug("mmap failed");
    }

    return mem_block;
#else
    // Windows not supported for now
    return NULL;
#endif
}

// Walk all ISEQs in the heap and invoke the callback - shared between YJIT and ZJIT
void
rb_jit_for_each_iseq(rb_iseq_callback callback, void *data)
{
    struct iseq_callback_data callback_data = { .callback = callback, .data = data };
    rb_objspace_each_objects(for_each_iseq_i, (void *)&callback_data);
}

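// Sketch of a hypothetical caller that counts every ISEQ in the heap
// (assuming rb_iseq_callback takes a const rb_iseq_t * and a void *):
//
//     static void count_iseq(const rb_iseq_t *iseq, void *data)
//     {
//         (*(size_t *)data)++;
//     }
//
//     size_t n = 0;
//     rb_jit_for_each_iseq(count_iseq, &n);
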
bool
rb_jit_mark_writable(void *mem_block, uint32_t mem_size)
{
    return mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0;
}

void
rb_jit_mark_executable(void *mem_block, uint32_t mem_size)
{
    // Do not call mprotect when mem_size is zero. Some platforms may return
    // an error for it. https://github.com/Shopify/ruby/issues/450
    if (mem_size == 0) {
        return;
    }
    if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
        rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
            mem_block, (unsigned long)mem_size, strerror(errno));
    }
}

// Free the specified memory block.
bool
rb_jit_mark_unused(void *mem_block, uint32_t mem_size)
{
    // On Linux, you need to use madvise MADV_DONTNEED to free memory.
    // We might not need to call this on macOS, but it's not really documented.
    // We generally prefer to do the same thing on both to ease testing too.
    madvise(mem_block, mem_size, MADV_DONTNEED);

    // On macOS, mprotect PROT_NONE seems to reduce RSS.
    // We also call this on Linux to avoid executing unused pages.
    return mprotect(mem_block, mem_size, PROT_NONE) == 0;
}

// Invalidate icache for arm64.
// `start` is inclusive and `end` is exclusive.
void
rb_jit_icache_invalidate(void *start, void *end)
{
    // Clear/invalidate the instruction cache. Compiles to nothing on x86_64
    // but required on ARM before running freshly written code.
    // On Darwin it's the same as calling sys_icache_invalidate().
#ifdef __GNUC__
    __builtin___clear_cache(start, end);
#elif defined(__aarch64__)
#error No instruction cache clear available with this compiler on Aarch64!
#endif
}

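// Putting the page-protection helpers together, a sketched code-patching
// sequence might look like this (`page`, `page_size`, `new_code`, and
// `code_size` are hypothetical names, not part of this file):
//
//     if (rb_jit_mark_writable(page, page_size)) {
//         memcpy(page, new_code, code_size);
//         rb_jit_mark_executable(page, page_size);
//         rb_jit_icache_invalidate(page, (uint8_t *)page + code_size);
//     }
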
VALUE
rb_jit_fix_mod_fix(VALUE recv, VALUE obj)
{
    return rb_fix_mod_fix(recv, obj);
}

VALUE
rb_jit_fix_div_fix(VALUE recv, VALUE obj)
{
    return rb_fix_div_fix(recv, obj);
}

// YJIT/ZJIT need this function to never allocate and never raise
VALUE
rb_yarv_str_eql_internal(VALUE str1, VALUE str2)
{
    // We wrap this since it's static inline
    return rb_str_eql_internal(str1, str2);
}

void rb_jit_str_concat_codepoint(VALUE str, VALUE codepoint);

attr_index_t
rb_jit_shape_capacity(shape_id_t shape_id)
{
    return RSHAPE_CAPACITY(shape_id);
}