Ruby 4.1.0dev (2026-04-04 revision 3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
jit.c (3b6245536cf55da9e8bfcdb03c845fe9ef931d7f)
1// Glue code shared between YJIT and ZJIT for use from Rust.
2// For FFI safety and bindgen compatibility reasons, certain types of C
3// functions require wrapping before they can be called from Rust. Those show
4// up here.
5//
6// Code specific to YJIT and ZJIT should go to yjit.c and zjit.c respectively.
7
#include "internal.h"
#include "vm_core.h"
#include "vm_callinfo.h"
#include "builtin.h"
#include "insns.inc"
#include "insns_info.inc"
#include "iseq.h"
#include "internal/compile.h"
#include "internal/gc.h"
#include "vm_sync.h"
#include "internal/fixnum.h"
#include "internal/string.h"
#include "internal/class.h"
#include "internal/imemo.h"
#include "yjit.h"
#include "zjit.h"
24
25#ifndef _WIN32
26#include <sys/mman.h>
27#endif
28
// Struct-field offsets exported to Rust through bindgen. An enum is used so
// the values are compile-time constants visible to bindgen without allocating
// any storage.
enum jit_bindgen_constants {
    // Field offsets for the RObject struct
    ROBJECT_OFFSET_AS_HEAP_FIELDS = offsetof(struct RObject, as.heap.fields),
    ROBJECT_OFFSET_AS_ARY = offsetof(struct RObject, as.ary),

    // Field offset for prime classext's fields_obj from a class pointer
    RCLASS_OFFSET_PRIME_FIELDS_OBJ = offsetof(struct RClass_and_rb_classext_t, classext.fields_obj),

    // Field offset for fields_obj in RTypedData
    RTYPEDDATA_OFFSET_FIELDS_OBJ = offsetof(struct RTypedData, fields_obj),

    // Field offsets for the RString struct
    RUBY_OFFSET_RSTRING_LEN = offsetof(struct RString, len),

    // Field offsets for rb_execution_context_t
    RUBY_OFFSET_EC_CFP = offsetof(rb_execution_context_t, cfp),
    RUBY_OFFSET_EC_INTERRUPT_FLAG = offsetof(rb_execution_context_t, interrupt_flag),
    RUBY_OFFSET_EC_INTERRUPT_MASK = offsetof(rb_execution_context_t, interrupt_mask),
    RUBY_OFFSET_EC_THREAD_PTR = offsetof(rb_execution_context_t, thread_ptr),
    RUBY_OFFSET_EC_RACTOR_ID = offsetof(rb_execution_context_t, ractor_id),
};
50
// Manually bound in rust since this is out-of-range of `int`,
// so this can't be in a `enum`, and we avoid `static const`
// to avoid allocating storage for the constant.
const shape_id_t rb_invalid_shape_id = INVALID_SHAPE_ID;
55
// Number of VALUE slots in the iseq's encoded instruction sequence.
unsigned int
rb_iseq_encoded_size(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_size;
}
61
62// Get the PC for a given index in an iseq
63VALUE *
64rb_iseq_pc_at_idx(const rb_iseq_t *iseq, uint32_t insn_idx)
65{
66 RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
67 RUBY_ASSERT_ALWAYS(insn_idx < iseq->body->iseq_size);
68 VALUE *encoded = iseq->body->iseq_encoded;
69 VALUE *pc = &encoded[insn_idx];
70 return pc;
71}
72
73// Get the opcode given a program counter. Can return trace opcode variants.
74int
75rb_iseq_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
76{
77 // YJIT should only use iseqs after AST to bytecode compilation.
78 // (Certain non-default interpreter configurations never set ISEQ_TRANSLATED)
79 if (OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE) {
80 RUBY_ASSERT_ALWAYS(FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED));
81 }
82
83 const VALUE at_pc = *pc;
84 return rb_vm_insn_addr2opcode((const void *)at_pc);
85}
86
87// Get the bare opcode given a program counter. Always returns the base
88// instruction, stripping trace/zjit variants.
89int
90rb_iseq_bare_opcode_at_pc(const rb_iseq_t *iseq, const VALUE *pc)
91{
92 if (OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE) {
93 RUBY_ASSERT_ALWAYS(FL_TEST_RAW((VALUE)iseq, ISEQ_TRANSLATED));
94 }
95
96 const VALUE at_pc = *pc;
97 return rb_vm_insn_addr2insn((const void *)at_pc);
98}
99
// Wrapper for the RSTRING_LEN() macro so Rust can call it.
unsigned long
rb_RSTRING_LEN(VALUE str)
{
    return RSTRING_LEN(str);
}

// Wrapper for the RSTRING_PTR() macro so Rust can call it.
char *
rb_RSTRING_PTR(VALUE str)
{
    return RSTRING_PTR(str);
}

// Name of a VM instruction, given its opcode number.
const char *
rb_insn_name(VALUE insn)
{
    return insn_name(insn);
}
117
// The following wrap static-inline call-info accessors from vm_callinfo.h
// so they are callable from Rust.

// Argument count recorded in the call info.
unsigned int
rb_vm_ci_argc(const struct rb_callinfo *ci)
{
    return vm_ci_argc(ci);
}

// Method ID recorded in the call info.
ID
rb_vm_ci_mid(const struct rb_callinfo *ci)
{
    return vm_ci_mid(ci);
}

// Call flags (VM_CALL_*) recorded in the call info.
unsigned int
rb_vm_ci_flag(const struct rb_callinfo *ci)
{
    return vm_ci_flag(ci);
}

// Keyword-argument metadata of the call site, or NULL when there is none.
const struct rb_callinfo_kwarg *
rb_vm_ci_kwarg(const struct rb_callinfo *ci)
{
    return vm_ci_kwarg(ci);
}
141
// Number of keywords in a call site's keyword-argument metadata.
int
rb_get_cikw_keyword_len(const struct rb_callinfo_kwarg *cikw)
{
    return cikw->keyword_len;
}

// Keyword symbol at the given index; idx must be < keyword_len.
VALUE
rb_get_cikw_keywords_idx(const struct rb_callinfo_kwarg *cikw, int idx)
{
    return cikw->keywords[idx];
}
153
// Wrapper for the METHOD_ENTRY_VISI() macro (method visibility) for Rust.
rb_method_visibility_t
rb_METHOD_ENTRY_VISI(const rb_callable_method_entry_t *me)
{
    return METHOD_ENTRY_VISI(me);
}
159
160rb_method_type_t
161rb_get_cme_def_type(const rb_callable_method_entry_t *cme)
162{
163 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
164 return VM_METHOD_TYPE_UNDEF;
165 }
166 else {
167 return cme->def->type;
168 }
169}
170
// Attribute ID of an attr_reader/attr_writer method definition.
ID
rb_get_cme_def_body_attr_id(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.attr.id;
}

// Which optimized-method kind this definition is (see method_optimized_type).
enum method_optimized_type
rb_get_cme_def_body_optimized_type(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.type;
}

// Index payload of an optimized method definition.
unsigned int
rb_get_cme_def_body_optimized_index(const rb_callable_method_entry_t *cme)
{
    return cme->def->body.optimized.index;
}
188
190rb_get_cme_def_body_cfunc(const rb_callable_method_entry_t *cme)
191{
192 return UNALIGNED_MEMBER_PTR(cme->def, body.cfunc);
193}
194
// Serial number of a method definition, used to detect redefinition.
uintptr_t
rb_get_def_method_serial(const rb_method_definition_t *def)
{
    return def->method_serial;
}

// Original method ID of a definition (before aliasing).
ID
rb_get_def_original_id(const rb_method_definition_t *def)
{
    return def->original_id;
}

// Proc backing a bmethod (define_method) definition.
VALUE
rb_get_def_bmethod_proc(rb_method_definition_t *def)
{
    RUBY_ASSERT(def->type == VM_METHOD_TYPE_BMETHOD);
    return def->body.bmethod.proc;
}
213
// Unwrap a Proc VALUE into its rb_proc_t pointer (wraps the GetProcPtr macro).
rb_proc_t *
rb_jit_get_proc_ptr(VALUE procv)
{
    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    return proc;
}

// Invoke `recv` (a Proc) with the given arguments; used for optimized
// Proc#call-style dispatch from generated code.
VALUE
rb_optimized_call(VALUE recv, rb_execution_context_t *ec, int argc, VALUE *argv, int kw_splat, VALUE block_handler)
{
    rb_proc_t *proc;
    GetProcPtr(recv, proc);
    return rb_vm_invoke_proc(ec, proc, argc, argv, kw_splat, block_handler);
}
229
// Builtin attribute bits of an iseq (BUILTIN_ATTR_* flags).
unsigned int
rb_jit_iseq_builtin_attrs(const rb_iseq_t *iseq)
{
    return iseq->body->builtin_attrs;
}

// Declared arity of a cfunc method (-1/-2 have special meanings in the VM).
int
rb_get_mct_argc(const rb_method_cfunc_t *mct)
{
    return mct->argc;
}

// Raw function pointer of a cfunc method.
void *
rb_get_mct_func(const rb_method_cfunc_t *mct)
{
    return (void*)(uintptr_t)mct->func; // this field is defined as type VALUE (*func)(ANYARGS)
}

// The iseq backing an iseq-defined method (wraps static-inline def_iseq_ptr).
const rb_iseq_t *
rb_get_def_iseq_ptr(rb_method_definition_t *def)
{
    return def_iseq_ptr(def);
}
253
// Accessors for rb_iseq_constant_body fields, exposed for Rust.

// The iseq that owns this iseq's local table (the method/top-level iseq).
const rb_iseq_t *
rb_get_iseq_body_local_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->local_iseq;
}

// The lexically enclosing iseq, or NULL at the top level.
const rb_iseq_t *
rb_get_iseq_body_parent_iseq(const rb_iseq_t *iseq)
{
    return iseq->body->parent_iseq;
}

// Number of entries in the local variable table.
unsigned int
rb_get_iseq_body_local_table_size(const rb_iseq_t *iseq)
{
    return iseq->body->local_table_size;
}

// Pointer to the encoded instruction stream.
VALUE *
rb_get_iseq_body_iseq_encoded(const rb_iseq_t *iseq)
{
    return iseq->body->iseq_encoded;
}

// Maximum temporary-stack depth this iseq can use.
unsigned
rb_get_iseq_body_stack_max(const rb_iseq_t *iseq)
{
    return iseq->body->stack_max;
}

// Kind of iseq (method, block, eval, ...).
enum rb_iseq_type
rb_get_iseq_body_type(const rb_iseq_t *iseq)
{
    return iseq->body->type;
}
289
// Accessors for the iseq's parameter-flag bitfields (iseq->body->param.flags),
// one wrapper per flag so Rust can read them without knowing the bit layout.

bool
rb_get_iseq_flags_has_lead(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_lead;
}

bool
rb_get_iseq_flags_has_opt(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_opt;
}

bool
rb_get_iseq_flags_has_kw(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kw;
}

bool
rb_get_iseq_flags_has_post(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_post;
}

bool
rb_get_iseq_flags_has_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_kwrest;
}

bool
rb_get_iseq_flags_anon_kwrest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.anon_kwrest;
}

bool
rb_get_iseq_flags_has_rest(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_rest;
}

bool
rb_get_iseq_flags_ruby2_keywords(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ruby2_keywords;
}

bool
rb_get_iseq_flags_has_block(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.has_block;
}

bool
rb_get_iseq_flags_ambiguous_param0(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.ambiguous_param0;
}

bool
rb_get_iseq_flags_accepts_no_kwarg(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.accepts_no_kwarg;
}

bool
rb_get_iseq_flags_forwardable(const rb_iseq_t *iseq)
{
    return iseq->body->param.flags.forwardable;
}
361
// This is defined only as a named struct inside rb_iseq_constant_body.
// By giving it a separate typedef, we make it nameable by rust-bindgen.
// Bindgen's temp/anon name isn't guaranteed stable.
typedef struct rb_iseq_param_keyword rb_iseq_param_keyword_struct;

// Keyword-parameter descriptor of the iseq, or NULL when it takes none.
const rb_iseq_param_keyword_struct *
rb_get_iseq_body_param_keyword(const rb_iseq_t *iseq)
{
    return iseq->body->param.keyword;
}
372
// Total number of parameters the iseq declares.
unsigned
rb_get_iseq_body_param_size(const rb_iseq_t *iseq)
{
    return iseq->body->param.size;
}

// Number of required leading parameters.
int
rb_get_iseq_body_param_lead_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.lead_num;
}

// Number of optional parameters.
int
rb_get_iseq_body_param_opt_num(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_num;
}

// Jump-target table for optional-parameter entry points.
const VALUE *
rb_get_iseq_body_param_opt_table(const rb_iseq_t *iseq)
{
    return iseq->body->param.opt_table;
}
396
398rb_get_ec_cfp(const rb_execution_context_t *ec)
399{
400 return ec->cfp;
401}
402
// Accessors for rb_control_frame_struct fields, exposed for Rust.

// The iseq executing in this frame (wraps the CFP_ISEQ macro).
const rb_iseq_t *
rb_get_cfp_iseq(struct rb_control_frame_struct *cfp)
{
    return CFP_ISEQ(cfp);
}

// Program counter of the frame.
VALUE *
rb_get_cfp_pc(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->pc;
}

// Stack pointer of the frame.
VALUE *
rb_get_cfp_sp(struct rb_control_frame_struct *cfp)
{
    return cfp->sp;
}

// `self` of the frame.
VALUE
rb_get_cfp_self(struct rb_control_frame_struct *cfp)
{
    return cfp->self;
}

// Environment pointer of the frame (const cast away for the FFI signature).
VALUE *
rb_get_cfp_ep(struct rb_control_frame_struct *cfp)
{
    return (VALUE*)cfp->ep;
}
432
433const VALUE *
434rb_get_cfp_ep_level(struct rb_control_frame_struct *cfp, uint32_t lv)
435{
436 uint32_t i;
437 const VALUE *ep = (VALUE*)cfp->ep;
438 for (i = 0; i < lv; i++) {
439 ep = VM_ENV_PREV_EP(ep);
440 }
441 return ep;
442}
443
// Wrapper for the static-inline rb_class_of() so Rust can call it.
VALUE
rb_yarv_class_of(VALUE obj)
{
    return rb_class_of(obj);
}

// The FL_TEST() macro
VALUE
rb_FL_TEST(VALUE obj, VALUE flags)
{
    return RB_FL_TEST(obj, flags);
}

// The FL_TEST_RAW() macro, normally an internal implementation detail
VALUE
rb_FL_TEST_RAW(VALUE obj, VALUE flags)
{
    return FL_TEST_RAW(obj, flags);
}
463
// The RB_TYPE_P macro
bool
rb_RB_TYPE_P(VALUE obj, enum ruby_value_type t)
{
    return RB_TYPE_P(obj, t);
}

// Wrapper for the RSTRUCT_LEN() macro (number of Struct members) for Rust.
long
rb_RSTRUCT_LEN(VALUE st)
{
    return RSTRUCT_LEN(st);
}
476
// Call info part of a call-site's call data.
const struct rb_callinfo *
rb_get_call_data_ci(const struct rb_call_data *cd)
{
    return cd->ci;
}

// Whether the given basic operation (e.g. Integer#+) is still unredefined
// for the given class bitmask (wraps BASIC_OP_UNREDEFINED_P).
bool
rb_BASIC_OP_UNREDEFINED_P(enum ruby_basic_operators bop, uint32_t klass)
{
    return BASIC_OP_UNREDEFINED_P(bop, klass);
}

// Wrapper for the RCLASS_ORIGIN() macro for Rust.
VALUE
rb_RCLASS_ORIGIN(VALUE c)
{
    return RCLASS_ORIGIN(c);
}
494
// For debug builds
void
rb_assert_iseq_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_iseq));
}

// Assert that we have the VM lock. Relevant mostly for multi ractor situations.
// The GC takes the lock before calling us, and this asserts that it indeed happens.
void
rb_assert_holding_vm_lock(void)
{
    ASSERT_vm_locking();
}

// Wrapper for the IMEMO_TYPE_P() macro for Rust.
int
rb_IMEMO_TYPE_P(VALUE imemo, enum imemo_type imemo_type)
{
    return IMEMO_TYPE_P(imemo, imemo_type);
}

// Assert that `handle` is a live method-entry imemo (debug builds).
void
rb_assert_cme_handle(VALUE handle)
{
    RUBY_ASSERT_ALWAYS(!rb_objspace_garbage_object_p(handle));
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(handle, imemo_ment));
}
522
// YJIT and ZJIT need this function to never allocate and never raise
VALUE
rb_yarv_ary_entry_internal(VALUE ary, long offset)
{
    return rb_ary_entry_internal(ary, offset);
}

// Wrapper for the static-inline rb_array_len() for Rust.
long
rb_jit_array_len(VALUE a)
{
    return rb_array_len(a);
}
535
// Set the program counter of a control frame (used when exiting JIT code).
void
rb_set_cfp_pc(struct rb_control_frame_struct *cfp, const VALUE *pc)
{
    cfp->pc = pc;
}

// Set the stack pointer of a control frame.
void
rb_set_cfp_sp(struct rb_control_frame_struct *cfp, VALUE *sp)
{
    cfp->sp = sp;
}
547
// Wrapper for rb_shape_too_complex_p() so Rust can query shape complexity.
bool
rb_jit_shape_too_complex_p(shape_id_t shape_id)
{
    return rb_shape_too_complex_p(shape_id);
}

// Whether more than one ractor is running (wraps rb_multi_ractor_p()).
bool
rb_jit_multi_ractor_p(void)
{
    return rb_multi_ractor_p();
}

// True when the class has no fields object or its fields are stored inline
// (i.e. not flagged OBJ_FIELD_HEAP).
bool
rb_jit_class_fields_embedded_p(VALUE klass)
{
    VALUE fields_obj = RCLASS_EXT_PRIME(klass)->fields_obj;
    return !fields_obj || !FL_TEST_RAW(fields_obj, OBJ_FIELD_HEAP);
}

// Same check as above, for the fields object of a TypedData object.
bool
rb_jit_typed_data_fields_embedded_p(VALUE obj)
{
    VALUE fields_obj = RTYPEDDATA(obj)->fields_obj;
    return !fields_obj || !FL_TEST_RAW(fields_obj, OBJ_FIELD_HEAP);
}
573
// Acquire the VM lock and then signal all other Ruby threads (ractors) to
// contend for the VM lock, putting them to sleep. ZJIT and YJIT use this to
// evict threads running inside generated code so among other things, it can
// safely change memory protection of regions housing generated code.
void
rb_jit_vm_lock_then_barrier(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_enter(recursive_lock_level, file, line);
    rb_vm_barrier();
}

// Release the VM lock. The lock level must point to the same integer used to
// acquire the lock.
void
rb_jit_vm_unlock(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_leave(recursive_lock_level, file, line);
}
592
// Clear the iseq's JIT entry points and call counters so it can be
// re-compiled from scratch.
void
rb_iseq_reset_jit_func(const rb_iseq_t *iseq)
{
    RUBY_ASSERT_ALWAYS(IMEMO_TYPE_P(iseq, imemo_iseq));
    iseq->body->jit_entry = NULL;
    iseq->body->jit_exception = NULL;
    // Enable re-compiling this ISEQ. Even when it's invalidated for TracePoint,
    // we'd like to re-compile ISEQs that haven't been converted to trace_* insns.
    iseq->body->jit_entry_calls = 0;
    iseq->body->jit_exception_calls = 0;
}
604
605// Callback data for rb_jit_for_each_iseq
607 rb_iseq_callback callback;
608 void *data;
609};
610
// Heap-walking callback for rb_jit_for_each_iseq. Visits every slot in
// [vstart, vend) in steps of `stride` and invokes the user callback on each
// object that is an iseq. Returns 0 to continue the heap walk.
static int
for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
{
    const struct iseq_callback_data *callback_data = (struct iseq_callback_data *)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        // Remember the slot's ASAN poison state and unpoison it so the
        // object can be inspected.
        void *ptr = rb_asan_poisoned_object_p(v);
        rb_asan_unpoison_object(v, false);

        if (rb_obj_is_iseq(v)) {
            rb_iseq_t *iseq = (rb_iseq_t *)v;
            callback_data->callback(iseq, callback_data->data);
        }

        // Re-poison the slot if it was poisoned before we looked at it.
        if (ptr) {
            rb_asan_poison_object(v);
        }
    }
    return 0;
}
632
// Query the OS page size, with sanity checks. Used by the JITs to size and
// protect code pages.
uint32_t
rb_jit_get_page_size(void)
{
#if defined(_SC_PAGESIZE)
    long page_size = sysconf(_SC_PAGESIZE);
    if (page_size <= 0) rb_bug("jit: failed to get page size");

    // 1 GiB limit. x86 CPUs with PDPE1GB can do this and anything larger is unexpected.
    // Though our design sort of assume we have fine grained control over memory protection
    // which require small page sizes.
    if (page_size > 0x40000000l) rb_bug("jit page size too large");

    return (uint32_t)page_size;
#else
#error "JIT supports POSIX only for now"
#endif
}
650
#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
// Round `ptr` up to the next multiple-byte boundary; a no-op when the pointer
// is already aligned. (The modulo is taken on the low 32 bits of the address,
// matching how callers use it with page-sized alignments.)
static uint8_t *
align_ptr(uint8_t *ptr, uint32_t multiple)
{
    uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;
    return rem == 0 ? ptr : ptr + (multiple - rem);
}
#endif
669
// Address space reservation. Memory pages are mapped on an as needed basis.
// See the Rust mm module for details.
// Returns the reserved block, or NULL on Windows (unsupported). Aborts the
// process if no mapping can be obtained at all.
uint8_t *
rb_jit_reserve_addr_space(uint32_t mem_size)
{
#ifndef _WIN32
    uint8_t *mem_block;

    // On Linux
    #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
        uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
        // Use this function's own address as a proxy for where the program
        // text is mapped.
        uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rb_jit_reserve_addr_space;
        uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX;
        // Align the requested address to page size
        uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);

        // Probe for addresses close to this function using MAP_FIXED_NOREPLACE
        // to improve odds of being in range for 32-bit relative call instructions.
        do {
            mem_block = mmap(
                req_addr,
                mem_size,
                PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                -1,
                0
            );

            // If we succeeded, stop
            if (mem_block != MAP_FAILED) {
                ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_jit_reserve_addr_space");
                break;
            }

            // -4MiB. Downwards to probe away from the heap. (On x86/A64 Linux
            // main_code_addr < heap_addr, and in case we are in a shared
            // library mapped higher than the heap, downwards is still better
            // since it's towards the end of the heap rather than the stack.)
            req_addr -= 4 * 1024 * 1024;
        } while (req_addr < probe_region_end);

    // On MacOS and other platforms
    #else
        // Try to map a chunk of memory as executable
        mem_block = mmap(
            (void *)rb_jit_reserve_addr_space,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );
    #endif

    // Fallback
    if (mem_block == MAP_FAILED) {
        // Try again without the address hint (e.g., valgrind)
        mem_block = mmap(
            NULL,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );

        if (mem_block != MAP_FAILED) {
            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rb_jit_reserve_addr_space:fallback");
        }
    }

    // Check that the memory mapping was successful
    if (mem_block == MAP_FAILED) {
        perror("ruby: jit: mmap:");
        if(errno == ENOMEM) {
            // No crash report if it's only insufficient memory
            exit(EXIT_FAILURE);
        }
        rb_bug("mmap failed");
    }

    return mem_block;
#else
    // Windows not supported for now
    return NULL;
#endif
}
757
// Walk all ISEQs in the heap and invoke the callback - shared between YJIT and ZJIT
void
rb_jit_for_each_iseq(rb_iseq_callback callback, void *data)
{
    struct iseq_callback_data callback_data = { .callback = callback, .data = data };
    rb_objspace_each_objects(for_each_iseq_i, (void *)&callback_data);
}
765
// Make a memory region readable and writable; returns true on success.
bool
rb_jit_mark_writable(void *mem_block, uint32_t mem_size)
{
    return mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0;
}

// Make a memory region readable and executable; crashes on failure.
void
rb_jit_mark_executable(void *mem_block, uint32_t mem_size)
{
    // Do not call mprotect when mem_size is zero. Some platforms may return
    // an error for it. https://github.com/Shopify/ruby/issues/450
    if (mem_size == 0) {
        return;
    }
    if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
        rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
            mem_block, (unsigned long)mem_size, strerror(errno));
    }
}
785
// Free the specified memory block.
bool
rb_jit_mark_unused(void *mem_block, uint32_t mem_size)
{
    // On Linux, you need to use madvise MADV_DONTNEED to free memory.
    // We might not need to call this on macOS, but it's not really documented.
    // We generally prefer to do the same thing on both to ease testing too.
    madvise(mem_block, mem_size, MADV_DONTNEED);

    // On macOS, mprotect PROT_NONE seems to reduce RSS.
    // We also call this on Linux to avoid executing unused pages.
    return mprotect(mem_block, mem_size, PROT_NONE) == 0;
}
799
// Invalidate icache for arm64.
// `start` is inclusive and `end` is exclusive.
void
rb_jit_icache_invalidate(void *start, void *end)
{
    // Clear/invalidate the instruction cache. Compiles to nothing on x86_64
    // but required on ARM before running freshly written code.
    // On Darwin it's the same as calling sys_icache_invalidate().
#ifdef __GNUC__
    __builtin___clear_cache(start, end);
#elif defined(__aarch64__)
#error No instruction cache clear available with this compiler on Aarch64!
#endif
}
814
// Fixnum % Fixnum, wrapped for Rust (rb_fix_mod_fix is internal).
VALUE
rb_jit_fix_mod_fix(VALUE recv, VALUE obj)
{
    return rb_fix_mod_fix(recv, obj);
}

// Fixnum / Fixnum, wrapped for Rust (rb_fix_div_fix is internal).
VALUE
rb_jit_fix_div_fix(VALUE recv, VALUE obj)
{
    return rb_fix_div_fix(recv, obj);
}

// YJIT/ZJIT need this function to never allocate and never raise
VALUE
rb_yarv_str_eql_internal(VALUE str1, VALUE str2)
{
    // We wrap this since it's static inline
    return rb_str_eql_internal(str1, str2);
}
834
// Defined elsewhere; declared here so it is picked up for the Rust bindings.
void rb_jit_str_concat_codepoint(VALUE str, VALUE codepoint);

// Field capacity of a shape (wraps the RSHAPE_CAPACITY macro).
attr_index_t
rb_jit_shape_capacity(shape_id_t shape_id)
{
    return RSHAPE_CAPACITY(shape_id);
}
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:199
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
Tests if the given flag(s) are set or not.
Definition fl_type.h:430
#define FL_TEST_RAW
Old name of RB_FL_TEST_RAW.
Definition fl_type.h:128
static VALUE rb_class_of(VALUE obj)
Object to class mapping function.
Definition globals.h:174
Defines RBIMPL_HAS_BUILTIN.
int len
Length of the buffer.
Definition io.h:8
static long rb_array_len(VALUE a)
Queries the length of the array.
Definition rarray.h:255
static long RSTRUCT_LEN(VALUE st)
Returns the number of struct members.
Definition rstruct.h:82
Defines struct RTypedData.
#define RTYPEDDATA(obj)
Convenient casting macro.
Definition rtypeddata.h:96
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
Ruby's ordinal objects.
Definition robject.h:85
VALUE * fields
Pointer to a C array that holds instance variables.
Definition robject.h:99
struct RObject::@51::@52 heap
Object that use separated memory region for instance variables use this pattern.
Ruby's String.
Definition rstring.h:196
"Typed" user data.
Definition rtypeddata.h:384
Definition method.h:63
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
Definition value_type.h:376
ruby_value_type
C-level type of an object.
Definition value_type.h:113