#include "internal/sanitizers.h"
#include "internal/string.h"
#include "internal/hash.h"
#include "internal/variable.h"
#include "internal/compile.h"
#include "internal/class.h"
#include "internal/fixnum.h"
#include "internal/numeric.h"
#include "internal/gc.h"
#include "internal/vm.h"
#include "vm_callinfo.h"
#include "insns_info.inc"
#include "vm_insnhelper.h"
#include "probes_helper.h"
#include "internal/cont.h"

// System headers used by the JIT glue below (mmap/mprotect/madvise, sysconf,
// errno, strerror, exit).
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
// Query the OS page size, used to size and align ZJIT's code pages.
// Aborts the process if the page size cannot be determined or is
// implausibly large for fine-grained memory protection.
uint32_t
rb_zjit_get_page_size(void)
{
#if defined(_SC_PAGESIZE)
    long page_size = sysconf(_SC_PAGESIZE);
    if (page_size <= 0) rb_bug("zjit: failed to get page size");

    // The cast below loses no information as long as the page size stays at
    // or below 1 GiB; anything larger is unexpected and would break the
    // assumption of fine-grained control over memory protection.
    if (page_size > 0x40000000l) rb_bug("zjit page size too large");

    return (uint32_t)page_size;
#else
#error "ZJIT supports POSIX only for now"
#endif
}
// Round `ptr` up to the next multiple of `multiple` (e.g. the page size).
// Returns `ptr` unchanged when it is already aligned.
// NOTE(review): the extraction showed an unmatched
// `#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)` guard here with
// no visible `#endif`; the helper is harmless to define unconditionally, so
// the guard is dropped to restore balanced preprocessor conditionals.
static uint8_t *
align_ptr(uint8_t *ptr, uint32_t multiple)
{
    // Distance past the previous alignment boundary
    uint32_t rem = ((uint32_t)(uintptr_t)ptr) % multiple;

    // Already aligned: nothing to do
    if (rem == 0)
        return ptr;

    // Pad the pointer by the necessary amount to align it
    uint32_t pad = multiple - rem;

    return ptr + pad;
}
74rb_zjit_reserve_addr_space(uint32_t mem_size)
80 #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
81 uint32_t
const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
82 uint8_t *
const cfunc_sample_addr = (
void *)(uintptr_t)&rb_zjit_reserve_addr_space;
83 uint8_t *
const probe_region_end = cfunc_sample_addr + INT32_MAX;
85 uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);
94 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
100 if (mem_block != MAP_FAILED) {
101 ruby_annotate_mmap(mem_block, mem_size,
"Ruby:rb_zjit_reserve_addr_space");
109 req_addr -= 4 * 1024 * 1024;
110 }
while (req_addr < probe_region_end);
116 (
void *)rb_zjit_reserve_addr_space,
119 MAP_PRIVATE | MAP_ANONYMOUS,
126 if (mem_block == MAP_FAILED) {
132 MAP_PRIVATE | MAP_ANONYMOUS,
137 if (mem_block != MAP_FAILED) {
138 ruby_annotate_mmap(mem_block, mem_size,
"Ruby:rb_zjit_reserve_addr_space:fallback");
143 if (mem_block == MAP_FAILED) {
144 perror(
"ruby: zjit: mmap:");
145 if(
errno == ENOMEM) {
149 rb_bug(
"mmap failed");
160rb_RSTRING_LEN(
VALUE str)
162 return RSTRING_LEN(str);
166rb_RSTRING_PTR(
VALUE str)
168 return RSTRING_PTR(str);
// Forward declaration for the profiling-off routine defined later in this file.
171void rb_zjit_profile_disable(
const rb_iseq_t *iseq);
// NOTE(review): the lines below are a fragment of the iseq-compilation entry
// point; its signature, braces, and locking prologue are missing from this
// extraction — restore them from upstream before compiling. Visible behavior:
// revert profiling instrumentation, generate native code for the iseq, then
// install the code pointer as the iseq's JIT entry function.
180 rb_zjit_profile_disable(iseq);
184 uintptr_t code_ptr = (uintptr_t)rb_zjit_iseq_gen_entry_point(iseq, ec);
187 iseq->body->jit_entry = (rb_jit_func_t)code_ptr;
193rb_iseq_encoded_size(
const rb_iseq_t *iseq)
195 return iseq->body->iseq_size;
205 const VALUE at_pc = *pc;
206 return rb_vm_insn_addr2opcode((
const void *)at_pc);
211rb_iseq_pc_at_idx(
const rb_iseq_t *iseq, uint32_t insn_idx)
215 VALUE *encoded = iseq->body->iseq_encoded;
216 VALUE *pc = &encoded[insn_idx];
221rb_insn_name(
VALUE insn)
223 return insn_name(insn);
241 return (
VALUE*)cfp->pc;
259 return (
VALUE*)cfp->ep;
267 for (i = 0; i < lv; i++) {
268 ep = VM_ENV_PREV_EP(ep);
278 if (UNDEFINED_METHOD_ENTRY_P(cme)) {
279 return VM_METHOD_TYPE_UNDEF;
282 return cme->def->type;
289 return cme->def->body.attr.id;
292enum method_optimized_type
295 return cme->def->body.optimized.type;
301 return cme->def->body.optimized.index;
307 return UNALIGNED_MEMBER_PTR(cme->def, body.cfunc);
313 return def->method_serial;
319 return def->original_id;
331 return (
void*)(uintptr_t)mct->func;
337 return def_iseq_ptr(def);
341rb_get_iseq_body_local_iseq(
const rb_iseq_t *iseq)
343 return iseq->body->local_iseq;
347rb_get_iseq_body_iseq_encoded(
const rb_iseq_t *iseq)
349 return iseq->body->iseq_encoded;
353rb_get_iseq_body_stack_max(
const rb_iseq_t *iseq)
355 return iseq->body->stack_max;
359rb_get_iseq_body_type(
const rb_iseq_t *iseq)
361 return iseq->body->type;
365rb_get_iseq_flags_has_lead(
const rb_iseq_t *iseq)
367 return iseq->body->
param.flags.has_lead;
371rb_get_iseq_flags_has_opt(
const rb_iseq_t *iseq)
373 return iseq->body->
param.flags.has_opt;
377rb_get_iseq_flags_has_kw(
const rb_iseq_t *iseq)
379 return iseq->body->
param.flags.has_kw;
383rb_get_iseq_flags_has_post(
const rb_iseq_t *iseq)
385 return iseq->body->
param.flags.has_post;
389rb_get_iseq_flags_has_kwrest(
const rb_iseq_t *iseq)
391 return iseq->body->
param.flags.has_kwrest;
395rb_get_iseq_flags_anon_kwrest(
const rb_iseq_t *iseq)
397 return iseq->body->
param.flags.anon_kwrest;
401rb_get_iseq_flags_has_rest(
const rb_iseq_t *iseq)
403 return iseq->body->
param.flags.has_rest;
407rb_get_iseq_flags_ruby2_keywords(
const rb_iseq_t *iseq)
409 return iseq->body->
param.flags.ruby2_keywords;
413rb_get_iseq_flags_has_block(
const rb_iseq_t *iseq)
415 return iseq->body->
param.flags.has_block;
419rb_get_iseq_flags_ambiguous_param0(
const rb_iseq_t *iseq)
421 return iseq->body->
param.flags.ambiguous_param0;
425rb_get_iseq_flags_accepts_no_kwarg(
const rb_iseq_t *iseq)
427 return iseq->body->
param.flags.accepts_no_kwarg;
431rb_get_iseq_flags_forwardable(
const rb_iseq_t *iseq)
433 return iseq->body->
param.flags.forwardable;
439typedef struct rb_iseq_param_keyword rb_iseq_param_keyword_struct;
441const rb_iseq_param_keyword_struct *
442rb_get_iseq_body_param_keyword(
const rb_iseq_t *iseq)
444 return iseq->body->
param.keyword;
448rb_get_iseq_body_param_size(
const rb_iseq_t *iseq)
450 return iseq->body->
param.size;
454rb_get_iseq_body_param_lead_num(
const rb_iseq_t *iseq)
456 return iseq->body->
param.lead_num;
460rb_get_iseq_body_param_opt_num(
const rb_iseq_t *iseq)
462 return iseq->body->
param.opt_num;
466rb_get_iseq_body_param_opt_table(
const rb_iseq_t *iseq)
468 return iseq->body->
param.opt_table;
472rb_get_iseq_body_local_table_size(
const rb_iseq_t *iseq)
474 return iseq->body->local_table_size;
480 return cikw->keyword_len;
486 return cikw->keywords[idx];
517rb_RSTRUCT_LEN(
VALUE st)
519 return RSTRUCT_LEN(st);
523rb_BASIC_OP_UNREDEFINED_P(
enum ruby_basic_operators bop, uint32_t klass)
525 return BASIC_OP_UNREDEFINED_P(bop, klass);
529rb_zjit_multi_ractor_p(
void)
531 return rb_multi_ractor_p();
536rb_assert_iseq_handle(
VALUE handle)
544 return (ice->flags & IMEMO_CONST_CACHE_SHAREABLE) != 0;
548rb_assert_cme_handle(
VALUE handle)
555rb_IMEMO_TYPE_P(
VALUE imemo,
enum imemo_type imemo_type)
557 return IMEMO_TYPE_P(imemo, imemo_type);
563rb_zjit_vm_unlock(
unsigned int *recursive_lock_level,
const char *file,
int line)
565 rb_vm_lock_leave(recursive_lock_level, file, line);
569rb_zjit_mark_writable(
void *mem_block, uint32_t mem_size)
571 return mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0;
// Make a code region executable (and read-only). Aborts via rb_bug on
// failure since the VM would crash executing non-executable pages anyway.
void
rb_zjit_mark_executable(void *mem_block, uint32_t mem_size)
{
    // Do not call mprotect when mem_size is zero; some platforms return an
    // error for zero-length regions.
    if (mem_size == 0) {
        return;
    }
    if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
        rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
            mem_block, (unsigned long)mem_size, strerror(errno));
    }
}
590rb_zjit_mark_unused(
void *mem_block, uint32_t mem_size)
595 madvise(mem_block, mem_size, MADV_DONTNEED);
599 return mprotect(mem_block, mem_size, PROT_NONE) == 0;
// Invalidate the instruction cache for [start, end). Compiles to nothing on
// x86_64 but is required on ARM before running freshly written code.
void
rb_zjit_icache_invalidate(void *start, void *end)
{
#ifdef __GNUC__
    __builtin___clear_cache(start, end);
#elif defined(__aarch64__)
#error No instruction cache clear available with this compiler on Aarch64!
#endif
}
// NOTE(review): the lines below are fragments of four small exported wrappers
// around the vm_callinfo.h accessors (argc, method id, flags, and kwarg list
// of a struct rb_callinfo). Their signatures are missing from this
// extraction; restore them from upstream before compiling.
620 return vm_ci_argc(ci);
626 return vm_ci_mid(ci);
632 return vm_ci_flag(ci);
638 return vm_ci_kwarg(ci);
641rb_method_visibility_t
644 return METHOD_ENTRY_VISI(me);
648rb_yarv_class_of(
VALUE obj)
// Acquire the VM lock and then signal all other Ruby threads (ractors) to
// contend for it, putting them to sleep. ZJIT uses this to evict threads
// running inside generated code so it can, among other things, safely patch
// that code.
void
rb_zjit_vm_lock_then_barrier(unsigned int *recursive_lock_level, const char *file, int line)
{
    rb_vm_lock_enter(recursive_lock_level, file, line);
    rb_vm_barrier();
}
665rb_RCLASS_ORIGIN(
VALUE c)
667 return RCLASS_ORIGIN(c);
672rb_zjit_profile_enable(
const rb_iseq_t *iseq)
675 const void *
const *insn_table = rb_vm_get_insns_address_table();
677 unsigned int insn_idx = 0;
678 while (insn_idx < iseq->body->iseq_size) {
679 int insn = rb_vm_insn_addr2opcode((
void *)iseq->body->iseq_encoded[insn_idx]);
680 int zjit_insn = vm_bare_insn_to_zjit_insn(insn);
681 if (insn != zjit_insn) {
682 iseq->body->iseq_encoded[insn_idx] = (
VALUE)insn_table[zjit_insn];
684 insn_idx += insn_len(insn);
690rb_zjit_profile_disable(
const rb_iseq_t *iseq)
693 const void *
const *insn_table = rb_vm_get_insns_address_table();
695 unsigned int insn_idx = 0;
696 while (insn_idx < iseq->body->iseq_size) {
697 int insn = rb_vm_insn_addr2opcode((
void *)iseq->body->iseq_encoded[insn_idx]);
698 int bare_insn = vm_zjit_insn_to_bare_insn(insn);
699 if (insn != bare_insn) {
700 iseq->body->iseq_encoded[insn_idx] = (
VALUE)insn_table[bare_insn];
702 insn_idx += insn_len(insn);
708rb_iseq_get_zjit_payload(
const rb_iseq_t *iseq)
712 return iseq->body->zjit_payload;
722rb_iseq_set_zjit_payload(
const rb_iseq_t *iseq,
void *payload)
727 iseq->body->zjit_payload = payload;
734rb_zjit_print_exception(
void)
736 VALUE exception = rb_errinfo();
737 rb_set_errinfo(
Qnil);
738 assert(
RTEST(exception));
739 rb_warn(
"Ruby error: %"PRIsVALUE
"",
rb_funcall(exception, rb_intern(
"full_message"), 0));
/*
 * NOTE(review): the text below is cross-reference/tooltip residue from a
 * Doxygen rendering of this file, not compilable C. It is preserved verbatim
 * inside this comment so no content is lost; it should be deleted from the
 * source file once confirmed redundant.
 *
 * #define RUBY_ASSERT_ALWAYS(expr,...)
 * A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
 * static VALUE RB_FL_TEST(VALUE obj, VALUE flags)
 * Tests if the given flag(s) are set or not.
 * #define FL_TEST_RAW
 * Old name of RB_FL_TEST_RAW.
 * #define Qnil
 * Old name of RUBY_Qnil.
 * void rb_warn(const char *fmt,...)
 * Identical to rb_warning(), except it reports unless $VERBOSE is nil.
 * static VALUE rb_class_of(VALUE obj)
 * Object to class mapping function.
 * VALUE rb_funcall(VALUE recv, ID mid, int n,...)
 * Calls a method.
 * Defines RBIMPL_HAS_BUILTIN.
 * #define errno
 * Ractor-aware version of errno.
 * #define RTEST
 * This is an old name of RB_TEST.
 * struct rb_iseq_constant_body::@155 param
 * parameter information
 * uintptr_t ID
 * Type that represents a Ruby identifier such as a variable name.
 * uintptr_t VALUE
 * Type that represents a Ruby object.
 * static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
 * Queries if the given object is of given type.
 * ruby_value_type
 * C-level type of an object.
 */