Ruby 3.5.0dev (2025-01-10 revision 5fab31b15e32622c4b71d1d347a41937e9f9c212)
rjit_c.c (5fab31b15e32622c4b71d1d347a41937e9f9c212)
1/**********************************************************************
2
3 rjit_c.c - C helpers for RJIT
4
5 Copyright (C) 2017 Takashi Kokubun <k0kubun@ruby-lang.org>.
6
7**********************************************************************/
8
9#include "rjit.h" // defines USE_RJIT
10
11#if USE_RJIT
12
13#include "rjit_c.h"
14#include "include/ruby/assert.h"
15#include "include/ruby/debug.h"
16#include "internal.h"
17#include "internal/compile.h"
18#include "internal/fixnum.h"
19#include "internal/hash.h"
20#include "internal/sanitizers.h"
21#include "internal/gc.h"
22#include "internal/proc.h"
23#include "yjit.h"
24#include "vm_insnhelper.h"
25#include "probes.h"
26#include "probes_helper.h"
27
28#include "insns.inc"
29#include "insns_info.inc"
30
31// For mmapp(), sysconf()
32#ifndef _WIN32
33#include <unistd.h>
34#include <sys/mman.h>
35#endif
36
37#include <errno.h>
38
39#if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
40// Align the current write position to a multiple of bytes
static uint8_t *
align_ptr(uint8_t *ptr, uint32_t multiple)
{
    // How far past the previous multiple the pointer currently sits.
    uint32_t offset = ((uint32_t)(uintptr_t)ptr) % multiple;

    // Bump up to the next multiple unless already aligned.
    if (offset != 0) {
        ptr += multiple - offset;
    }
    return ptr;
}
56#endif
57
58// Address space reservation. Memory pages are mapped on an as needed basis.
59// See the Rust mm module for details.
// Reserve mem_size bytes of address space for JIT code, mapped PROT_NONE.
// Pages are later made writable/executable via mprotect_write/mprotect_exec.
// Returns the base address, or NULL on Windows (unsupported). Calls rb_bug
// (or exit on ENOMEM) if every mapping attempt fails.
static uint8_t *
rjit_reserve_addr_space(uint32_t mem_size)
{
#ifndef _WIN32
    uint8_t *mem_block;

    // On Linux
    #if defined(MAP_FIXED_NOREPLACE) && defined(_SC_PAGESIZE)
        uint32_t const page_size = (uint32_t)sysconf(_SC_PAGESIZE);
        // Use this function's own address as a sample of where C code lives.
        uint8_t *const cfunc_sample_addr = (void *)(uintptr_t)&rjit_reserve_addr_space;
        uint8_t *const probe_region_end = cfunc_sample_addr + INT32_MAX;
        // Align the requested address to page size
        uint8_t *req_addr = align_ptr(cfunc_sample_addr, page_size);

        // Probe for addresses close to this function using MAP_FIXED_NOREPLACE
        // to improve odds of being in range for 32-bit relative call instructions.
        do {
            mem_block = mmap(
                req_addr,
                mem_size,
                PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                -1,
                0
            );

            // If we succeeded, stop
            if (mem_block != MAP_FAILED) {
                ruby_annotate_mmap(mem_block, mem_size, "Ruby:rjit_reserve_addr_space");
                break;
            }

            // +4MB
            req_addr += 4 * 1024 * 1024;
        } while (req_addr < probe_region_end);

    // On MacOS and other platforms
    #else
        // Try to map a chunk of memory as executable
        mem_block = mmap(
            (void *)rjit_reserve_addr_space,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );
    #endif

    // Fallback
    if (mem_block == MAP_FAILED) {
        // Try again without the address hint (e.g., valgrind)
        mem_block = mmap(
            NULL,
            mem_size,
            PROT_NONE,
            MAP_PRIVATE | MAP_ANONYMOUS,
            -1,
            0
        );

        if (mem_block != MAP_FAILED) {
            ruby_annotate_mmap(mem_block, mem_size, "Ruby:rjit_reserve_addr_space:fallback");
        }
    }

    // Check that the memory mapping was successful
    if (mem_block == MAP_FAILED) {
        // NOTE(review): message says "yjit" in this rjit file — presumably
        // inherited from YJIT's version of this code; confirm it's intended.
        perror("ruby: yjit: mmap:");
        if(errno == ENOMEM) {
            // No crash report if it's only insufficient memory
            exit(EXIT_FAILURE);
        }
        rb_bug("mmap failed");
    }

    return mem_block;
#else
    // Windows not supported for now
    return NULL;
#endif
}
142
143static VALUE
144mprotect_write(rb_execution_context_t *ec, VALUE self, VALUE rb_mem_block, VALUE rb_mem_size)
145{
146 void *mem_block = (void *)NUM2SIZET(rb_mem_block);
147 uint32_t mem_size = NUM2UINT(rb_mem_size);
148 return RBOOL(mprotect(mem_block, mem_size, PROT_READ | PROT_WRITE) == 0);
149}
150
151static VALUE
152mprotect_exec(rb_execution_context_t *ec, VALUE self, VALUE rb_mem_block, VALUE rb_mem_size)
153{
154 void *mem_block = (void *)NUM2SIZET(rb_mem_block);
155 uint32_t mem_size = NUM2UINT(rb_mem_size);
156 if (mem_size == 0) return Qfalse; // Some platforms return an error for mem_size 0.
157
158 if (mprotect(mem_block, mem_size, PROT_READ | PROT_EXEC)) {
159 rb_bug("Couldn't make JIT page (%p, %lu bytes) executable, errno: %s",
160 mem_block, (unsigned long)mem_size, strerror(errno));
161 }
162 return Qtrue;
163}
164
// Invoke the proc at recv with the given argc/argv, kw_splat flag, and
// block handler, returning the proc's result.
// NOTE(review): GetProcPtr is given `recv` (a VALUE*) rather than a
// dereferenced VALUE — presumably intentional at this call site; confirm
// against the JIT code that generates calls into this helper.
static VALUE
rjit_optimized_call(VALUE *recv, rb_execution_context_t *ec, int argc, VALUE *argv, int kw_splat, VALUE block_handler)
{
    rb_proc_t *proc;
    GetProcPtr(recv, proc);
    return rb_vm_invoke_proc(ec, proc, argc, argv, kw_splat, block_handler);
}
172
173static VALUE
174rjit_str_neq_internal(VALUE str1, VALUE str2)
175{
176 return rb_str_eql_internal(str1, str2) == Qtrue ? Qfalse : Qtrue;
177}
178
179static VALUE
180rjit_str_simple_append(VALUE str1, VALUE str2)
181{
182 return rb_str_cat(str1, RSTRING_PTR(str2), RSTRING_LEN(str2));
183}
184
185static VALUE
186rjit_rb_ary_subseq_length(VALUE ary, long beg)
187{
188 long len = RARRAY_LEN(ary);
189 return rb_ary_subseq(ary, beg, len);
190}
191
192static VALUE
193rjit_build_kwhash(const struct rb_callinfo *ci, VALUE *sp)
194{
195 const struct rb_callinfo_kwarg *kw_arg = vm_ci_kwarg(ci);
196 int kw_len = kw_arg->keyword_len;
197 VALUE hash = rb_hash_new_with_size(kw_len);
198
199 for (int i = 0; i < kw_len; i++) {
200 VALUE key = kw_arg->keywords[i];
201 VALUE val = *(sp - kw_len + i);
202 rb_hash_aset(hash, key, val);
203 }
204 return hash;
205}
206
207// The code we generate in gen_send_cfunc() doesn't fire the c_return TracePoint event
208// like the interpreter. When tracing for c_return is enabled, we patch the code after
209// the C method return to call into this to fire the event.
// Pop the current cfunc frame, fire the c_return TracePoint and DTrace
// hooks, and push return_value onto the caller's stack. Called from JIT
// code patched after a C method return when c_return tracing is enabled.
// The ordering (pop frame, then hooks, then push result) must mirror
// vm_call_cfunc_with_frame() — do not reorder.
static void
rjit_full_cfunc_return(rb_execution_context_t *ec, VALUE return_value)
{
    rb_control_frame_t *cfp = ec->cfp;
    RUBY_ASSERT_ALWAYS(cfp == GET_EC()->cfp);
    const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);

    RUBY_ASSERT_ALWAYS(RUBYVM_CFUNC_FRAME_P(cfp));
    RUBY_ASSERT_ALWAYS(me->def->type == VM_METHOD_TYPE_CFUNC);

    // CHECK_CFP_CONSISTENCY("full_cfunc_return"); TODO revive this

    // Pop the C func's frame and fire the c_return TracePoint event
    // Note that this is the same order as vm_call_cfunc_with_frame().
    rb_vm_pop_frame(ec);
    EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, return_value);
    // Note, this deviates from the interpreter in that users need to enable
    // a c_return TracePoint for this DTrace hook to work. A reasonable change
    // since the Ruby return event works this way as well.
    RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);

    // Push return value into the caller's stack. We know that it's a frame that
    // uses cfp->sp because we are patching a call done with gen_send_cfunc().
    ec->cfp->sp[0] = return_value;
    ec->cfp->sp++;
}
236
// Resolve the Proc object procv to its underlying rb_proc_t pointer.
static rb_proc_t *
rjit_get_proc_ptr(VALUE procv)
{
    rb_proc_t *proc;
    GetProcPtr(procv, proc);
    return proc;
}
244
245// Use the same buffer size as Stackprof.
246#define BUFF_LEN 2048
247
248extern VALUE rb_rjit_raw_samples;
249extern VALUE rb_rjit_line_samples;
250
// Record the Ruby call stack and the interpreter instruction at exit_pc
// into the global rb_rjit_raw_samples / rb_rjit_line_samples arrays, for
// side-exit profiling. Each sample is stored flat as:
//   stack_length, frame..., insn, count   (and line counterparts).
// Consecutive identical stacks are deduplicated by bumping the trailing
// count instead of appending a new record.
static void
rjit_record_exit_stack(const VALUE *exit_pc)
{
    // Let Primitive.rjit_stop_stats stop this
    if (!rb_rjit_call_p) return;

    // Get the opcode from the encoded insn handler at this PC
    int insn = rb_vm_insn_addr2opcode((void *)*exit_pc);

    // Create 2 array buffers to be used to collect frames and lines.
    VALUE frames_buffer[BUFF_LEN] = { 0 };
    int lines_buffer[BUFF_LEN] = { 0 };

    // Records call frame and line information for each method entry into two
    // temporary buffers. Returns the number of times we added to the buffer (ie
    // the length of the stack).
    //
    // Call frame info is stored in the frames_buffer, line number information
    // in the lines_buffer. The first argument is the start point and the second
    // argument is the buffer limit, set at 2048.
    int stack_length = rb_profile_frames(0, BUFF_LEN, frames_buffer, lines_buffer);
    int samples_length = stack_length + 3; // 3: length, insn, count

    // If yjit_raw_samples is less than or equal to the current length of the samples
    // we might have seen this stack trace previously.
    int prev_stack_len_index = (int)RARRAY_LEN(rb_rjit_raw_samples) - samples_length;
    VALUE prev_stack_len_obj;
    if (RARRAY_LEN(rb_rjit_raw_samples) >= samples_length && FIXNUM_P(prev_stack_len_obj = RARRAY_AREF(rb_rjit_raw_samples, prev_stack_len_index))) {
        int prev_stack_len = NUM2INT(prev_stack_len_obj);
        int idx = stack_length - 1;
        int prev_frame_idx = 0;
        bool seen_already = true;

        // If the previous stack length and current stack length are equal,
        // loop and compare the current frame to the previous frame. If they are
        // not equal, set seen_already to false and break out of the loop.
        if (prev_stack_len == stack_length) {
            while (idx >= 0) {
                VALUE current_frame = frames_buffer[idx];
                VALUE prev_frame = RARRAY_AREF(rb_rjit_raw_samples, prev_stack_len_index + prev_frame_idx + 1);

                // If the current frame and previous frame are not equal, set
                // seen_already to false and break out of the loop.
                if (current_frame != prev_frame) {
                    seen_already = false;
                    break;
                }

                idx--;
                prev_frame_idx++;
            }

            // If we know we've seen this stack before, increment the counter by 1.
            if (seen_already) {
                int prev_idx = (int)RARRAY_LEN(rb_rjit_raw_samples) - 1;
                int prev_count = NUM2INT(RARRAY_AREF(rb_rjit_raw_samples, prev_idx));
                int new_count = prev_count + 1;

                rb_ary_store(rb_rjit_raw_samples, prev_idx, INT2NUM(new_count));
                rb_ary_store(rb_rjit_line_samples, prev_idx, INT2NUM(new_count));
                return;
            }
        }
    }

    rb_ary_push(rb_rjit_raw_samples, INT2NUM(stack_length));
    rb_ary_push(rb_rjit_line_samples, INT2NUM(stack_length));

    // Push frames in reverse buffer order, i.e. outermost frame first.
    int idx = stack_length - 1;

    while (idx >= 0) {
        VALUE frame = frames_buffer[idx];
        int line = lines_buffer[idx];

        rb_ary_push(rb_rjit_raw_samples, frame);
        rb_ary_push(rb_rjit_line_samples, INT2NUM(line));

        idx--;
    }

    // Push the insn value into the yjit_raw_samples Vec.
    rb_ary_push(rb_rjit_raw_samples, INT2NUM(insn));

    // Push the current line onto the yjit_line_samples Vec. This
    // points to the line in insns.def.
    int line = (int)RARRAY_LEN(rb_rjit_line_samples) - 1;
    rb_ary_push(rb_rjit_line_samples, INT2NUM(line));

    // Push number of times seen onto the stack, which is 1
    // because it's the first time we've seen it.
    rb_ary_push(rb_rjit_raw_samples, INT2NUM(1));
    rb_ary_push(rb_rjit_line_samples, INT2NUM(1));
}
344
345// For a given raw_sample (frame), set the hash with the caller's
346// name, file, and line number. Return the hash with collected frame_info.
347static void
348rjit_add_frame(VALUE hash, VALUE frame)
349{
350 VALUE frame_id = SIZET2NUM(frame);
351
352 if (RTEST(rb_hash_aref(hash, frame_id))) {
353 return;
354 }
355 else {
356 VALUE frame_info = rb_hash_new();
357 // Full label for the frame
359 // Absolute path of the frame from rb_iseq_realpath
361 // Line number of the frame
363
364 // If absolute path isn't available use the rb_iseq_path
365 if (NIL_P(file)) {
366 file = rb_profile_frame_path(frame);
367 }
368
369 rb_hash_aset(frame_info, ID2SYM(rb_intern("name")), name);
370 rb_hash_aset(frame_info, ID2SYM(rb_intern("file")), file);
371 rb_hash_aset(frame_info, ID2SYM(rb_intern("samples")), INT2NUM(0));
372 rb_hash_aset(frame_info, ID2SYM(rb_intern("total_samples")), INT2NUM(0));
373 rb_hash_aset(frame_info, ID2SYM(rb_intern("edges")), rb_hash_new());
374 rb_hash_aset(frame_info, ID2SYM(rb_intern("lines")), rb_hash_new());
375
376 if (line != INT2FIX(0)) {
377 rb_hash_aset(frame_info, ID2SYM(rb_intern("line")), line);
378 }
379
380 rb_hash_aset(hash, frame_id, frame_info);
381 }
382}
383
// Convert the flat global sample arrays into a Stackprof-compatible hash:
//   { raw: [...], lines: [...], frames: { frame_id => frame_info } }.
// Each record in the flat arrays is laid out as:
//   stack_length, frame..., insn, count   (see rjit_record_exit_stack).
static VALUE
rjit_exit_traces(void)
{
    int samples_len = (int)RARRAY_LEN(rb_rjit_raw_samples);
    RUBY_ASSERT(samples_len == RARRAY_LEN(rb_rjit_line_samples));

    VALUE result = rb_hash_new();
    VALUE raw_samples = rb_ary_new_capa(samples_len);
    VALUE line_samples = rb_ary_new_capa(samples_len);
    VALUE frames = rb_hash_new();
    int idx = 0;

    // While the index is less than samples_len, parse yjit_raw_samples and
    // yjit_line_samples, then add casted values to raw_samples and line_samples array.
    while (idx < samples_len) {
        // Record header: number of frames in this sample.
        int num = NUM2INT(RARRAY_AREF(rb_rjit_raw_samples, idx));
        int line_num = NUM2INT(RARRAY_AREF(rb_rjit_line_samples, idx));
        idx++;

        rb_ary_push(raw_samples, SIZET2NUM(num));
        rb_ary_push(line_samples, INT2NUM(line_num));

        // Loop through the length of samples_len and add data to the
        // frames hash. Also push the current value onto the raw_samples
        // and line_samples array respectively.
        for (int o = 0; o < num; o++) {
            rjit_add_frame(frames, RARRAY_AREF(rb_rjit_raw_samples, idx));
            rb_ary_push(raw_samples, SIZET2NUM(RARRAY_AREF(rb_rjit_raw_samples, idx)));
            rb_ary_push(line_samples, RARRAY_AREF(rb_rjit_line_samples, idx));
            idx++;
        }

        // insn BIN and lineno
        rb_ary_push(raw_samples, RARRAY_AREF(rb_rjit_raw_samples, idx));
        rb_ary_push(line_samples, RARRAY_AREF(rb_rjit_line_samples, idx));
        idx++;

        // Number of times seen
        rb_ary_push(raw_samples, RARRAY_AREF(rb_rjit_raw_samples, idx));
        rb_ary_push(line_samples, RARRAY_AREF(rb_rjit_line_samples, idx));
        idx++;
    }

    // Set add the raw_samples, line_samples, and frames to the results
    // hash.
    rb_hash_aset(result, ID2SYM(rb_intern("raw")), raw_samples);
    rb_hash_aset(result, ID2SYM(rb_intern("lines")), line_samples);
    rb_hash_aset(result, ID2SYM(rb_intern("frames")), frames);

    return result;
}
435
436// An offsetof implementation that works for unnamed struct and union.
437// Multiplying 8 for compatibility with libclang's offsetof.
438#define OFFSETOF(ptr, member) RB_SIZE2NUM(((char *)&ptr.member - (char*)&ptr) * 8)
439
440#define SIZEOF(type) RB_SIZE2NUM(sizeof(type))
441#define SIGNED_TYPE_P(type) RBOOL((type)(-1) < (type)(1))
442
443// Insn side exit counters
444static size_t rjit_insn_exits[VM_INSTRUCTION_SIZE] = { 0 };
445
446// macOS: brew install capstone
447// Ubuntu/Debian: apt-get install libcapstone-dev
448// Fedora: dnf -y install capstone-devel
449#ifdef HAVE_LIBCAPSTONE
450#include <capstone/capstone.h>
451#endif
452
453// Return an array of [address, mnemonic, op_str]
// Disassemble the machine code in [from, to) with Capstone and return an
// array of [address, mnemonic, op_str] triples. Returns an empty array
// when Capstone is not available. When `test` is truthy, addresses are
// rebased to 0 for stable test output. x86-64 only (CS_ARCH_X86/CS_MODE_64).
static VALUE
dump_disasm(rb_execution_context_t *ec, VALUE self, VALUE from, VALUE to, VALUE test)
{
    VALUE result = rb_ary_new();
#ifdef HAVE_LIBCAPSTONE
    // Prepare for calling cs_disasm
    static csh handle;
    if (cs_open(CS_ARCH_X86, CS_MODE_64, &handle) != CS_ERR_OK) {
        rb_raise(rb_eRuntimeError, "failed to make Capstone handle");
    }
    size_t from_addr = NUM2SIZET(from);
    size_t to_addr = NUM2SIZET(to);

    // Call cs_disasm and convert results to a Ruby array
    cs_insn *insns;
    size_t base_addr = RTEST(test) ? 0 : from_addr; // On tests, start from 0 for output stability.
    size_t count = cs_disasm(handle, (const uint8_t *)from_addr, to_addr - from_addr, base_addr, 0, &insns);
    for (size_t i = 0; i < count; i++) {
        VALUE vals = rb_ary_new_from_args(3, LONG2NUM(insns[i].address), rb_str_new2(insns[i].mnemonic), rb_str_new2(insns[i].op_str));
        rb_ary_push(result, vals);
    }

    // Free memory used by capstone
    cs_free(insns, count);
    cs_close(&handle);
#endif
    return result;
}
482
483// Same as `RubyVM::RJIT.enabled?`, but this is used before it's defined.
484static VALUE
485rjit_enabled_p(rb_execution_context_t *ec, VALUE self)
486{
487 return RBOOL(rb_rjit_enabled);
488}
489
// rb_objspace_each_objects callback: walk heap slots in [vstart, vend)
// with the given stride, and for every ISeq object, wrap it with
// rb_rjit_iseq_new and yield it to the Ruby block passed through `data`.
// Always returns 0 to continue iteration.
static int
for_each_iseq_i(void *vstart, void *vend, size_t stride, void *data)
{
    VALUE block = (VALUE)data;
    VALUE v = (VALUE)vstart;
    for (; v != (VALUE)vend; v += stride) {
        // Slot may be ASAN-poisoned; remember its state and unpoison so we
        // can safely inspect it.
        void *ptr = rb_asan_poisoned_object_p(v);
        rb_asan_unpoison_object(v, false);

        if (rb_obj_is_iseq(v)) {
            extern VALUE rb_rjit_iseq_new(rb_iseq_t *iseq);
            rb_iseq_t *iseq = (rb_iseq_t *)v;
            rb_funcall(block, rb_intern("call"), 1, rb_rjit_iseq_new(iseq));
        }

        // Re-poison the slot if it was poisoned on entry.
        asan_poison_object_if(ptr, v);
    }
    return 0;
}
509
// Primitive: yield every ISeq currently in the object space to `block`.
static VALUE
rjit_for_each_iseq(rb_execution_context_t *ec, VALUE self, VALUE block)
{
    rb_objspace_each_objects(for_each_iseq_i, (void *)block);
    return Qnil;
}
516
517// bindgen references
518extern ID rb_get_symbol_id(VALUE name);
519extern VALUE rb_fix_aref(VALUE fix, VALUE idx);
520extern VALUE rb_str_getbyte(VALUE str, VALUE index);
521extern VALUE rb_vm_concat_array(VALUE ary1, VALUE ary2st);
522extern VALUE rb_vm_get_ev_const(rb_execution_context_t *ec, VALUE orig_klass, ID id, VALUE allow_nil);
523extern VALUE rb_vm_getclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, ICVARC ic);
524extern VALUE rb_vm_opt_newarray_min(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr);
525extern VALUE rb_vm_opt_newarray_max(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr);
526extern VALUE rb_vm_opt_newarray_hash(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr);
527extern VALUE rb_vm_opt_newarray_pack(rb_execution_context_t *ec, rb_num_t num, const VALUE *ptr, VALUE fmt);
528extern VALUE rb_vm_splat_array(VALUE flag, VALUE array);
529extern bool rb_simple_iseq_p(const rb_iseq_t *iseq);
530extern bool rb_vm_defined(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t op_type, VALUE obj, VALUE v);
531extern bool rb_vm_ic_hit_p(IC ic, const VALUE *reg_ep);
532extern rb_event_flag_t rb_rjit_global_events;
533extern void rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic);
534extern VALUE rb_vm_throw(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, rb_num_t throw_state, VALUE throwobj);
535extern VALUE rb_reg_new_ary(VALUE ary, int opt);
536extern void rb_vm_setclassvariable(const rb_iseq_t *iseq, const rb_control_frame_t *cfp, ID id, VALUE val, ICVARC ic);
537extern VALUE rb_str_bytesize(VALUE str);
538extern const rb_callable_method_entry_t *rb_callable_method_entry_or_negative(VALUE klass, ID mid);
539extern VALUE rb_vm_yield_with_cfunc(rb_execution_context_t *ec, const struct rb_captured_block *captured, int argc, const VALUE *argv);
540extern VALUE rb_vm_set_ivar_id(VALUE obj, ID id, VALUE val);
541extern VALUE rb_ary_unshift_m(int argc, VALUE *argv, VALUE ary);
542extern void* rb_rjit_entry_stub_hit(VALUE branch_stub);
543extern void* rb_rjit_branch_stub_hit(VALUE branch_stub, int sp_offset, int target0_p);
544extern uint64_t rb_vm_insns_count;
545
546#include "rjit_c.rbinc"
547
548#endif // USE_RJIT
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
Definition assert.h:199
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
Definition assert.h:219
int rb_profile_frames(int start, int limit, VALUE *buff, int *lines)
Queries mysterious "frame"s of the given range.
VALUE rb_profile_frame_full_label(VALUE frame)
Identical to rb_profile_frame_label(), except it returns a qualified result.
VALUE rb_profile_frame_absolute_path(VALUE frame)
Identical to rb_profile_frame_path(), except it tries to expand the returning path.
VALUE rb_profile_frame_path(VALUE frame)
Queries the path of the passed backtrace.
VALUE rb_profile_frame_first_lineno(VALUE frame)
Queries the first line of the method of the passed frame pointer.
#define RUBY_EVENT_C_RETURN
Return from a method, written in C.
Definition event.h:44
uint32_t rb_event_flag_t
Represents event(s).
Definition event.h:108
#define rb_str_new2
Old name of rb_str_new_cstr.
Definition string.h:1675
#define INT2FIX
Old name of RB_INT2FIX.
Definition long.h:48
#define ID2SYM
Old name of RB_ID2SYM.
Definition symbol.h:44
#define SIZET2NUM
Old name of RB_SIZE2NUM.
Definition size_t.h:62
#define NUM2UINT
Old name of RB_NUM2UINT.
Definition int.h:45
#define LONG2NUM
Old name of RB_LONG2NUM.
Definition long.h:50
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
Definition int.h:44
#define INT2NUM
Old name of RB_INT2NUM.
Definition int.h:43
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define NIL_P
Old name of RB_NIL_P.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define NUM2SIZET
Old name of RB_NUM2SIZE.
Definition size_t.h:61
VALUE rb_eRuntimeError
RuntimeError exception.
Definition error.c:1428
VALUE rb_funcall(VALUE recv, ID mid, int n,...)
Calls a method.
Definition vm_eval.c:1099
VALUE rb_str_cat(VALUE dst, const char *src, long srclen)
Destructively appends the passed contents to the string.
Definition string.c:3444
int len
Length of the buffer.
Definition io.h:8
#define RARRAY_LEN
Just another name of rb_array_len.
Definition rarray.h:51
#define RARRAY_AREF(a, i)
Definition rarray.h:403
#define errno
Ractor-aware version of errno.
Definition ruby.h:388
#define RTEST
This is an old name of RB_TEST.
Definition vm_core.h:293
Definition vm_core.h:288
Definition method.h:62
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
Definition value.h:52
uintptr_t VALUE
Type that represents a Ruby object.
Definition value.h:40