Ruby 3.5.0dev (2025-05-16 revision 06a56a7ffcb053d5bc45b9a984082d9301d6819c)
vm.c (06a56a7ffcb053d5bc45b9a984082d9301d6819c)
1/**********************************************************************
2
3 vm.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11#define vm_exec rb_vm_exec
12
13#include "eval_intern.h"
14#include "internal.h"
15#include "internal/class.h"
16#include "internal/compile.h"
17#include "internal/cont.h"
18#include "internal/error.h"
19#include "internal/encoding.h"
20#include "internal/eval.h"
21#include "internal/gc.h"
22#include "internal/inits.h"
23#include "internal/missing.h"
24#include "internal/namespace.h"
25#include "internal/object.h"
26#include "internal/proc.h"
27#include "internal/re.h"
28#include "internal/ruby_parser.h"
29#include "internal/symbol.h"
30#include "internal/thread.h"
31#include "internal/transcode.h"
32#include "internal/vm.h"
33#include "internal/sanitizers.h"
34#include "internal/variable.h"
35#include "iseq.h"
36#include "symbol.h" // This includes a macro for a more performant rb_id2sym.
37#include "yjit.h"
38#include "ruby/st.h"
39#include "ruby/vm.h"
40#include "vm_core.h"
41#include "vm_callinfo.h"
42#include "vm_debug.h"
43#include "vm_exec.h"
44#include "vm_insnhelper.h"
45#include "ractor_core.h"
46#include "vm_sync.h"
47#include "shape.h"
48#include "insns.inc"
49#include "zjit.h"
50
51#include "builtin.h"
52
53#include "probes.h"
54#include "probes_helper.h"
55
56#ifdef RUBY_ASSERT_CRITICAL_SECTION
57int ruby_assert_critical_section_entered = 0;
58#endif
59
60static void *native_main_thread_stack_top;
61
62VALUE rb_str_concat_literals(size_t, const VALUE*);
63
64VALUE vm_exec(rb_execution_context_t *);
65
66extern const char *const rb_debug_counter_names[];
67
68PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
69static inline const VALUE *
70VM_EP_LEP(const VALUE *ep)
71{
72 while (!VM_ENV_LOCAL_P(ep)) {
73 ep = VM_ENV_PREV_EP(ep);
74 }
75 return ep;
76}
77
78static inline const rb_control_frame_t *
79rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
80{
81 if (!ep) {
82 return NULL;
83 }
84 else {
85 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
86
87 while (cfp < eocfp) {
88 if (cfp->ep == ep) {
89 return cfp;
90 }
91 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
92 }
93
94 return NULL;
95 }
96}
97
98const VALUE *
99rb_vm_ep_local_ep(const VALUE *ep)
100{
101 return VM_EP_LEP(ep);
102}
103
104PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
105static inline const VALUE *
106VM_CF_LEP(const rb_control_frame_t * const cfp)
107{
108 return VM_EP_LEP(cfp->ep);
109}
110
111static inline const VALUE *
112VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
113{
114 return VM_ENV_PREV_EP(cfp->ep);
115}
116
117PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
118static inline VALUE
119VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
120{
121 const VALUE *ep = VM_CF_LEP(cfp);
122 return VM_ENV_BLOCK_HANDLER(ep);
123}
124
125int
126rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
127{
128 return VM_FRAME_CFRAME_KW_P(cfp);
129}
130
131VALUE
132rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
133{
134 return VM_CF_BLOCK_HANDLER(cfp);
135}
136
137#if VM_CHECK_MODE > 0
138static int
139VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
140{
141 const VALUE *start = ec->vm_stack;
142 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
143 VM_ASSERT(start != NULL);
144
145 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
146 return FALSE;
147 }
148 else {
149 return TRUE;
150 }
151}
152
153static int
154VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
155{
156 const VALUE *start = ec->vm_stack;
157 const VALUE *end = (VALUE *)ec->cfp;
158 VM_ASSERT(start != NULL);
159
160 if (start <= ep && ep < end) {
161 return FALSE;
162 }
163 else {
164 return TRUE;
165 }
166}
167
168static int
169vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
170{
171 if (VM_EP_IN_HEAP_P(ec, ep)) {
172 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
173
174 if (!UNDEF_P(envval)) {
175 const rb_env_t *env = (const rb_env_t *)envval;
176
177 VM_ASSERT(imemo_type_p(envval, imemo_env));
178 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
179 VM_ASSERT(env->ep == ep);
180 }
181 return TRUE;
182 }
183 else {
184 return FALSE;
185 }
186}
187
188int
189rb_vm_ep_in_heap_p(const VALUE *ep)
190{
191 const rb_execution_context_t *ec = GET_EC();
192 if (ec->vm_stack == NULL) return TRUE;
193 return vm_ep_in_heap_p_(ec, ep);
194}
195#endif
196
197static struct rb_captured_block *
198VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
199{
200 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
201 return (struct rb_captured_block *)&cfp->self;
202}
203
204static rb_control_frame_t *
205VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
206{
207 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
208 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
209 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + (VM_DEBUG_BP_CHECK ? 1 : 0));
210 return cfp;
211}
212
213static int
214VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
215{
216 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
217 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
218}
219
220static VALUE
221vm_passed_block_handler(rb_execution_context_t *ec)
222{
223 VALUE block_handler = ec->passed_block_handler;
224 ec->passed_block_handler = VM_BLOCK_HANDLER_NONE;
225 vm_block_handler_verify(block_handler);
226 return block_handler;
227}
228
229static rb_cref_t *
230vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev, int singleton)
231{
232 VALUE refinements = Qnil;
233 int omod_shared = FALSE;
234
235 /* scope */
236 union {
237 rb_scope_visibility_t visi;
238 VALUE value;
239 } scope_visi;
240
241 scope_visi.visi.method_visi = visi;
242 scope_visi.visi.module_func = module_func;
243
244 /* refinements */
245 if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why is CREF_NEXT(cref) 1? */) {
246 refinements = CREF_REFINEMENTS(prev_cref);
247
248 if (!NIL_P(refinements)) {
249 omod_shared = TRUE;
250 CREF_OMOD_SHARED_SET(prev_cref);
251 }
252 }
253
254 VM_ASSERT(singleton || klass);
255
256 rb_cref_t *cref = IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
257 cref->klass_or_self = klass;
258 cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
259 *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi.visi;
260
261 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
262 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
263 if (singleton) CREF_SINGLETON_SET(cref);
264
265 return cref;
266}
267
268static rb_cref_t *
269vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int singleton)
270{
271 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE, singleton);
272}
273
274static rb_cref_t *
275vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
276{
277 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE, FALSE);
278}
279
280static int
281ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
282{
283 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
284}
285
286static rb_cref_t *
287vm_cref_dup(const rb_cref_t *cref)
288{
289 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
290 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
291 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
292 int singleton = CREF_SINGLETON(cref);
293
294 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
295
296 if (!NIL_P(CREF_REFINEMENTS(cref))) {
297 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
298 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
299 CREF_REFINEMENTS_SET(new_cref, ref);
300 CREF_OMOD_SHARED_UNSET(new_cref);
301 }
302
303 return new_cref;
304}
305
306
307rb_cref_t *
308rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
309{
310 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
311 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
312 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
313 int singleton = CREF_SINGLETON(cref);
314
315 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
316
317 if (!NIL_P(CREF_REFINEMENTS(cref))) {
318 CREF_REFINEMENTS_SET(new_cref, Qnil);
319 CREF_OMOD_SHARED_UNSET(new_cref);
320 }
321
322 return new_cref;
323}
324
325static rb_cref_t *
326vm_cref_new_toplevel(rb_execution_context_t *ec)
327{
328 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE, FALSE);
329 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
330
331 if (top_wrapper) {
332 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE);
333 }
334
335 return cref;
336}
337
338rb_cref_t *
339rb_vm_cref_new_toplevel(void)
340{
341 return vm_cref_new_toplevel(GET_EC());
342}
343
344static void
345vm_cref_dump(const char *mesg, const rb_cref_t *cref)
346{
347 ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
348
349 while (cref) {
350 ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
351 cref = CREF_NEXT(cref);
352 }
353}
354
355void
356rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
357{
358 *((const VALUE **)&dst->as.captured.ep) = ep;
359 RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
360}
361
362static void
363vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
364{
365 const rb_env_t *env = (rb_env_t *)envval;
366 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
367 rb_vm_block_ep_update(bindval, &bind->block, env->ep);
368}
369
370#if VM_COLLECT_USAGE_DETAILS
371static void vm_collect_usage_operand(int insn, int n, VALUE op);
372static void vm_collect_usage_insn(int insn);
373static void vm_collect_usage_register(int reg, int isset);
374#endif
375
376static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
377static VALUE vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
378 int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
379 const rb_callable_method_entry_t *me);
380static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
381
382#if USE_YJIT
383// Counter to serve as a proxy for execution time, total number of calls
384static uint64_t yjit_total_entry_hits = 0;
385
386// Number of calls used to estimate how hot an ISEQ is
387#define YJIT_CALL_COUNT_INTERV 20u
388
390static inline bool
391rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls)
392{
393 yjit_total_entry_hits += 1;
394
395 // Record the number of calls at the beginning of the interval
396 if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) {
397 iseq->body->yjit_calls_at_interv = yjit_total_entry_hits;
398 }
399
400 // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ
401 // This gives us a ratio of how hot/cold this ISEQ is
402 if (entry_calls == rb_yjit_call_threshold) {
403 // We expect threshold 1 to compile everything immediately
404 if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) {
405 return true;
406 }
407
408 uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv;
409
410 // Reject ISEQs that don't get called often enough
411 if (num_calls > rb_yjit_cold_threshold) {
412 rb_yjit_incr_counter("cold_iseq_entry");
413 return false;
414 }
415
416 return true;
417 }
418
419 return false;
420}
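// Worked example (hypothetical numbers): with rb_yjit_call_threshold == 40,
// the call that brings entry_calls to 20 (threshold - YJIT_CALL_COUNT_INTERV)
// records yjit_total_entry_hits into yjit_calls_at_interv. When entry_calls
// reaches 40, if more than rb_yjit_cold_threshold total ISEQ entries happened
// while this ISEQ accumulated those last 20 calls, it is rejected as cold.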
421#else
422#define rb_yjit_threshold_hit(iseq, entry_calls) false
423#endif
424
425#if USE_YJIT || USE_ZJIT
426// Generate JIT code that supports the following kinds of ISEQ entries:
427// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
428// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
429// The current vm_exec stops if JIT code returns a non-Qundef value.
430// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
431// blocks called by a Ruby frame that isn't compiled or side-exited).
432// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
433// vm_exec does NOT stop, whether the JIT code returns Qundef or not.
434static inline rb_jit_func_t
435jit_compile(rb_execution_context_t *ec)
436{
437 const rb_iseq_t *iseq = ec->cfp->iseq;
438 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
439
440#if USE_ZJIT
441 if (body->jit_entry == NULL && rb_zjit_enabled_p) {
442 body->jit_entry_calls++;
443
444 // At profile-threshold, rewrite some of the YARV instructions
445 // to zjit_* instructions to profile these instructions.
446 if (body->jit_entry_calls == rb_zjit_profile_threshold) {
447 rb_zjit_profile_enable(iseq);
448 }
449
450 // At call-threshold, compile the ISEQ with ZJIT.
451 if (body->jit_entry_calls == rb_zjit_call_threshold) {
452 rb_zjit_compile_iseq(iseq, ec, false);
453 }
454 }
455#endif
456
457#if USE_YJIT
458 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
459 if (body->jit_entry == NULL && rb_yjit_enabled_p) {
460 body->jit_entry_calls++;
461 if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
462 rb_yjit_compile_iseq(iseq, ec, false);
463 }
464 }
465#endif
466 return body->jit_entry;
467}
468
469// Execute JIT code compiled by jit_compile()
470static inline VALUE
471jit_exec(rb_execution_context_t *ec)
472{
473 rb_jit_func_t func = jit_compile(ec);
474 if (func) {
475 // Call the JIT code
476 return func(ec, ec->cfp);
477 }
478 else {
479 return Qundef;
480 }
481}
482#else
483# define jit_compile(ec) ((rb_jit_func_t)0)
484# define jit_exec(ec) Qundef
485#endif
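// Illustrative sketch (not the actual driver code) of how a caller such as
// vm_exec is expected to use jit_exec under the contract above:
//
//     VALUE result = jit_exec(ec);
//     if (UNDEF_P(result)) {
//         result = ...; /* no JIT code, or it side-exited: run the interpreter */
//     }
//     /* otherwise the JIT code finished the FINISH-flagged frame itself */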
486
487#if USE_YJIT
488// Generate JIT code that supports the following kind of ISEQ entry:
489// * The first ISEQ pushed by vm_exec_handle_exception. The frame would
490// point to a location specified by a catch table, and it doesn't have
491// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
492// a non-Qundef value. So you should not return a non-Qundef value
493// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
494static inline rb_jit_func_t
495jit_compile_exception(rb_execution_context_t *ec)
496{
497 const rb_iseq_t *iseq = ec->cfp->iseq;
498 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
499
500 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
501#if USE_YJIT
502 if (body->jit_exception == NULL && rb_yjit_enabled_p) {
503 body->jit_exception_calls++;
504 if (body->jit_exception_calls == rb_yjit_call_threshold) {
505 rb_yjit_compile_iseq(iseq, ec, true);
506 }
507 }
508#endif
509 return body->jit_exception;
510}
511
512// Execute JIT code compiled by jit_compile_exception()
513static inline VALUE
514jit_exec_exception(rb_execution_context_t *ec)
515{
516 rb_jit_func_t func = jit_compile_exception(ec);
517 if (func) {
518 // Call the JIT code
519 return func(ec, ec->cfp);
520 }
521 else {
522 return Qundef;
523 }
524}
525#else
526# define jit_compile_exception(ec) ((rb_jit_func_t)0)
527# define jit_exec_exception(ec) Qundef
528#endif
529
530static void add_opt_method_entry(const rb_method_entry_t *me);
531
532#define RB_TYPE_2_P(obj, type1, type2) \
533 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2))
534#define RB_TYPE_3_P(obj, type1, type2, type3) \
535 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2) || RB_TYPE_P(obj, type3))
536
537#define VM_ASSERT_TYPE(obj, type) \
538 VM_ASSERT(RB_TYPE_P(obj, type), #obj ": %s", rb_obj_info(obj))
539#define VM_ASSERT_TYPE2(obj, type1, type2) \
540 VM_ASSERT(RB_TYPE_2_P(obj, type1, type2), #obj ": %s", rb_obj_info(obj))
541#define VM_ASSERT_TYPE3(obj, type1, type2, type3) \
542 VM_ASSERT(RB_TYPE_3_P(obj, type1, type2, type3), #obj ": %s", rb_obj_info(obj))
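// Usage sketch (illustrative): VM_ASSERT_TYPE(hash, T_HASH) is a debug-build
// assertion that fails with "hash: <rb_obj_info output>" when hash is not a
// T_HASH; the 2- and 3-type variants accept any one of the listed types.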
543
544#include "vm_insnhelper.c"
545
546#include "vm_exec.c"
547
548#include "vm_method.c"
549#include "vm_eval.c"
550
551#define PROCDEBUG 0
552
553VALUE rb_cRubyVM;
554VALUE rb_cThread;
555VALUE rb_mRubyVMFrozenCore;
556VALUE rb_block_param_proxy;
557
558VALUE ruby_vm_const_missing_count = 0;
559rb_vm_t *ruby_current_vm_ptr = NULL;
560rb_ractor_t *ruby_single_main_ractor;
561bool ruby_vm_keep_script_lines;
562
563#ifdef RB_THREAD_LOCAL_SPECIFIER
564RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
565
566#ifdef RUBY_NT_SERIAL
567RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
568#endif
569
570// no-inline decl on vm_core.h
571rb_execution_context_t *
572rb_current_ec_noinline(void)
573{
574 return ruby_current_ec;
575}
576
577void
578rb_current_ec_set(rb_execution_context_t *ec)
579{
580 ruby_current_ec = ec;
581}
582
583
584#if defined(__arm64__) || defined(__aarch64__)
585rb_execution_context_t *
586rb_current_ec(void)
587{
588 return ruby_current_ec;
589}
590
591#endif
592#else
593native_tls_key_t ruby_current_ec_key;
594
595// no-inline decl on vm_core.h
596rb_execution_context_t *
597rb_current_ec_noinline(void)
598{
599 return native_tls_get(ruby_current_ec_key);
600}
601
602#endif
603
604rb_event_flag_t ruby_vm_event_flags;
605rb_event_flag_t ruby_vm_event_enabled_global_flags;
606unsigned int ruby_vm_event_local_num;
607
608rb_serial_t ruby_vm_constant_cache_invalidations = 0;
609rb_serial_t ruby_vm_constant_cache_misses = 0;
610rb_serial_t ruby_vm_global_cvar_state = 1;
611
612static const struct rb_callcache vm_empty_cc = {
613 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
614 .klass = Qfalse,
615 .cme_ = NULL,
616 .call_ = vm_call_general,
617 .aux_ = {
618 .v = Qfalse,
619 }
620};
621
622static const struct rb_callcache vm_empty_cc_for_super = {
623 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
624 .klass = Qfalse,
625 .cme_ = NULL,
626 .call_ = vm_call_super_method,
627 .aux_ = {
628 .v = Qfalse,
629 }
630};
631
632static void thread_free(void *ptr);
633
634void
635rb_vm_inc_const_missing_count(void)
636{
637 ruby_vm_const_missing_count +=1;
638}
639
640int
641rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
642 struct ruby_dtrace_method_hook_args *args)
643{
644 enum ruby_value_type type;
645 if (!klass) {
646 if (!ec) ec = GET_EC();
647 if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
648 return FALSE;
649 }
650 if (RB_TYPE_P(klass, T_ICLASS)) {
651 klass = RBASIC(klass)->klass;
652 }
653 else if (RCLASS_SINGLETON_P(klass)) {
654 klass = RCLASS_ATTACHED_OBJECT(klass);
655 if (NIL_P(klass)) return FALSE;
656 }
657 type = BUILTIN_TYPE(klass);
658 if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
659 VALUE name = rb_class_path(klass);
660 const char *classname, *filename;
661 const char *methodname = rb_id2name(id);
662 if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
663 if (NIL_P(name) || !(classname = StringValuePtr(name)))
664 classname = "<unknown>";
665 args->classname = classname;
666 args->methodname = methodname;
667 args->filename = filename;
668 args->klass = klass;
669 args->name = name;
670 return TRUE;
671 }
672 }
673 return FALSE;
674}
675
676extern unsigned int redblack_buffer_size;
677
678/*
679 * call-seq:
680 * RubyVM.stat -> Hash
681 * RubyVM.stat(hsh) -> hsh
682 * RubyVM.stat(Symbol) -> Numeric
683 *
684 * Returns a Hash containing implementation-dependent counters inside the VM.
685 *
686 * This hash includes information about method/constant caches:
687 *
688 * {
689 * :constant_cache_invalidations=>2,
690 * :constant_cache_misses=>14,
691 * :global_cvar_state=>27
692 * }
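 *
 * Passing a Symbol returns just that counter, and passing a Hash stores the
 * counters into it, for example:
 *
 *    RubyVM.stat(:constant_cache_invalidations) #=> 2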
693 *
694 * If <tt>USE_DEBUG_COUNTER</tt> is enabled, debug counters will be included.
695 *
696 * The contents of the hash are implementation specific and may be changed in
697 * the future.
698 *
699 * This method is only expected to work on C Ruby.
700 */
701static VALUE
702vm_stat(int argc, VALUE *argv, VALUE self)
703{
704 static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id;
705 static VALUE sym_shape_cache_size;
706 VALUE arg = Qnil;
707 VALUE hash = Qnil, key = Qnil;
708
709 if (rb_check_arity(argc, 0, 1) == 1) {
710 arg = argv[0];
711 if (SYMBOL_P(arg))
712 key = arg;
713 else if (RB_TYPE_P(arg, T_HASH))
714 hash = arg;
715 else
716 rb_raise(rb_eTypeError, "non-hash or symbol given");
717 }
718 else {
719 hash = rb_hash_new();
720 }
721
722#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
723 S(constant_cache_invalidations);
724 S(constant_cache_misses);
725 S(global_cvar_state);
726 S(next_shape_id);
727 S(shape_cache_size);
728#undef S
729
730#define SET(name, attr) \
731 if (key == sym_##name) \
732 return SERIALT2NUM(attr); \
733 else if (hash != Qnil) \
734 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
735
736 SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
737 SET(constant_cache_misses, ruby_vm_constant_cache_misses);
738 SET(global_cvar_state, ruby_vm_global_cvar_state);
739 SET(next_shape_id, (rb_serial_t)GET_SHAPE_TREE()->next_shape_id);
740 SET(shape_cache_size, (rb_serial_t)GET_SHAPE_TREE()->cache_size);
741#undef SET
742
743#if USE_DEBUG_COUNTER
744 ruby_debug_counter_show_at_exit(FALSE);
745 for (size_t i = 0; i < RB_DEBUG_COUNTER_MAX; i++) {
746 const VALUE name = rb_sym_intern_ascii_cstr(rb_debug_counter_names[i]);
747 const VALUE boxed_value = SIZET2NUM(rb_debug_counter[i]);
748
749 if (key == name) {
750 return boxed_value;
751 }
752 else if (hash != Qnil) {
753 rb_hash_aset(hash, name, boxed_value);
754 }
755 }
756#endif
757
758 if (!NIL_P(key)) { /* matched key should return above */
759 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
760 }
761
762 return hash;
763}
764
765/* control stack frame */
766
767static void
768vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
769{
770 if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
771 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
772 }
773
774 /* for return */
775 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH, rb_ec_thread_ptr(ec)->top_self,
776 VM_BLOCK_HANDLER_NONE,
777 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
778 ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
779 ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
780}
781
782static void
783vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
784{
785 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
786 vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
787 (VALUE)cref, /* cref or me */
788 ISEQ_BODY(iseq)->iseq_encoded,
789 ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size,
790 ISEQ_BODY(iseq)->stack_max);
791}
792
793static void
794vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
795{
796 VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
797 rb_binding_t *bind;
798
799 GetBindingPtr(toplevel_binding, bind);
800 RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
801
802 vm_set_eval_stack(ec, iseq, 0, &bind->block);
803
804 /* save binding */
805 if (ISEQ_BODY(iseq)->local_table_size > 0) {
806 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
807 }
808}
809
810rb_control_frame_t *
811rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
812{
813 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
814 if (cfp->iseq) {
815 return (rb_control_frame_t *)cfp;
816 }
817 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
818 }
819 return 0;
820}
821
822rb_control_frame_t *
823rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
824{
825 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
826 if (VM_FRAME_RUBYFRAME_P(cfp)) {
827 return (rb_control_frame_t *)cfp;
828 }
829 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
830 }
831 return 0;
832}
833
834static rb_control_frame_t *
835vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
836{
837 if (VM_FRAME_RUBYFRAME_P(cfp)) {
838 return (rb_control_frame_t *)cfp;
839 }
840
841 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
842
843 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
844 if (VM_FRAME_RUBYFRAME_P(cfp)) {
845 return (rb_control_frame_t *)cfp;
846 }
847
848 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
849 break;
850 }
851 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
852 }
853 return 0;
854}
855
856void
857rb_vm_pop_cfunc_frame(void)
858{
859 rb_execution_context_t *ec = GET_EC();
860 rb_control_frame_t *cfp = ec->cfp;
861 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
862
863 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
864 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
865 vm_pop_frame(ec, cfp, cfp->ep);
866}
867
868void
869rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
870{
871 /* check skipped frame */
872 while (ec->cfp != cfp) {
873#if VMDEBUG
874 printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
875#endif
876 if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
877 rb_vm_pop_frame(ec);
878 }
879 else { /* unlikely path */
880 rb_vm_pop_cfunc_frame();
881 }
882 }
883}
884
885/* at exit */
886
887void
888ruby_vm_at_exit(void (*func)(rb_vm_t *))
889{
890 rb_vm_t *vm = GET_VM();
891 rb_at_exit_list *nl = ALLOC(rb_at_exit_list);
892 nl->func = func;
893 nl->next = vm->at_exit;
894 vm->at_exit = nl;
895}
896
897static void
898ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
899{
900 rb_at_exit_list *l = vm->at_exit;
901
902 while (l) {
903 rb_at_exit_list* t = l->next;
904 rb_vm_at_exit_func *func = l->func;
905 ruby_xfree(l);
906 l = t;
907 (*func)(vm);
908 }
909}
910
911/* Env */
912
913static VALUE check_env_value(const rb_env_t *env);
914
915static int
916check_env(const rb_env_t *env)
917{
918 fputs("---\n", stderr);
919 ruby_debug_printf("envptr: %p\n", (void *)&env->ep[0]);
920 ruby_debug_printf("envval: %10p ", (void *)env->ep[1]);
921 dp(env->ep[1]);
922 ruby_debug_printf("ep: %10p\n", (void *)env->ep);
923 if (rb_vm_env_prev_env(env)) {
924 fputs(">>\n", stderr);
925 check_env_value(rb_vm_env_prev_env(env));
926 fputs("<<\n", stderr);
927 }
928 return 1;
929}
930
931static VALUE
932check_env_value(const rb_env_t *env)
933{
934 if (check_env(env)) {
935 return (VALUE)env;
936 }
937 rb_bug("invalid env");
938 return Qnil; /* unreachable */
939}
940
941static VALUE
942vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
943{
944 switch (vm_block_handler_type(block_handler)) {
945 case block_handler_type_ifunc:
946 case block_handler_type_iseq:
947 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
948
949 case block_handler_type_symbol:
950 case block_handler_type_proc:
951 return block_handler;
952 }
953 VM_UNREACHABLE(vm_block_handler_escape);
954 return Qnil;
955}
956
957static VALUE
958vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
959{
960 const VALUE * const ep = cfp->ep;
961 VALUE *env_body, *env_ep;
962 int local_size, env_size;
963
964 if (VM_ENV_ESCAPED_P(ep)) {
965 return VM_ENV_ENVVAL(ep);
966 }
967
968 if (!VM_ENV_LOCAL_P(ep)) {
969 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
970 if (!VM_ENV_ESCAPED_P(prev_ep)) {
971 rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
972
973 while (prev_cfp->ep != prev_ep) {
974 prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
975 VM_ASSERT(prev_cfp->ep != NULL);
976 }
977
978 vm_make_env_each(ec, prev_cfp);
979 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
980 }
981 }
982 else {
983 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
984
985 if (block_handler != VM_BLOCK_HANDLER_NONE) {
986 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
987 VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
988 }
989 }
990
991 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
992 local_size = VM_ENV_DATA_SIZE;
993 }
994 else {
995 local_size = ISEQ_BODY(cfp->iseq)->local_table_size;
996 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
997 int ci_offset = local_size - ISEQ_BODY(cfp->iseq)->param.size + VM_ENV_DATA_SIZE;
998
999 CALL_INFO ci = (CALL_INFO)VM_CF_LEP(cfp)[-ci_offset];
1000 local_size += vm_ci_argc(ci);
1001 }
1002 local_size += VM_ENV_DATA_SIZE;
1003 }
1004
1005 /*
1006 * # local variables on a stack frame (N == local_size)
1007 * [lvar1, lvar2, ..., lvarN, SPECVAL]
1008 * ^
1009 * ep[0]
1010 *
1011 * # moved local variables
1012 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
1013 * ^ ^
1014 * env->env[0] ep[0]
1015 */
1016
1017 env_size = local_size +
1018 1 /* envval */;
1019
1020 // Careful with order in the following sequence. Each allocation can move objects.
1021 env_body = ALLOC_N(VALUE, env_size);
1022 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0);
1023
1024 // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill())
1025 MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
1026
1027 env_ep = &env_body[local_size - 1 /* specval */];
1028 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1029
1030 env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL);
1031 env->ep = env_ep;
1032 env->env = env_body;
1033 env->env_size = env_size;
1034
1035 cfp->ep = env_ep;
1036 VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
1037 VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
1038
1039#if 0
1040 for (i = 0; i < local_size; i++) {
1041 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1042 /* clear value stack for GC */
1043 ep[-local_size + i] = 0;
1044 }
1045 }
1046#endif
1047
1048 // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
1049 if (env->iseq) {
1050 rb_yjit_invalidate_ep_is_bp(env->iseq);
1051 rb_zjit_invalidate_ep_is_bp(env->iseq);
1052 }
1053
1054 return (VALUE)env;
1055}
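// Sizing example (illustrative): for a Ruby frame whose iseq has
// local_table_size == 3 and no forwardable parameters, local_size becomes
// 3 + VM_ENV_DATA_SIZE and env_size is local_size + 1, i.e. the heap env
// holds the locals, the VM_ENV_DATA slots, and one trailing envval slot.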
1056
1057static VALUE
1058vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1059{
1060 VALUE envval = vm_make_env_each(ec, cfp);
1061
1062 if (PROCDEBUG) {
1063 check_env_value((const rb_env_t *)envval);
1064 }
1065
1066 return envval;
1067}
1068
1069void
1070rb_vm_stack_to_heap(rb_execution_context_t *ec)
1071{
1072 rb_control_frame_t *cfp = ec->cfp;
1073 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
1074 vm_make_env_object(ec, cfp);
1075 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1076 }
1077}
1078
1079const rb_env_t *
1080rb_vm_env_prev_env(const rb_env_t *env)
1081{
1082 const VALUE *ep = env->ep;
1083
1084 if (VM_ENV_LOCAL_P(ep)) {
1085 return NULL;
1086 }
1087 else {
1088 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1089 return VM_ENV_ENVVAL_PTR(prev_ep);
1090 }
1091}
1092
1093static int
1094collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
1095{
1096 unsigned int i;
1097 if (!iseq) return 0;
1098 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1099 local_var_list_add(vars, ISEQ_BODY(iseq)->local_table[i]);
1100 }
1101 return 1;
1102}
1103
1104static void
1105collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
1106{
1107 do {
1108 if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
1109 collect_local_variables_in_iseq(env->iseq, vars);
1110 } while ((env = rb_vm_env_prev_env(env)) != NULL);
1111}
1112
1113static int
1114vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
1115{
1116 if (VM_ENV_ESCAPED_P(ep)) {
1117 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
1118 return 1;
1119 }
1120 else {
1121 return 0;
1122 }
1123}
1124
1125VALUE
1126rb_vm_env_local_variables(const rb_env_t *env)
1127{
1128 struct local_var_list vars;
1129 local_var_list_init(&vars);
1130 collect_local_variables_in_env(env, &vars);
1131 return local_var_list_finish(&vars);
1132}
1133
1134VALUE
1135rb_vm_env_numbered_parameters(const rb_env_t *env)
1136{
1137 struct local_var_list vars;
1138 local_var_list_init(&vars);
1139 // if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; // TODO: is this needed?
1140 const rb_iseq_t *iseq = env->iseq;
1141 unsigned int i;
1142 if (!iseq) return 0;
1143 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1144 numparam_list_add(&vars, ISEQ_BODY(iseq)->local_table[i]);
1145 }
1146 return local_var_list_finish(&vars);
1147}
1148
1149VALUE
1150rb_iseq_local_variables(const rb_iseq_t *iseq)
1151{
1152 struct local_var_list vars;
1153 local_var_list_init(&vars);
1154 while (collect_local_variables_in_iseq(iseq, &vars)) {
1155 iseq = ISEQ_BODY(iseq)->parent_iseq;
1156 }
1157 return local_var_list_finish(&vars);
1158}
1159
1160/* Proc */
1161
1162static VALUE
1163vm_proc_create_from_captured(VALUE klass,
1164 const struct rb_captured_block *captured,
1165 enum rb_block_type block_type,
1166 int8_t is_from_method, int8_t is_lambda)
1167{
1168 VALUE procval = rb_proc_alloc(klass);
1169 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1170 const rb_namespace_t *ns = rb_current_namespace();
1171
1172 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
1173
1174 /* copy block */
1175 RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
1176 RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
1177 rb_vm_block_ep_update(procval, &proc->block, captured->ep);
1178
1179 vm_block_type_set(&proc->block, block_type);
1180 proc->ns = ns;
1181 proc->is_from_method = is_from_method;
1182 proc->is_lambda = is_lambda;
1183
1184 return procval;
1185}
1186
1187void
1188rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
1189{
1190 /* copy block */
1191 switch (vm_block_type(src)) {
1192 case block_type_iseq:
1193 case block_type_ifunc:
1194 RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
1195 RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
1196 rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
1197 break;
1198 case block_type_symbol:
1199 RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
1200 break;
1201 case block_type_proc:
1202 RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
1203 break;
1204 }
1205}
1206
1207static VALUE
1208proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
1209{
1210 VALUE procval = rb_proc_alloc(klass);
1211 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1212 const rb_namespace_t *ns = rb_current_namespace();
1213
1214 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
1215 rb_vm_block_copy(procval, &proc->block, block);
1216 vm_block_type_set(&proc->block, block->type);
1217 proc->ns = ns;
1218 proc->is_from_method = is_from_method;
1219 proc->is_lambda = is_lambda;
1220
1221 return procval;
1222}
1223
1224VALUE
1225rb_proc_dup(VALUE self)
1226{
1227 VALUE procval;
1228 rb_proc_t *src;
1229
1230 GetProcPtr(self, src);
1231
1232 switch (vm_block_type(&src->block)) {
1233 case block_type_ifunc:
1234 procval = rb_func_proc_dup(self);
1235 break;
1236 default:
1237 procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
1238 break;
1239 }
1240
1241 if (RB_OBJ_SHAREABLE_P(self)) FL_SET_RAW(procval, RUBY_FL_SHAREABLE);
1242 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
1243 return procval;
1244}
1245
1246struct collect_outer_variable_name_data {
1247 VALUE ary;
1248 VALUE read_only;
1249 bool yield;
1250 bool isolate;
1251};
1252
1253static VALUE
1254ID2NUM(ID id)
1255{
1256 if (SIZEOF_VOIDP > SIZEOF_LONG)
1257 return ULL2NUM(id);
1258 else
1259 return ULONG2NUM(id);
1260}
1261
1262static ID
1263NUM2ID(VALUE num)
1264{
1265 if (SIZEOF_VOIDP > SIZEOF_LONG)
1266 return (ID)NUM2ULL(num);
1267 else
1268 return (ID)NUM2ULONG(num);
1269}
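// These helpers exist because an ID is pointer-sized: on platforms where
// SIZEOF_VOIDP > SIZEOF_LONG (e.g. LLP64 64-bit Windows), IDs only round-trip
// losslessly through unsigned long long, hence the ULL2NUM/NUM2ULL branch.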
1270
1271static enum rb_id_table_iterator_result
1272collect_outer_variable_names(ID id, VALUE val, void *ptr)
1273{
1274 struct collect_outer_variable_name_data *data = (struct collect_outer_variable_name_data *)ptr;
1275
1276 if (id == rb_intern("yield")) {
1277 data->yield = true;
1278 }
1279 else {
1280 VALUE *store;
1281 if (data->isolate ||
1282 val == Qtrue /* write */) {
1283 store = &data->ary;
1284 }
1285 else {
1286 store = &data->read_only;
1287 }
1288 if (*store == Qfalse) *store = rb_ary_new();
1289 rb_ary_push(*store, ID2NUM(id));
1290 }
1291 return ID_TABLE_CONTINUE;
1292}
1293
1294static const rb_env_t *
1295env_copy(const VALUE *src_ep, VALUE read_only_variables)
1296{
1297 const rb_env_t *src_env = (rb_env_t *)VM_ENV_ENVVAL(src_ep);
1298 VM_ASSERT(src_env->ep == src_ep);
1299
1300 VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
1301 VALUE *ep = &env_body[src_env->env_size - 2];
1302 const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
1303
1304 // Copy after allocations above, since they can move objects in src_ep.
1305 RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], src_ep[VM_ENV_DATA_INDEX_ME_CREF]);
1306 ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
1307 if (!VM_ENV_LOCAL_P(src_ep)) {
1308 VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
1309 }
1310
1311 if (read_only_variables) {
1312 for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1313 ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
1314
1315 for (unsigned int j=0; j<ISEQ_BODY(src_env->iseq)->local_table_size; j++) {
1316 if (id == ISEQ_BODY(src_env->iseq)->local_table[j]) {
1317 VALUE v = src_env->env[j];
1318 if (!rb_ractor_shareable_p(v)) {
1319 VALUE name = rb_id2str(id);
1320 VALUE msg = rb_sprintf("can not make shareable Proc because it can refer"
1321 " unshareable object %+" PRIsVALUE " from ", v);
1322 if (name)
1323 rb_str_catf(msg, "variable '%" PRIsVALUE "'", name);
1324 else
1325 rb_str_cat_cstr(msg, "a hidden variable");
1326 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1327 }
1328 RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v);
1329 rb_ary_delete_at(read_only_variables, i);
1330 break;
1331 }
1332 }
1333 }
1334 }
1335
1336 if (!VM_ENV_LOCAL_P(src_ep)) {
1337 const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
1338 const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1339 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
1340 RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env);
1341 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL);
1342 }
1343 else {
1344 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
1345 }
1346
1347 return copied_env;
1348}
1349
1350static void
1351proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
1352{
1353 const struct rb_captured_block *captured = &proc->block.as.captured;
1354 const rb_env_t *env = env_copy(captured->ep, read_only_variables);
1355 *((const VALUE **)&proc->block.as.captured.ep) = env->ep;
1356 RB_OBJ_WRITTEN(self, Qundef, env);
1357}
1358
1359static VALUE
1360proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, const char *message)
1361{
1362 struct collect_outer_variable_name_data data = {
1363 .isolate = isolate,
1364 .ary = Qfalse,
1365 .read_only = Qfalse,
1366 .yield = false,
1367 };
1368 rb_id_table_foreach(outer_variables, collect_outer_variable_names, (void *)&data);
1369
1370 if (data.ary != Qfalse) {
1371 VALUE str = rb_sprintf("can not %s because it accesses outer variables", message);
1372 VALUE ary = data.ary;
1373 const char *sep = " (";
1374 for (long i = 0; i < RARRAY_LEN(ary); i++) {
1375 VALUE name = rb_id2str(NUM2ID(RARRAY_AREF(ary, i)));
1376 if (!name) continue;
1377 rb_str_cat_cstr(str, sep);
1378 sep = ", ";
1379 rb_str_append(str, name);
1380 }
1381 if (*sep == ',') rb_str_cat_cstr(str, ")");
1382 rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : ".");
1383 rb_exc_raise(rb_exc_new_str(rb_eArgError, str));
1384 }
1385 else if (data.yield) {
1386 rb_raise(rb_eArgError, "can not %s because it uses 'yield'.", message);
1387 }
1388
1389 return data.read_only;
1390}
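// Ruby-level effect (illustrative): given `a = 1; pr = -> { a = 2 }`,
// Ractor.make_shareable(pr) reaches this function and raises ArgumentError
// ("can not make a Proc shareable because it accesses outer variables (a)."),
// because assigned outer variables land in data.ary; variables that are only
// read are returned via data.read_only and copied into the isolated env.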
1391
1392VALUE
1393rb_proc_isolate_bang(VALUE self)
1394{
1395 const rb_iseq_t *iseq = vm_proc_iseq(self);
1396
1397 if (iseq) {
1398 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1399 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1400
1401 if (ISEQ_BODY(iseq)->outer_variables) {
1402 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, true, "isolate a Proc");
1403 }
1404
1405 proc_isolate_env(self, proc, Qfalse);
1406 proc->is_isolated = TRUE;
1407 }
1408
1409 FL_SET_RAW(self, RUBY_FL_SHAREABLE);
1410 return self;
1411}
1412
1413VALUE
1414rb_proc_isolate(VALUE self)
1415{
1416 VALUE dst = rb_proc_dup(self);
1417 rb_proc_isolate_bang(dst);
1418 return dst;
1419}
1420
1421VALUE
1422rb_proc_ractor_make_shareable(VALUE self)
1423{
1424 const rb_iseq_t *iseq = vm_proc_iseq(self);
1425
1426 if (iseq) {
1427 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1428 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1429
1430 if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
1431 rb_raise(rb_eRactorIsolationError,
1432 "Proc's self is not shareable: %" PRIsVALUE,
1433 self);
1434 }
1435
1436 VALUE read_only_variables = Qfalse;
1437
1438 if (ISEQ_BODY(iseq)->outer_variables) {
1439 read_only_variables =
1440 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, false, "make a Proc shareable");
1441 }
1442
1443 proc_isolate_env(self, proc, read_only_variables);
1444 proc->is_isolated = TRUE;
1445 }
1446
1447 rb_obj_freeze(self);
1448 return self;
1449}
1450
1451VALUE
1452rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
1453{
1454 VALUE procval;
1455 enum imemo_type code_type = imemo_type(captured->code.val);
1456
1457 if (!VM_ENV_ESCAPED_P(captured->ep)) {
1458 rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
1459 vm_make_env_object(ec, cfp);
1460 }
1461
1462 VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
1463 VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc);
1464
1465 procval = vm_proc_create_from_captured(klass, captured,
1466 code_type == imemo_iseq ? block_type_iseq : block_type_ifunc,
1467 FALSE, is_lambda);
1468
1469 if (code_type == imemo_ifunc) {
1470 struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val;
1471 if (ifunc->svar_lep) {
1472 VALUE ep0 = ifunc->svar_lep[0];
1473 if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) {
1474 // `ep0 == imemo_env` means this ep is escaped to heap (in env object).
1475 const rb_env_t *env = (const rb_env_t *)ep0;
1476 ifunc->svar_lep = (VALUE *)env->ep;
1477 }
1478 else {
1479 VM_ASSERT(FIXNUM_P(ep0));
1480 if (ep0 & VM_ENV_FLAG_ESCAPED) {
1481 // ok. do nothing
1482 }
1483 else {
1484 ifunc->svar_lep = NULL;
1485 }
1486 }
1487 }
1488 }
1489
1490 return procval;
1491}
1492
1493/* Binding */
1494
1495VALUE
1496rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
1497{
1498 rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(ec, src_cfp);
1499 rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
1500 VALUE bindval, envval;
1501 rb_binding_t *bind;
1502
1503 if (cfp == 0 || ruby_level_cfp == 0) {
1504 rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
1505 }
1506 if (!VM_FRAME_RUBYFRAME_P(src_cfp) &&
1507 !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) {
1508 rb_raise(rb_eRuntimeError, "Cannot create Binding object for non-Ruby caller");
1509 }
1510
1511 envval = vm_make_env_object(ec, cfp);
1512 bindval = rb_binding_alloc(rb_cBinding);
1513 GetBindingPtr(bindval, bind);
1514 vm_bind_update_env(bindval, bind, envval);
1515 RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
1516 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
1517 RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(ruby_level_cfp->iseq)->location.pathobj);
1518 bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
1519
1520 return bindval;
1521}
1522
1523const VALUE *
1524rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
1525{
1526 VALUE envval, pathobj = bind->pathobj;
1527 VALUE path = pathobj_path(pathobj);
1528 VALUE realpath = pathobj_realpath(pathobj);
1529 const struct rb_block *base_block;
1530 const rb_env_t *env;
1531 rb_execution_context_t *ec = GET_EC();
1532 const rb_iseq_t *base_iseq, *iseq;
1533 rb_node_scope_t tmp_node;
1534
1535 if (dyncount < 0) return 0;
1536
1537 base_block = &bind->block;
1538 base_iseq = vm_block_iseq(base_block);
1539
1540 VALUE idtmp = 0;
1541 rb_ast_id_table_t *dyns = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + dyncount * sizeof(ID));
1542 dyns->size = dyncount;
1543 MEMCPY(dyns->ids, dynvars, ID, dyncount);
1544
1545 rb_node_init(RNODE(&tmp_node), NODE_SCOPE);
1546 tmp_node.nd_tbl = dyns;
1547 tmp_node.nd_body = 0;
1548 tmp_node.nd_args = 0;
1549
1550 VALUE ast_value = rb_ruby_ast_new(RNODE(&tmp_node));
1551
1552 if (base_iseq) {
1553 iseq = rb_iseq_new(ast_value, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1554 }
1555 else {
1556 VALUE tempstr = rb_fstring_lit("<temp>");
1557 iseq = rb_iseq_new_top(ast_value, tempstr, tempstr, tempstr, NULL);
1558 }
1559 tmp_node.nd_tbl = 0; /* reset table */
1560 ALLOCV_END(idtmp);
1561
1562 vm_set_eval_stack(ec, iseq, 0, base_block);
1563 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1564 rb_vm_pop_frame(ec);
1565
1566 env = (const rb_env_t *)envval;
1567 return env->env;
1568}
1569
1570/* C -> Ruby: block */
1571
1572static inline void
1573invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1574{
1575 int arg_size = ISEQ_BODY(iseq)->param.size;
1576
1577 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1578 VM_GUARDED_PREV_EP(captured->ep),
1579 (VALUE)cref, /* cref or method */
1580 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1581 ec->cfp->sp + arg_size,
1582 ISEQ_BODY(iseq)->local_table_size - arg_size,
1583 ISEQ_BODY(iseq)->stack_max);
1584}
1585
1586static inline void
1587invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1588{
1589 /* bmethod call from outside the VM */
1590 int arg_size = ISEQ_BODY(iseq)->param.size;
1591
1592 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
1593
1594 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1595 VM_GUARDED_PREV_EP(captured->ep),
1596 (VALUE)me,
1597 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1598 ec->cfp->sp + 1 /* self */ + arg_size,
1599 ISEQ_BODY(iseq)->local_table_size - arg_size,
1600 ISEQ_BODY(iseq)->stack_max);
1601
1602 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1603}
1604
1605ALWAYS_INLINE(static VALUE
1606 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1607 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1608 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1609
1610static inline VALUE
1611invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1612 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1613 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1614{
1615 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1616 int opt_pc;
1617 VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
1618 rb_control_frame_t *cfp = ec->cfp;
1619 VALUE *sp = cfp->sp;
1620 int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
1621 VALUE *use_argv = (VALUE *)argv;
1622 VALUE av[2];
1623
1624 stack_check(ec);
1625
1626 if (UNLIKELY(argc > VM_ARGC_STACK_MAX) &&
1627 (VM_ARGC_STACK_MAX >= 1 ||
1628 /* Skip ruby array for potential autosplat case */
1629 (argc != 1 || is_lambda))) {
1630 use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
1631 }
1632
1633 CHECK_VM_STACK_OVERFLOW(cfp, argc + 1);
1634 vm_check_canary(ec, sp);
1635
1636 VALUE *stack_argv = sp;
1637 if (me) {
1638 *sp = self; // bmethods need `self` on the VM stack
1639 stack_argv++;
1640 }
1641 cfp->sp = stack_argv + argc;
1642 MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space
1643
1644 opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler,
1645 (is_lambda ? arg_setup_method : arg_setup_block));
1646 cfp->sp = sp;
1647
1648 if (me == NULL) {
1649 invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1650 }
1651 else {
1652 invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1653 }
1654
1655 return vm_exec(ec);
1656}
1657
1658static VALUE
1659invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1660 int argc, const VALUE *argv,
1661 int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1662 int is_lambda, int force_blockarg)
1663{
1664 again:
1665 switch (vm_block_handler_type(block_handler)) {
1666 case block_handler_type_iseq:
1667 {
1668 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1669 return invoke_iseq_block_from_c(ec, captured, captured->self,
1670 argc, argv, kw_splat, passed_block_handler,
1671 cref, is_lambda, NULL);
1672 }
1673 case block_handler_type_ifunc:
1674 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1675 VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1676 argc, argv, kw_splat, passed_block_handler, NULL);
1677 case block_handler_type_symbol:
1678 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1679 argc, argv, kw_splat, passed_block_handler);
1680 case block_handler_type_proc:
1681 if (force_blockarg == FALSE) {
1682 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1683 }
1684 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1685 goto again;
1686 }
1687 VM_UNREACHABLE(invoke_block_from_c_splattable);
1688 return Qundef;
1689}
1690
1691static inline VALUE
1692check_block_handler(rb_execution_context_t *ec)
1693{
1694 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1695 vm_block_handler_verify(block_handler);
1696 if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1697 rb_vm_localjump_error("no block given", Qnil, 0);
1698 }
1699
1700 return block_handler;
1701}
1702
1703static VALUE
1704vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1705{
1706 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1707 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1708 cref, is_lambda, FALSE);
1709}
1710
1711static VALUE
1712vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1713{
1714 return vm_yield_with_cref(ec, argc, argv, kw_splat, NULL, FALSE);
1715}
1716
1717static VALUE
1718vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1719{
1720 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1721 argc, argv, kw_splat, block_handler,
1722 NULL, FALSE, FALSE);
1723}
1724
1725static VALUE
1726vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1727{
1728 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1729 RB_NO_KEYWORDS, VM_BLOCK_HANDLER_NONE, NULL, FALSE, TRUE);
1730}
1731
1732ALWAYS_INLINE(static VALUE
1733 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1734 VALUE self, int argc, const VALUE *argv,
1735 int kw_splat, VALUE passed_block_handler, int is_lambda,
1736 const rb_callable_method_entry_t *me));
1737
1738static inline VALUE
1739invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1740 VALUE self, int argc, const VALUE *argv,
1741 int kw_splat, VALUE passed_block_handler, int is_lambda,
1742 const rb_callable_method_entry_t *me)
1743{
1744 const struct rb_block *block = &proc->block;
1745
1746 again:
1747 switch (vm_block_type(block)) {
1748 case block_type_iseq:
1749 return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1750 case block_type_ifunc:
1751 if (kw_splat == 1) {
1752 VALUE keyword_hash = argv[argc-1];
1753 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
1754 keyword_hash = rb_to_hash_type(keyword_hash);
1755 }
1756 if (RHASH_EMPTY_P(keyword_hash)) {
1757 argc--;
1758 }
1759 else {
1760 ((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
1761 }
1762 }
1763 return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1764 case block_type_symbol:
1765 return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1766 case block_type_proc:
1767 is_lambda = block_proc_is_lambda(block->as.proc);
1768 block = vm_proc_block(block->as.proc);
1769 goto again;
1770 }
1771 VM_UNREACHABLE(invoke_block_from_c_proc);
1772 return Qundef;
1773}
1774
1775static VALUE
1776vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1777 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1778{
1779 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1780}
1781
1782static VALUE
1783vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1784 int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1785{
1786 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1787}
1788
1789VALUE
1790rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
1791 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1792{
1793 VALUE self = vm_block_self(&proc->block);
1794 vm_block_handler_verify(passed_block_handler);
1795
1796 if (proc->is_from_method) {
1797 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1798 }
1799 else {
1800 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1801 }
1802}
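// Illustrative call from C (hypothetical setup; `proc` obtained via
// GetProcPtr on a Proc VALUE): invoke with two positional arguments,
// no keywords, and no block:
//
//     VALUE args[2] = {INT2FIX(1), INT2FIX(2)};
//     VALUE ret = rb_vm_invoke_proc(GET_EC(), proc, 2, args,
//                                   RB_NO_KEYWORDS, VM_BLOCK_HANDLER_NONE);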
1803
1804VALUE
1805rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1806 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1807{
1808 vm_block_handler_verify(passed_block_handler);
1809
1810 if (proc->is_from_method) {
1811 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1812 }
1813 else {
1814 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1815 }
1816}
1817
1818/* special variable */
1819
1820VALUE *
1821rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1822{
1823 while (cfp->pc == 0 || cfp->iseq == 0) {
1824 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) {
1825 struct vm_ifunc *ifunc = (struct vm_ifunc *)cfp->iseq;
1826 return ifunc->svar_lep;
1827 }
1828 else {
1829 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1830 }
1831
1832 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1833 return NULL;
1834 }
1835 }
1836
1837 return (VALUE *)VM_CF_LEP(cfp);
1838}
1839
1840static VALUE
1841vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
1842{
1843 return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key);
1844}
1845
1846static void
1847vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
1848{
1849 lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val);
1850}
1851
1852static VALUE
1853vm_svar_get(const rb_execution_context_t *ec, VALUE key)
1854{
1855 return vm_cfp_svar_get(ec, ec->cfp, key);
1856}
1857
1858static void
1859vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
1860{
1861 vm_cfp_svar_set(ec, ec->cfp, key, val);
1862}
1863
1864VALUE
1865rb_backref_get(void)
1866{
1867 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
1868}
1869
1870void
1871rb_backref_set(VALUE val)
1872{
1873 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
1874}
1875
1876VALUE
1877rb_lastline_get(void)
1878{
1879 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
1880}
1881
1882void
1883rb_lastline_set(VALUE val)
1884{
1885 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
1886}
1887
1888void
1889rb_lastline_set_up(VALUE val, unsigned int up)
1890{
1891 rb_control_frame_t * cfp = GET_EC()->cfp;
1892
1893 for (unsigned int i = 0; i < up; i++) {
1894 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1895 }
1896 vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val);
1897}
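/* Illustrative sketch (not from this file): how the svar accessors above
 * surface at the Ruby level. The Ruby lines are an assumed typical use.
 *
 *   "haystack" =~ /stack/  # backref svar set; rb_backref_get() -> MatchData
 *   $~                     # reads VM_SVAR_BACKREF from the local EP
 *   $_ = "line"            # rb_lastline_set("line")
 *   $_                     # rb_lastline_get() -> "line"
 *
 * Both are frame-local: rb_vm_svar_lep() walks to the local EP, so a method
 * sees its own $~ and $_, not its caller's.
 */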
1898
1899/* misc */
1900
1901const char *
1902rb_sourcefile(void)
1903{
1904 const rb_execution_context_t *ec = GET_EC();
1905 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1906
1907 if (cfp) {
1908 return RSTRING_PTR(rb_iseq_path(cfp->iseq));
1909 }
1910 else {
1911 return 0;
1912 }
1913}
1914
1915int
1916rb_sourceline(void)
1917{
1918 const rb_execution_context_t *ec = GET_EC();
1919 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1920
1921 if (cfp) {
1922 return rb_vm_get_sourceline(cfp);
1923 }
1924 else {
1925 return 0;
1926 }
1927}
1928
1929VALUE
1930rb_source_location(int *pline)
1931{
1932 const rb_execution_context_t *ec = GET_EC();
1933 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1934
1935 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
1936 if (pline) *pline = rb_vm_get_sourceline(cfp);
1937 return rb_iseq_path(cfp->iseq);
1938 }
1939 else {
1940 if (pline) *pline = 0;
1941 return Qnil;
1942 }
1943}
1944
1945const char *
1946rb_source_location_cstr(int *pline)
1947{
1948 VALUE path = rb_source_location(pline);
1949 if (NIL_P(path)) return NULL;
1950 return RSTRING_PTR(path);
1951}
1952
1953rb_cref_t *
1954rb_vm_cref(void)
1955{
1956 const rb_execution_context_t *ec = GET_EC();
1957 return vm_ec_cref(ec);
1958}
1959
1960rb_cref_t *
1961rb_vm_cref_replace_with_duplicated_cref(void)
1962{
1963 const rb_execution_context_t *ec = GET_EC();
1964 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1965 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
1966 ASSUME(cref);
1967 return cref;
1968}
1969
1970const rb_cref_t *
1971rb_vm_cref_in_context(VALUE self, VALUE cbase)
1972{
1973 const rb_execution_context_t *ec = GET_EC();
1974 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
1975 const rb_cref_t *cref;
1976 if (!cfp || cfp->self != self) return NULL;
1977 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
1978 cref = vm_get_cref(cfp->ep);
1979 if (CREF_CLASS(cref) != cbase) return NULL;
1980 return cref;
1981}
1982
1983#if 0
1984void
1985debug_cref(rb_cref_t *cref)
1986{
1987 while (cref) {
1988 dp(CREF_CLASS(cref));
1989 printf("%ld\n", CREF_VISI(cref));
1990 cref = CREF_NEXT(cref);
1991 }
1992}
1993#endif
1994
1995VALUE
1996rb_vm_cbase(void)
1997{
1998 const rb_execution_context_t *ec = GET_EC();
1999 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2000
2001 if (cfp == 0) {
2002 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
2003 }
2004 return vm_get_cbase(cfp->ep);
2005}
2006
2007/* jump */
2008
2009static VALUE
2010make_localjump_error(const char *mesg, VALUE value, int reason)
2011{
2012 VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
2014 ID id;
2015
2016 switch (reason) {
2017 case TAG_BREAK:
2018 CONST_ID(id, "break");
2019 break;
2020 case TAG_REDO:
2021 CONST_ID(id, "redo");
2022 break;
2023 case TAG_RETRY:
2024 CONST_ID(id, "retry");
2025 break;
2026 case TAG_NEXT:
2027 CONST_ID(id, "next");
2028 break;
2029 case TAG_RETURN:
2030 CONST_ID(id, "return");
2031 break;
2032 default:
2033 CONST_ID(id, "noreason");
2034 break;
2035 }
2036 rb_iv_set(exc, "@exit_value", value);
2037 rb_iv_set(exc, "@reason", ID2SYM(id));
2038 return exc;
2039}
2040
2041void
2042rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
2043{
2044 VALUE exc = make_localjump_error(mesg, value, reason);
2045 rb_exc_raise(exc);
2046}
2047
2048VALUE
2049rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val)
2050{
2051 const char *mesg;
2052
2053 switch (state) {
2054 case TAG_RETURN:
2055 mesg = "unexpected return";
2056 break;
2057 case TAG_BREAK:
2058 mesg = "unexpected break";
2059 break;
2060 case TAG_NEXT:
2061 mesg = "unexpected next";
2062 break;
2063 case TAG_REDO:
2064 mesg = "unexpected redo";
2065 val = Qnil;
2066 break;
2067 case TAG_RETRY:
2068 mesg = "retry outside of rescue clause";
2069 val = Qnil;
2070 break;
2071 default:
2072 return Qnil;
2073 }
2074 if (UNDEF_P(val)) {
2075 val = GET_EC()->tag->retval;
2076 }
2077 return make_localjump_error(mesg, val, state);
2078}
2079
2080void
2081rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state)
2082{
2083 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
2084 if (!NIL_P(exc)) rb_exc_raise(exc);
2085 EC_JUMP_TAG(GET_EC(), state);
2086}
2087
2088static rb_control_frame_t *
2089next_not_local_frame(rb_control_frame_t *cfp)
2090{
2091 while (VM_ENV_LOCAL_P(cfp->ep)) {
2092 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2093 }
2094 return cfp;
2095}
2096
2097NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
2098
2099static void
2100vm_iter_break(rb_execution_context_t *ec, VALUE val)
2101{
2102 rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
2103 const VALUE *ep = VM_CF_PREV_EP(cfp);
2104 const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
2105
2106 if (!target_cfp) {
2107 rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
2108 }
2109
2110 ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
2111 EC_JUMP_TAG(ec, TAG_BREAK);
2112}
2113
2114void
2115rb_iter_break(void)
2116{
2117 vm_iter_break(GET_EC(), Qnil);
2118}
2119
2120void
2121rb_iter_break_value(VALUE val)
2122{
2123 vm_iter_break(GET_EC(), val);
2124}
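/* Sketch of the C-level counterpart to Ruby's `break` (hypothetical extension
 * code, not part of this file): a block function passed to rb_block_call can
 * stop the enclosing iteration early.
 *
 *   static VALUE
 *   first_even_i(RB_BLOCK_CALL_FUNC_ARGLIST(elt, data))
 *   {
 *       if (RTEST(rb_funcall(elt, rb_intern("even?"), 0))) {
 *           rb_iter_break_value(elt);   // unwinds like `break elt`
 *       }
 *       return Qnil;
 *   }
 *
 * When no enclosing block frame can accept the break, vm_iter_break() above
 * raises LocalJumpError ("unexpected break").
 */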
2125
2126/* optimization: redefine management */
2127
2128short ruby_vm_redefined_flag[BOP_LAST_];
2129static st_table *vm_opt_method_def_table = 0;
2130static st_table *vm_opt_mid_table = 0;
2131
2132void
2133rb_free_vm_opt_tables(void)
2134{
2135 st_free_table(vm_opt_method_def_table);
2136 st_free_table(vm_opt_mid_table);
2137}
2138
2139static int
2140vm_redefinition_check_flag(VALUE klass)
2141{
2142 if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
2143 if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
2144 if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
2145 if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
2146 if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
2147 if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
2148#if 0
2149 if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
2150#endif
2151 if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
2152 if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
2153 if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
2154 if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
2155 if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
2156 return 0;
2157}
2158
2159int
2160rb_vm_check_optimizable_mid(VALUE mid)
2161{
2162 if (!vm_opt_mid_table) {
2163 return FALSE;
2164 }
2165
2166 return st_lookup(vm_opt_mid_table, mid, NULL);
2167}
2168
2169static int
2170vm_redefinition_check_method_type(const rb_method_entry_t *me)
2171{
2172 if (me->called_id != me->def->original_id) {
2173 return FALSE;
2174 }
2175
2176 if (METHOD_ENTRY_BASIC(me)) return TRUE;
2177
2178 const rb_method_definition_t *def = me->def;
2179 switch (def->type) {
2180 case VM_METHOD_TYPE_CFUNC:
2181 case VM_METHOD_TYPE_OPTIMIZED:
2182 return TRUE;
2183 default:
2184 return FALSE;
2185 }
2186}
2187
2188static void
2189rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
2190{
2191 st_data_t bop;
2192 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
2193 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
2194 klass = RBASIC_CLASS(klass);
2195 }
2196 if (vm_redefinition_check_method_type(me)) {
2197 if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
2198 int flag = vm_redefinition_check_flag(klass);
2199 if (flag != 0) {
2200 rb_category_warn(
2201 RB_WARN_CATEGORY_PERFORMANCE,
2202 "Redefining '%s#%s' disables interpreter and JIT optimizations",
2203 rb_class2name(me->owner),
2204 rb_id2name(me->called_id)
2205 );
2206 rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2207 rb_zjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2208 ruby_vm_redefined_flag[bop] |= flag;
2209 }
2210 }
2211 }
2212}
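/* Ruby-level sketch of what triggers the check above (assuming the
 * performance warning category is enabled, e.g. Warning[:performance] = true
 * on recent rubies):
 *
 *   class String
 *     def freeze = self   # redefines a tracked basic operation (BOP_FREEZE)
 *   end
 *   # warning: Redefining 'String#freeze' disables interpreter and JIT
 *   #          optimizations
 *
 * Afterwards ruby_vm_redefined_flag[BOP_FREEZE] carries
 * STRING_REDEFINED_OP_FLAG, and YJIT/ZJIT are told to invalidate code that
 * assumed the original definition.
 */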
2213
2214static enum rb_id_table_iterator_result
2215check_redefined_method(ID mid, VALUE value, void *data)
2216{
2217 VALUE klass = (VALUE)data;
2218 const rb_method_entry_t *me = (rb_method_entry_t *)value;
2219 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
2220
2221 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
2222
2223 return ID_TABLE_CONTINUE;
2224}
2225
2226void
2227rb_vm_check_redefinition_by_prepend(VALUE klass)
2228{
2229 if (!vm_redefinition_check_flag(klass)) return;
2230 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
2231}
2232
2233static void
2234add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop)
2235{
2236 st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
2237 st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
2238}
2239
2240static void
2241add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop)
2242{
2243 const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
2244
2245 if (me && vm_redefinition_check_method_type(me)) {
2246 add_opt_method_entry_bop(me, mid, bop);
2247 }
2248 else {
2249 rb_bug("undefined optimized method: %s", rb_id2name(mid));
2250 }
2251}
2252
2253static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid);
2254
2255static void
2256add_opt_method_entry(const rb_method_entry_t *me)
2257{
2258 if (me && vm_redefinition_check_method_type(me)) {
2259 ID mid = me->called_id;
2260 enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid);
2261 if ((int)bop >= 0) {
2262 add_opt_method_entry_bop(me, mid, bop);
2263 }
2264 }
2265}
2266
2267static void
2268vm_init_redefined_flag(void)
2269{
2270 ID mid;
2271 enum ruby_basic_operators bop;
2272
2273#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
2274#define C(k) add_opt_method(rb_c##k, mid, bop)
2275 OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
2276 OP(MINUS, MINUS), (C(Integer), C(Float));
2277 OP(MULT, MULT), (C(Integer), C(Float));
2278 OP(DIV, DIV), (C(Integer), C(Float));
2279 OP(MOD, MOD), (C(Integer), C(Float));
2280 OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
2281 OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
2282 C(NilClass), C(TrueClass), C(FalseClass));
2283 OP(LT, LT), (C(Integer), C(Float));
2284 OP(LE, LE), (C(Integer), C(Float));
2285 OP(GT, GT), (C(Integer), C(Float));
2286 OP(GE, GE), (C(Integer), C(Float));
2287 OP(LTLT, LTLT), (C(String), C(Array));
2288 OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
2289 OP(ASET, ASET), (C(Array), C(Hash));
2290 OP(Length, LENGTH), (C(Array), C(String), C(Hash));
2291 OP(Size, SIZE), (C(Array), C(String), C(Hash));
2292 OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
2293 OP(Succ, SUCC), (C(Integer), C(String));
2294 OP(EqTilde, MATCH), (C(Regexp), C(String));
2295 OP(Freeze, FREEZE), (C(String), C(Array), C(Hash));
2296 OP(UMinus, UMINUS), (C(String));
2297 OP(Max, MAX), (C(Array));
2298 OP(Min, MIN), (C(Array));
2299 OP(Hash, HASH), (C(Array));
2300 OP(Call, CALL), (C(Proc));
2301 OP(And, AND), (C(Integer));
2302 OP(Or, OR), (C(Integer));
2303 OP(NilP, NIL_P), (C(NilClass));
2304 OP(Cmp, CMP), (C(Integer), C(Float), C(String));
2305 OP(Default, DEFAULT), (C(Hash));
2306 OP(IncludeP, INCLUDE_P), (C(Array));
2307#undef C
2308#undef OP
2309}
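/* For reference, one row of the table above expands mechanically through the
 * OP/C macros. For example,
 *
 *   OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
 *
 * becomes
 *
 *   (mid = idPLUS, bop = BOP_PLUS, ruby_vm_redefined_flag[bop] = 0),
 *   (add_opt_method(rb_cInteger, mid, bop),
 *    add_opt_method(rb_cFloat, mid, bop),
 *    add_opt_method(rb_cString, mid, bop),
 *    add_opt_method(rb_cArray, mid, bop));
 *
 * registering each (class, method) pair the interpreter and JITs may treat
 * as an unredefined basic operation.
 */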
2310
2311static enum ruby_basic_operators
2312vm_redefinition_bop_for_id(ID mid)
2313{
2314 switch (mid) {
2315#define OP(mid_, bop_) case id##mid_: return BOP_##bop_
2316 OP(PLUS, PLUS);
2317 OP(MINUS, MINUS);
2318 OP(MULT, MULT);
2319 OP(DIV, DIV);
2320 OP(MOD, MOD);
2321 OP(Eq, EQ);
2322 OP(Eqq, EQQ);
2323 OP(LT, LT);
2324 OP(LE, LE);
2325 OP(GT, GT);
2326 OP(GE, GE);
2327 OP(LTLT, LTLT);
2328 OP(AREF, AREF);
2329 OP(ASET, ASET);
2330 OP(Length, LENGTH);
2331 OP(Size, SIZE);
2332 OP(EmptyP, EMPTY_P);
2333 OP(Succ, SUCC);
2334 OP(EqTilde, MATCH);
2335 OP(Freeze, FREEZE);
2336 OP(UMinus, UMINUS);
2337 OP(Max, MAX);
2338 OP(Min, MIN);
2339 OP(Hash, HASH);
2340 OP(Call, CALL);
2341 OP(And, AND);
2342 OP(Or, OR);
2343 OP(NilP, NIL_P);
2344 OP(Cmp, CMP);
2345 OP(Default, DEFAULT);
2346 OP(Pack, PACK);
2347#undef OP
2348 }
2349 return -1;
2350}
2351
2352/* for vm development */
2353
2354#if VMDEBUG
2355static const char *
2356vm_frametype_name(const rb_control_frame_t *cfp)
2357{
2358 switch (VM_FRAME_TYPE(cfp)) {
2359 case VM_FRAME_MAGIC_METHOD: return "method";
2360 case VM_FRAME_MAGIC_BLOCK: return "block";
2361 case VM_FRAME_MAGIC_CLASS: return "class";
2362 case VM_FRAME_MAGIC_TOP: return "top";
2363 case VM_FRAME_MAGIC_CFUNC: return "cfunc";
2364 case VM_FRAME_MAGIC_IFUNC: return "ifunc";
2365 case VM_FRAME_MAGIC_EVAL: return "eval";
2366 case VM_FRAME_MAGIC_RESCUE: return "rescue";
2367 default:
2368 rb_bug("unknown frame");
2369 }
2370}
2371#endif
2372
2373static VALUE
2374frame_return_value(const struct vm_throw_data *err)
2375{
2376 if (THROW_DATA_P(err) &&
2377 THROW_DATA_STATE(err) == TAG_BREAK &&
2378 THROW_DATA_CONSUMED_P(err) == FALSE) {
2379 return THROW_DATA_VAL(err);
2380 }
2381 else {
2382 return Qnil;
2383 }
2384}
2385
2386#if 0
2387/* for debug */
2388static const char *
2389frame_name(const rb_control_frame_t *cfp)
2390{
2391 unsigned long type = VM_FRAME_TYPE(cfp);
2392#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2393 C(METHOD);
2394 C(BLOCK);
2395 C(CLASS);
2396 C(TOP);
2397 C(CFUNC);
2398 C(PROC);
2399 C(IFUNC);
2400 C(EVAL);
2401 C(LAMBDA);
2402 C(RESCUE);
2403 C(DUMMY);
2404#undef C
2405 return "unknown";
2406}
2407#endif
2408
2409// cfp_returning_with_value:
2410// Whether cfp is the last frame in the unwinding process for a non-local return.
2411static void
2412hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err)
2413{
2414 if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
2415 return;
2416 }
2417 else {
2418 const rb_iseq_t *iseq = ec->cfp->iseq;
2419 rb_hook_list_t *local_hooks = iseq->aux.exec.local_hooks;
2420
2421 switch (VM_FRAME_TYPE(ec->cfp)) {
2422 case VM_FRAME_MAGIC_METHOD:
2423 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
2424 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2425
2426 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2427 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
2428 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2429 }
2430
2431 THROW_DATA_CONSUMED_SET(err);
2432 break;
2433 case VM_FRAME_MAGIC_BLOCK:
2434 if (VM_FRAME_BMETHOD_P(ec->cfp)) {
2435 VALUE bmethod_return_value = frame_return_value(err);
2436 if (cfp_returning_with_value) {
2437 // Non-local return terminating at a BMETHOD control frame.
2438 bmethod_return_value = THROW_DATA_VAL(err);
2439 }
2440
2441
2442 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
2443 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2444 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2445 ec->cfp->self, 0, 0, 0, bmethod_return_value, TRUE);
2446 }
2447
2448 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
2449
2450 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
2451 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2452 rb_vm_frame_method_entry(ec->cfp)->called_id,
2453 rb_vm_frame_method_entry(ec->cfp)->owner,
2454 bmethod_return_value);
2455
2456 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
2457 local_hooks = me->def->body.bmethod.hooks;
2458
2459 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2460 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
2461 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2462 rb_vm_frame_method_entry(ec->cfp)->called_id,
2463 rb_vm_frame_method_entry(ec->cfp)->owner,
2464 bmethod_return_value, TRUE);
2465 }
2466 THROW_DATA_CONSUMED_SET(err);
2467 }
2468 else {
2469 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2470 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2471 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2472 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2473 }
2474 THROW_DATA_CONSUMED_SET(err);
2475 }
2476 break;
2477 case VM_FRAME_MAGIC_CLASS:
2478 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
2479 break;
2480 }
2481 }
2482}
2483
2484/* evaluator body */
2485
2486/* finish
2487   VMe (h1)          finish
2488     VM              finish F1 F2
2489       cfunc         finish F1 F2 C1
2490         rb_funcall  finish F1 F2 C1
2491           VMe       finish F1 F2 C1
2492             VM      finish F1 F2 C1 F3
2493
2494 F1 - F3 : pushed by VM
2495 C1 : pushed by send insn (CFUNC)
2496
2497 struct CONTROL_FRAME {
2498 VALUE *pc; // cfp[0], program counter
2499 VALUE *sp; // cfp[1], stack pointer
2500 rb_iseq_t *iseq; // cfp[2], iseq
2501 VALUE self; // cfp[3], self
2502 const VALUE *ep; // cfp[4], env pointer
2503 const void *block_code; // cfp[5], block code
2504 };
2505
2506 struct rb_captured_block {
2507 VALUE self;
2508 VALUE *ep;
2509 union code;
2510 };
2511
2512 struct METHOD_ENV {
2513 VALUE param0;
2514 ...
2515 VALUE paramN;
2516 VALUE lvar1;
2517 ...
2518 VALUE lvarM;
2519 VALUE cref; // ep[-2]
2520 VALUE special; // ep[-1]
2521 VALUE flags; // ep[ 0] == lep[0]
2522 };
2523
2524 struct BLOCK_ENV {
2525 VALUE block_param0;
2526 ...
2527 VALUE block_paramN;
2528 VALUE block_lvar1;
2529 ...
2530 VALUE block_lvarM;
2531 VALUE cref; // ep[-2]
2532 VALUE special; // ep[-1]
2533 VALUE flags; // ep[ 0]
2534 };
2535
2536 struct CLASS_ENV {
2537 VALUE class_lvar0;
2538 ...
2539 VALUE class_lvarN;
2540 VALUE cref;
2541 VALUE prev_ep; // for frame jump
2542 VALUE flags;
2543 };
2544
2545 struct C_METHOD_CONTROL_FRAME {
2546 VALUE *pc; // 0
2547 VALUE *sp; // stack pointer
2548 rb_iseq_t *iseq; // cmi
2549 VALUE self; // ?
2550 VALUE *ep; // ep == lep
2551 void *code; //
2552 };
2553
2554 struct C_BLOCK_CONTROL_FRAME {
2555 VALUE *pc; // point only "finish" insn
2556 VALUE *sp; // sp
2557 rb_iseq_t *iseq; // ?
2558 VALUE self; //
2559 VALUE *ep; // ep
2560 void *code; //
2561 };
2562 */
2563
2564static inline VALUE
2565vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo);
2566static inline VALUE
2567vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result);
2568
2569// For non-Emscripten Wasm builds, use vm_exec with an optimized setjmp for runtime performance.
2570#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2571
2572struct rb_vm_exec_context {
2573 rb_execution_context_t *const ec;
2574 struct rb_vm_tag *const tag;
2575
2576 VALUE result;
2577};
2578
2579static void
2580vm_exec_bottom_main(void *context)
2581{
2582 struct rb_vm_exec_context *ctx = context;
2583 rb_execution_context_t *ec = ctx->ec;
2584
2585 ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec));
2586}
2587
2588static void
2589vm_exec_bottom_rescue(void *context)
2590{
2591 struct rb_vm_exec_context *ctx = context;
2592 rb_execution_context_t *ec = ctx->ec;
2593
2594 ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo);
2595}
2596#endif
2597
2598VALUE
2599vm_exec(rb_execution_context_t *ec)
2600{
2601 VALUE result = Qundef;
2602
2603 EC_PUSH_TAG(ec);
2604
2605 _tag.retval = Qnil;
2606
2607#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2608 struct rb_vm_exec_context ctx = {
2609 .ec = ec,
2610 .tag = &_tag,
2611 };
2612 struct rb_wasm_try_catch try_catch;
2613
2614 EC_REPUSH_TAG();
2615
2616 rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx);
2617
2618 rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf));
2619
2620 result = ctx.result;
2621#else
2622 enum ruby_tag_type state;
2623 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2624 if (UNDEF_P(result = jit_exec(ec))) {
2625 result = vm_exec_core(ec);
2626 }
2627 /* fallback to the VM */
2628 result = vm_exec_loop(ec, TAG_NONE, &_tag, result);
2629 }
2630 else {
2631 result = vm_exec_loop(ec, state, &_tag, ec->errinfo);
2632 }
2633#endif
2634
2635 EC_POP_TAG();
2636 return result;
2637}
2638
2639static inline VALUE
2640vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
2641 struct rb_vm_tag *tag, VALUE result)
2642{
2643 if (state == TAG_NONE) { /* no jumps, result is discarded */
2644 goto vm_loop_start;
2645 }
2646
2647 rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
2648 while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
2649 // caught a jump, exec the handler. JIT code in jit_exec_exception()
2650 // may return Qundef to run remaining frames with vm_exec_core().
2651 if (UNDEF_P(result = jit_exec_exception(ec))) {
2652 result = vm_exec_core(ec);
2653 }
2654 vm_loop_start:
2655 VM_ASSERT(ec->tag == tag);
2656 /* when caught `throw`, `tag.state` is set. */
2657 if ((state = tag->state) == TAG_NONE) break;
2658 tag->state = TAG_NONE;
2659 }
2660
2661 return result;
2662}
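/* Rough shape of the vm_exec()/vm_exec_loop() protocol above (pseudocode,
 * not compiled):
 *
 *   result = jit_exec(ec) or vm_exec_core(ec);          // normal execution
 *   while (a tag jump was caught) {
 *       result = vm_exec_handle_exception(ec, state, result);
 *       if (result != Qundef) break;                    // unwound with a value
 *       result = jit_exec_exception(ec) or vm_exec_core(ec);  // run handler
 *   }
 */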
2663
2664static inline VALUE
2665vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo)
2666{
2667 struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
2668
2669 for (;;) {
2670 unsigned int i;
2671 const struct iseq_catch_table_entry *entry;
2672 const struct iseq_catch_table *ct;
2673 unsigned long epc, cont_pc, cont_sp;
2674 const rb_iseq_t *catch_iseq;
2675 VALUE type;
2676 const rb_control_frame_t *escape_cfp;
2677
2678 cont_pc = cont_sp = 0;
2679 catch_iseq = NULL;
2680
2681 while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
2682 if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
2683 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
2684 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2685 rb_vm_frame_method_entry(ec->cfp)->called_id,
2686 rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
2687 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
2688 rb_vm_frame_method_entry(ec->cfp)->owner,
2689 rb_vm_frame_method_entry(ec->cfp)->def->original_id);
2690 }
2691 rb_vm_pop_frame(ec);
2692 }
2693
2694 rb_control_frame_t *const cfp = ec->cfp;
2695 epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
2696
2697 escape_cfp = NULL;
2698 if (state == TAG_BREAK || state == TAG_RETURN) {
2699 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2700
2701 if (cfp == escape_cfp) {
2702 if (state == TAG_RETURN) {
2703 if (!VM_FRAME_FINISHED_P(cfp)) {
2704 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2705 THROW_DATA_STATE_SET(err, state = TAG_BREAK);
2706 }
2707 else {
2708 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2709 if (ct) for (i = 0; i < ct->size; i++) {
2710 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2711 if (entry->start < epc && entry->end >= epc) {
2712 if (entry->type == CATCH_TYPE_ENSURE) {
2713 catch_iseq = entry->iseq;
2714 cont_pc = entry->cont;
2715 cont_sp = entry->sp;
2716 break;
2717 }
2718 }
2719 }
2720 if (catch_iseq == NULL) {
2721 ec->errinfo = Qnil;
2722 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2723 // cfp == escape_cfp here so calling with cfp_returning_with_value = true
2724 hook_before_rewind(ec, true, state, err);
2725 rb_vm_pop_frame(ec);
2726 return THROW_DATA_VAL(err);
2727 }
2728 }
2729 /* through */
2730 }
2731 else {
2732 /* TAG_BREAK */
2733 *cfp->sp++ = THROW_DATA_VAL(err);
2734 ec->errinfo = Qnil;
2735 return Qundef;
2736 }
2737 }
2738 }
2739
2740 if (state == TAG_RAISE) {
2741 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2742 if (ct) for (i = 0; i < ct->size; i++) {
2743 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2744 if (entry->start < epc && entry->end >= epc) {
2745
2746 if (entry->type == CATCH_TYPE_RESCUE ||
2747 entry->type == CATCH_TYPE_ENSURE) {
2748 catch_iseq = entry->iseq;
2749 cont_pc = entry->cont;
2750 cont_sp = entry->sp;
2751 break;
2752 }
2753 }
2754 }
2755 }
2756 else if (state == TAG_RETRY) {
2757 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2758 if (ct) for (i = 0; i < ct->size; i++) {
2759 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2760 if (entry->start < epc && entry->end >= epc) {
2761
2762 if (entry->type == CATCH_TYPE_ENSURE) {
2763 catch_iseq = entry->iseq;
2764 cont_pc = entry->cont;
2765 cont_sp = entry->sp;
2766 break;
2767 }
2768 else if (entry->type == CATCH_TYPE_RETRY) {
2769 const rb_control_frame_t *escape_cfp;
2770 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2771 if (cfp == escape_cfp) {
2772 cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
2773 ec->errinfo = Qnil;
2774 return Qundef;
2775 }
2776 }
2777 }
2778 }
2779 }
2780 else if ((state == TAG_BREAK && !escape_cfp) ||
2781 (state == TAG_REDO) ||
2782 (state == TAG_NEXT)) {
2783 type = (const enum rb_catch_type[TAG_MASK]) {
2784 [TAG_BREAK] = CATCH_TYPE_BREAK,
2785 [TAG_NEXT] = CATCH_TYPE_NEXT,
2786 [TAG_REDO] = CATCH_TYPE_REDO,
2787 /* otherwise = dontcare */
2788 }[state];
2789
2790 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2791 if (ct) for (i = 0; i < ct->size; i++) {
2792 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2793
2794 if (entry->start < epc && entry->end >= epc) {
2795 if (entry->type == CATCH_TYPE_ENSURE) {
2796 catch_iseq = entry->iseq;
2797 cont_pc = entry->cont;
2798 cont_sp = entry->sp;
2799 break;
2800 }
2801 else if (entry->type == type) {
2802 cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
2803 cfp->sp = vm_base_ptr(cfp) + entry->sp;
2804
2805 if (state != TAG_REDO) {
2806 *cfp->sp++ = THROW_DATA_VAL(err);
2807 }
2808 ec->errinfo = Qnil;
2809 VM_ASSERT(ec->tag->state == TAG_NONE);
2810 return Qundef;
2811 }
2812 }
2813 }
2814 }
2815 else {
2816 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2817 if (ct) for (i = 0; i < ct->size; i++) {
2818 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2819 if (entry->start < epc && entry->end >= epc) {
2820
2821 if (entry->type == CATCH_TYPE_ENSURE) {
2822 catch_iseq = entry->iseq;
2823 cont_pc = entry->cont;
2824 cont_sp = entry->sp;
2825 break;
2826 }
2827 }
2828 }
2829 }
2830
2831 if (catch_iseq != NULL) { /* found catch table */
2832 /* enter catch scope */
2833 const int arg_size = 1;
2834
2835 rb_iseq_check(catch_iseq);
2836 cfp->sp = vm_base_ptr(cfp) + cont_sp;
2837 cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + cont_pc;
2838
2839 /* push block frame */
2840 cfp->sp[0] = (VALUE)err;
2841 vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
2842 cfp->self,
2843 VM_GUARDED_PREV_EP(cfp->ep),
2844 0, /* cref or me */
2845 ISEQ_BODY(catch_iseq)->iseq_encoded,
2846 cfp->sp + arg_size /* push value */,
2847 ISEQ_BODY(catch_iseq)->local_table_size - arg_size,
2848 ISEQ_BODY(catch_iseq)->stack_max);
2849
2850 state = 0;
2851 ec->tag->state = TAG_NONE;
2852 ec->errinfo = Qnil;
2853
2854 return Qundef;
2855 }
2856 else {
2857 hook_before_rewind(ec, (cfp == escape_cfp), state, err);
2858
2859 if (VM_FRAME_FINISHED_P(ec->cfp)) {
2860 rb_vm_pop_frame(ec);
2861 ec->errinfo = (VALUE)err;
2862 rb_vm_tag_jmpbuf_deinit(&ec->tag->buf);
2863 ec->tag = ec->tag->prev;
2864 EC_JUMP_TAG(ec, state);
2865 }
2866 else {
2867 rb_vm_pop_frame(ec);
2868 }
2869 }
2870 }
2871}
2872
2873/* misc */
2874
2875VALUE
2876rb_iseq_eval(const rb_iseq_t *iseq)
2877{
2878 rb_execution_context_t *ec = GET_EC();
2879 VALUE val;
2880 vm_set_top_stack(ec, iseq);
2881 // TODO: set the namespace frame like require/load
2882 val = vm_exec(ec);
2883 return val;
2884}
2885
2886VALUE
2887rb_iseq_eval_with_refinement(const rb_iseq_t *iseq, VALUE mod)
2888{
2889 rb_execution_context_t *ec = GET_EC();
2890 VALUE val;
2891 vm_set_top_stack(ec, iseq);
2892 rb_vm_using_module(mod);
2893 // TODO: set the namespace frame like require/load
2894 val = vm_exec(ec);
2895 return val;
2896}
2897
2898VALUE
2899rb_iseq_eval_main(const rb_iseq_t *iseq)
2900{
2901 rb_execution_context_t *ec = GET_EC();
2902 VALUE val;
2903 vm_set_main_stack(ec, iseq);
2904 // TODO: set the namespace frame like require/load
2905 val = vm_exec(ec);
2906 return val;
2907}
2908
2909int
2910rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
2911{
2912 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
2913
2914 if (me) {
2915 if (idp) *idp = me->def->original_id;
2916 if (called_idp) *called_idp = me->called_id;
2917 if (klassp) *klassp = me->owner;
2918 return TRUE;
2919 }
2920 else {
2921 return FALSE;
2922 }
2923}
2924
2925int
2926rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
2927{
2928 return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
2929}
2930
2931int
2932rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
2933{
2934 return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
2935}
2936
2937VALUE
2938rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
2939 VALUE block_handler, VALUE filename)
2940{
2941 rb_execution_context_t *ec = GET_EC();
2942 const rb_control_frame_t *reg_cfp = ec->cfp;
2943 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
2944 VALUE val;
2945
2946 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
2947 recv, block_handler,
2948 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
2949 0, reg_cfp->sp, 0, 0);
2950
2951 val = (*func)(arg);
2952
2953 rb_vm_pop_frame(ec);
2954 return val;
2955}
2956
2957VALUE
2958rb_vm_call_cfunc2(VALUE recv, VALUE (*func)(VALUE, VALUE), VALUE arg1, VALUE arg2,
2959 VALUE block_handler, VALUE filename)
2960{
2961 rb_execution_context_t *ec = GET_EC();
2962 const rb_control_frame_t *reg_cfp = ec->cfp;
2963 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
2964 VALUE val;
2965
2966 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
2967 recv, block_handler,
2968 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
2969 0, reg_cfp->sp, 0, 0);
2970
2971 val = (*func)(arg1, arg2);
2972
2973 rb_vm_pop_frame(ec);
2974 return val;
2975}
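/* These two helpers wrap a C callback in a fresh top-level ("TOP") frame with
 * a toplevel cref, so backtraces and cref/constant lookups inside `func`
 * behave as if it were a top-level script; the file loading code is a typical
 * caller. The dummy iseq appears to exist only to give the frame a path/name.
 */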
2976
2977/* vm */
2978
2979void
2980rb_vm_update_references(void *ptr)
2981{
2982 if (ptr) {
2983 rb_vm_t *vm = ptr;
2984
2985 vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
2986 vm->load_path = rb_gc_location(vm->load_path);
2987 vm->load_path_snapshot = rb_gc_location(vm->load_path_snapshot);
2988
2989 if (vm->load_path_check_cache) {
2990 vm->load_path_check_cache = rb_gc_location(vm->load_path_check_cache);
2991 }
2992
2993 vm->expanded_load_path = rb_gc_location(vm->expanded_load_path);
2994 vm->loaded_features = rb_gc_location(vm->loaded_features);
2995 vm->loaded_features_snapshot = rb_gc_location(vm->loaded_features_snapshot);
2996 vm->loaded_features_realpaths = rb_gc_location(vm->loaded_features_realpaths);
2997 vm->loaded_features_realpath_map = rb_gc_location(vm->loaded_features_realpath_map);
2998 vm->top_self = rb_gc_location(vm->top_self);
2999 vm->require_stack = rb_gc_location(vm->require_stack);
3000 vm->orig_progname = rb_gc_location(vm->orig_progname);
3001
3002 rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd);
3003
3004 if (vm->coverages) {
3005 vm->coverages = rb_gc_location(vm->coverages);
3006 vm->me2counter = rb_gc_location(vm->me2counter);
3007 }
3008 }
3009}
3010
3011void
3012rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
3013{
3014 if (ptr) {
3015 rb_vm_t *vm = ptr;
3016 rb_ractor_t *r = 0;
3017 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3018 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3019 rb_ractor_status_p(r, ractor_running));
3020 if (r->threads.cnt > 0) {
3021 rb_thread_t *th = 0;
3022 ccan_list_for_each(&r->threads.set, th, lt_node) {
3023 VM_ASSERT(th != NULL);
3024 rb_execution_context_t * ec = th->ec;
3025 if (ec->vm_stack) {
3026 VALUE *p = ec->vm_stack;
3027 VALUE *sp = ec->cfp->sp;
3028 while (p < sp) {
3029 if (!RB_SPECIAL_CONST_P(*p)) {
3030 cb(*p, ctx);
3031 }
3032 p++;
3033 }
3034 }
3035 }
3036 }
3037 }
3038 }
3039}
3040
3041static enum rb_id_table_iterator_result
3042vm_mark_negative_cme(VALUE val, void *dmy)
3043{
3044 rb_gc_mark(val);
3045 return ID_TABLE_CONTINUE;
3046}
3047
3048void rb_thread_sched_mark_zombies(rb_vm_t *vm);
3049
3050void
3051rb_vm_mark(void *ptr)
3052{
3053 RUBY_MARK_ENTER("vm");
3054 RUBY_GC_INFO("-------------------------------------------------\n");
3055 if (ptr) {
3056 rb_vm_t *vm = ptr;
3057 rb_ractor_t *r = 0;
3058 long i;
3059
3060 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3061 // ractor.set only contains blocking or running ractors
3062 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3063 rb_ractor_status_p(r, ractor_running));
3064 rb_gc_mark(rb_ractor_self(r));
3065 }
3066
3067 for (struct global_object_list *list = vm->global_object_list; list; list = list->next) {
3068 rb_gc_mark_maybe(*list->varptr);
3069 }
3070
3071 if (vm->main_namespace) {
3072 rb_namespace_entry_mark((void *)vm->main_namespace);
3073 }
3074
3075 rb_gc_mark_movable(vm->mark_object_ary);
3076 rb_gc_mark_movable(vm->load_path);
3077 rb_gc_mark_movable(vm->load_path_snapshot);
3078 rb_gc_mark_movable(vm->load_path_check_cache);
3079 rb_gc_mark_movable(vm->expanded_load_path);
3080 rb_gc_mark_movable(vm->loaded_features);
3081 rb_gc_mark_movable(vm->loaded_features_snapshot);
3082 rb_gc_mark_movable(vm->loaded_features_realpaths);
3083 rb_gc_mark_movable(vm->loaded_features_realpath_map);
3084 rb_gc_mark_movable(vm->require_stack);
3085 rb_gc_mark_movable(vm->top_self);
3086 rb_gc_mark_movable(vm->orig_progname);
3087 rb_gc_mark_movable(vm->coverages);
3088 rb_gc_mark_movable(vm->me2counter);
3089
3090 if (vm->loading_table) {
3091 rb_mark_tbl(vm->loading_table);
3092 }
3093
3094 rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
3095
3096 rb_id_table_foreach_values(vm->negative_cme_table, vm_mark_negative_cme, NULL);
3097 rb_mark_tbl_no_pin(vm->overloaded_cme_table);
3098 for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
3099 const struct rb_callcache *cc = vm->global_cc_cache_table[i];
3100
3101 if (cc != NULL) {
3102 if (!vm_cc_invalidated_p(cc)) {
3103 rb_gc_mark((VALUE)cc);
3104 }
3105 else {
3106 vm->global_cc_cache_table[i] = NULL;
3107 }
3108 }
3109 }
3110
3111 rb_thread_sched_mark_zombies(vm);
3112 }
3113
3114 RUBY_MARK_LEAVE("vm");
3115}
3116
3117#undef rb_vm_register_special_exception
3118void
3119rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
3120{
3121 rb_vm_t *vm = GET_VM();
3122 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
3123 OBJ_FREEZE(exc);
3124 ((VALUE *)vm->special_exceptions)[sp] = exc;
3125 rb_vm_register_global_object(exc);
3126}
3127
3128static int
3129free_loading_table_entry(st_data_t key, st_data_t value, st_data_t arg)
3130{
3131 xfree((char *)key);
3132 return ST_DELETE;
3133}
3134
3135void rb_free_loaded_features_index(rb_vm_t *vm);
3136void rb_objspace_free_objects(void *objspace);
3137
3138int
3139ruby_vm_destruct(rb_vm_t *vm)
3140{
3141 RUBY_FREE_ENTER("vm");
3142
3143 if (vm) {
3144 rb_thread_t *th = vm->ractor.main_thread;
3145 VALUE *stack = th->ec->vm_stack;
3146 if (rb_free_at_exit) {
3147 rb_free_encoded_insn_data();
3148 rb_free_global_enc_table();
3149 rb_free_loaded_builtin_table();
3150
3151 rb_free_shared_fiber_pool();
3152 rb_free_static_symid_str();
3153 rb_free_transcoder_table();
3154 rb_free_vm_opt_tables();
3155 rb_free_warning();
3156 rb_free_rb_global_tbl();
3157 rb_free_loaded_features_index(vm);
3158
3159 rb_id_table_free(vm->negative_cme_table);
3160 st_free_table(vm->overloaded_cme_table);
3161
3162 // TODO: Is this ignorable for classext->m_tbl ?
3163 // rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl);
3164
3165 rb_shape_free_all();
3166
3167 st_free_table(vm->static_ext_inits);
3168
3169 rb_vm_postponed_job_free();
3170
3171 rb_id_table_free(vm->constant_cache);
3172 set_free_table(vm->unused_block_warning_table);
3173
3174 xfree(th->nt);
3175 th->nt = NULL;
3176
3177#ifndef HAVE_SETPROCTITLE
3178 ruby_free_proctitle();
3179#endif
3180 }
3181 else {
3182 rb_fiber_reset_root_local_storage(th);
3183 thread_free(th);
3184 }
3185
3186 struct rb_objspace *objspace = vm->gc.objspace;
3187
3188 rb_vm_living_threads_init(vm);
3189 ruby_vm_run_at_exit_hooks(vm);
3190 if (vm->loading_table) {
3191 st_foreach(vm->loading_table, free_loading_table_entry, 0);
3192 st_free_table(vm->loading_table);
3193 vm->loading_table = 0;
3194 }
3195 if (vm->ci_table) {
3196 st_free_table(vm->ci_table);
3197 vm->ci_table = NULL;
3198 }
3199 RB_ALTSTACK_FREE(vm->main_altstack);
3200
3201 struct global_object_list *next;
3202 for (struct global_object_list *list = vm->global_object_list; list; list = next) {
3203 next = list->next;
3204 xfree(list);
3205 }
3206
3207 if (objspace) {
3208 if (rb_free_at_exit) {
3209 rb_objspace_free_objects(objspace);
3210 rb_free_generic_fields_tbl_();
3211 rb_free_default_rand_key();
3212 if (th && vm->fork_gen == 0) {
3213 /* If we have forked, main_thread may not be the initial thread */
3214 xfree(stack);
3215 ruby_mimfree(th);
3216 }
3217 }
3218 rb_objspace_free(objspace);
3219 }
3220 rb_native_mutex_destroy(&vm->workqueue_lock);
3221 /* after freeing objspace, you *can't* use ruby_xfree() */
3222 ruby_mimfree(vm);
3223 ruby_current_vm_ptr = NULL;
3224
3225#if USE_YJIT
3226 if (rb_free_at_exit) {
3227 rb_yjit_free_at_exit();
3228 }
3229#endif
3230 }
3231 RUBY_FREE_LEAVE("vm");
3232 return 0;
3233}
3234
3235size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
3236
3237// Iterator for vm_memsize_constant_cache below: adds the memsize of each
3238// nested inline-cache table to the running total.
3239static enum rb_id_table_iterator_result
3240vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
3241{
3242 *((size_t *) size) += rb_st_memsize((st_table *) ics);
3243 return ID_TABLE_CONTINUE;
3244}
3245
3246// Returns a size_t representing the memory footprint of the VM's constant
3247// cache, which is the memsize of the table as well as the memsize of all of the
3248// nested tables.
3249static size_t
3250vm_memsize_constant_cache(void)
3251{
3252 rb_vm_t *vm = GET_VM();
3253 size_t size = rb_id_table_memsize(vm->constant_cache);
3254
3255 rb_id_table_foreach(vm->constant_cache, vm_memsize_constant_cache_i, &size);
3256 return size;
3257}
3258
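// Used for VM memsize reporting. Returns the size of the at_exit list by
// looping through the linked list and adding up the size of the structs.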
3259static size_t
3260vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
3261{
3262 size_t size = 0;
3263
3264 while (at_exit) {
3265 size += sizeof(rb_at_exit_list);
3266 at_exit = at_exit->next;
3267 }
3268
3269 return size;
3270}
3271
3272// Used for VM memsize reporting. Returns the size of the builtin function
3273// table if it has been defined.
3274static size_t
3275vm_memsize_builtin_function_table(const struct rb_builtin_function *builtin_function_table)
3276{
3277 return builtin_function_table == NULL ? 0 : sizeof(struct rb_builtin_function);
3278}
3279
3280// Reports the memsize of the VM struct object and the structs that are
3281// associated with it.
3282static size_t
3283vm_memsize(const void *ptr)
3284{
3285 rb_vm_t *vm = GET_VM();
3286
3287 return (
3288 sizeof(rb_vm_t) +
3289 rb_st_memsize(vm->loaded_features_index) +
3290 rb_st_memsize(vm->loading_table) +
3291 rb_vm_memsize_postponed_job_queue() +
3292 rb_vm_memsize_workqueue(&vm->workqueue) +
3293 vm_memsize_at_exit_list(vm->at_exit) +
3294 rb_st_memsize(vm->ci_table) +
3295 vm_memsize_builtin_function_table(vm->builtin_function_table) +
3296 rb_id_table_memsize(vm->negative_cme_table) +
3297 rb_st_memsize(vm->overloaded_cme_table) +
3298 vm_memsize_constant_cache() +
3299 GET_SHAPE_TREE()->cache_size * sizeof(redblack_node_t)
3300 );
3301
3302 // TODO
3303 // struct { struct ccan_list_head set; } ractor;
3304 // void *main_altstack; #ifdef USE_SIGALTSTACK
3305 // struct rb_objspace *objspace;
3306}
3307
3308static const rb_data_type_t vm_data_type = {
3309 "VM",
3310 {0, 0, vm_memsize,},
3311 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3312};
3313
3314
3315static VALUE
3316vm_default_params(void)
3317{
3318 rb_vm_t *vm = GET_VM();
3319 VALUE result = rb_hash_new_with_size(4);
3320#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
3321 SET(thread_vm_stack_size);
3322 SET(thread_machine_stack_size);
3323 SET(fiber_vm_stack_size);
3324 SET(fiber_machine_stack_size);
3325#undef SET
3326 rb_obj_freeze(result);
3327 return result;
3328}
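/* The frozen hash built above is what RubyVM::DEFAULT_PARAMS exposes.
 * Typical 64-bit values, shown only as an illustration (they vary by
 * platform and environment):
 *
 *   RubyVM::DEFAULT_PARAMS
 *   #=> {thread_vm_stack_size: 1048576, thread_machine_stack_size: 1048576,
 *   #    fiber_vm_stack_size: 131072, fiber_machine_stack_size: 524288}
 */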
3329
3330static size_t
3331get_param(const char *name, size_t default_value, size_t min_value)
3332{
3333 const char *envval;
3334 size_t result = default_value;
3335 if ((envval = getenv(name)) != 0) {
3336 long val = atol(envval);
3337 if (val < (long)min_value) {
3338 val = (long)min_value;
3339 }
3340 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
3341 }
3342 if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */
3343
3344 return result;
3345}
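/* Worked example (assuming RUBY_VM_SIZE_ALIGN is the 4096-byte page size):
 * with RUBY_THREAD_VM_STACK_SIZE=5000 in the environment, the value is
 * clamped to min_value if smaller and then rounded up to the alignment:
 * ((5000 - 1 + 4096) / 4096) * 4096 == 8192.
 */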
3346
3347static void
3348check_machine_stack_size(size_t *sizep)
3349{
3350#ifdef PTHREAD_STACK_MIN
3351 size_t size = *sizep;
3352#endif
3353
3354#ifdef PTHREAD_STACK_MIN
3355 if (size < (size_t)PTHREAD_STACK_MIN) {
3356 *sizep = (size_t)PTHREAD_STACK_MIN * 2;
3357 }
3358#endif
3359}
3360
3361static void
3362vm_default_params_setup(rb_vm_t *vm)
3363{
3364 vm->default_params.thread_vm_stack_size =
3365 get_param("RUBY_THREAD_VM_STACK_SIZE",
3366 RUBY_VM_THREAD_VM_STACK_SIZE,
3367 RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
3368
3369 vm->default_params.thread_machine_stack_size =
3370 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
3371 RUBY_VM_THREAD_MACHINE_STACK_SIZE,
3372 RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
3373
3374 vm->default_params.fiber_vm_stack_size =
3375 get_param("RUBY_FIBER_VM_STACK_SIZE",
3376 RUBY_VM_FIBER_VM_STACK_SIZE,
3377 RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
3378
3379 vm->default_params.fiber_machine_stack_size =
3380 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
3381 RUBY_VM_FIBER_MACHINE_STACK_SIZE,
3382 RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
3383
3384 /* environment dependent check */
3385 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
3386 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
3387}
3388
3389static void
3390vm_init2(rb_vm_t *vm)
3391{
3392 rb_vm_living_threads_init(vm);
3393 vm->thread_report_on_exception = 1;
3394 vm->src_encoding_index = -1;
3395
3396 vm_default_params_setup(vm);
3397}
3398
3399void
3400rb_execution_context_update(rb_execution_context_t *ec)
3401{
3402 /* update VM stack */
3403 if (ec->vm_stack) {
3404 long i;
3405 VM_ASSERT(ec->cfp);
3406 VALUE *p = ec->vm_stack;
3407 VALUE *sp = ec->cfp->sp;
3408 rb_control_frame_t *cfp = ec->cfp;
3409 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3410
3411 for (i = 0; i < (long)(sp - p); i++) {
3412 VALUE ref = p[i];
3413 VALUE update = rb_gc_location(ref);
3414 if (ref != update) {
3415 p[i] = update;
3416 }
3417 }
3418
3419 while (cfp != limit_cfp) {
3420 const VALUE *ep = cfp->ep;
3421 cfp->self = rb_gc_location(cfp->self);
3422 cfp->iseq = (rb_iseq_t *)rb_gc_location((VALUE)cfp->iseq);
3423 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
3424
3425 if (!VM_ENV_LOCAL_P(ep)) {
3426 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3427 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3428 VM_FORCE_WRITE(&prev_ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(prev_ep[VM_ENV_DATA_INDEX_ENV]));
3429 }
3430
3431 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3432 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(ep[VM_ENV_DATA_INDEX_ENV]));
3433 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ME_CREF], rb_gc_location(ep[VM_ENV_DATA_INDEX_ME_CREF]));
3434 }
3435 }
3436
3437 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3438 }
3439 }
3440
3441 ec->storage = rb_gc_location(ec->storage);
3442}
3443
3444static enum rb_id_table_iterator_result
3445mark_local_storage_i(VALUE local, void *data)
3446{
3447 rb_gc_mark(local);
3448 return ID_TABLE_CONTINUE;
3449}
3450
3451void
3452rb_execution_context_mark(const rb_execution_context_t *ec)
3453{
3454 /* mark VM stack */
3455 if (ec->vm_stack) {
3456 VM_ASSERT(ec->cfp);
3457 VALUE *p = ec->vm_stack;
3458 VALUE *sp = ec->cfp->sp;
3459 rb_control_frame_t *cfp = ec->cfp;
3460 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3461
3462 VM_ASSERT(sp == ec->cfp->sp);
3463 rb_gc_mark_vm_stack_values((long)(sp - p), p);
3464
3465 while (cfp != limit_cfp) {
3466 const VALUE *ep = cfp->ep;
3467 VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
3468
3469 rb_gc_mark_movable(cfp->self);
3470 rb_gc_mark_movable((VALUE)cfp->iseq);
3471 rb_gc_mark_movable((VALUE)cfp->block_code);
3472
3473 if (!VM_ENV_LOCAL_P(ep)) {
3474 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3475 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3476 rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
3477 }
3478
3479 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3480 rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
3481 rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
3482 }
3483 }
3484
3485 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3486 }
3487 }
3488
3489 /* mark machine stack */
3490 if (ec->machine.stack_start && ec->machine.stack_end &&
3491 ec != GET_EC() /* marked for current ec at the first stage of marking */
3492 ) {
3493 rb_gc_mark_machine_context(ec);
3494 }
3495
3496 rb_gc_mark(ec->errinfo);
3497 rb_gc_mark(ec->root_svar);
3498 if (ec->local_storage) {
3499 rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
3500 }
3501 rb_gc_mark(ec->local_storage_recursive_hash);
3502 rb_gc_mark(ec->local_storage_recursive_hash_for_trace);
3503 rb_gc_mark(ec->private_const_reference);
3504
3505 rb_gc_mark_movable(ec->storage);
3506}
3507
3508void rb_fiber_mark_self(rb_fiber_t *fib);
3509void rb_fiber_update_self(rb_fiber_t *fib);
3510void rb_threadptr_root_fiber_setup(rb_thread_t *th);
3511void rb_threadptr_root_fiber_release(rb_thread_t *th);
3512
3513static void
3514thread_compact(void *ptr)
3515{
3516 rb_thread_t *th = ptr;
3517
3518 th->self = rb_gc_location(th->self);
3519
3520 if (!th->root_fiber) {
3521 rb_execution_context_update(th->ec);
3522 }
3523}
3524
3525static void
3526thread_mark(void *ptr)
3527{
3528 rb_thread_t *th = ptr;
3529 RUBY_MARK_ENTER("thread");
3530 rb_fiber_mark_self(th->ec->fiber_ptr);
3531
3532 /* mark ruby objects */
3533 switch (th->invoke_type) {
3534 case thread_invoke_type_proc:
3535 case thread_invoke_type_ractor_proc:
3536 rb_gc_mark(th->invoke_arg.proc.proc);
3537 rb_gc_mark(th->invoke_arg.proc.args);
3538 break;
3539 case thread_invoke_type_func:
3540 rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
3541 break;
3542 default:
3543 break;
3544 }
3545
3546 rb_gc_mark(rb_ractor_self(th->ractor));
3547 rb_gc_mark(th->thgroup);
3548 rb_gc_mark(th->value);
3549 rb_gc_mark(th->pending_interrupt_queue);
3550 rb_gc_mark(th->pending_interrupt_mask_stack);
3551 rb_gc_mark(th->top_self);
3552 rb_gc_mark(th->top_wrapper);
3553 rb_gc_mark(th->namespaces);
3554 if (NAMESPACE_USER_P(th->ns)) rb_namespace_entry_mark(th->ns);
3555 if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
3556
3557 RUBY_ASSERT(th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
3558 rb_gc_mark(th->last_status);
3559 rb_gc_mark(th->locking_mutex);
3560 rb_gc_mark(th->name);
3561 rb_gc_mark(th->ractor_waiting.receiving_mutex);
3562
3563 rb_gc_mark(th->scheduler);
3564
3565 rb_threadptr_interrupt_exec_task_mark(th);
3566
3567 RUBY_MARK_LEAVE("thread");
3568}
3569
3570void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c
3571
3572static void
3573thread_free(void *ptr)
3574{
3575 rb_thread_t *th = ptr;
3576 RUBY_FREE_ENTER("thread");
3577
3578 rb_threadptr_sched_free(th);
3579
3580 if (th->locking_mutex != Qfalse) {
3581 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
3582 }
3583 if (th->keeping_mutexes != NULL) {
3584 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
3585 }
3586
3587 ruby_xfree(th->specific_storage);
3588
3589 rb_threadptr_root_fiber_release(th);
3590
3591 if (th->vm && th->vm->ractor.main_thread == th) {
3592 RUBY_GC_INFO("MRI main thread\n");
3593 }
3594 else {
3595 // ruby_xfree(th->nt);
3596 // TODO: MN system collect nt, but without MN system it should be freed here.
3597 ruby_xfree(th);
3598 }
3599
3600 RUBY_FREE_LEAVE("thread");
3601}
3602
3603static size_t
3604thread_memsize(const void *ptr)
3605{
3606 const rb_thread_t *th = ptr;
3607 size_t size = sizeof(rb_thread_t);
3608
3609 if (!th->root_fiber) {
3610 size += th->ec->vm_stack_size * sizeof(VALUE);
3611 }
3612 if (th->ec->local_storage) {
3613 size += rb_id_table_memsize(th->ec->local_storage);
3614 }
3615 return size;
3616}
3617
3618#define thread_data_type ruby_threadptr_data_type
3619const rb_data_type_t ruby_threadptr_data_type = {
3620 "VM/thread",
3621 {
3622 thread_mark,
3623 thread_free,
3624 thread_memsize,
3625 thread_compact,
3626 },
3627 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3628};
3629
3630VALUE
3631rb_obj_is_thread(VALUE obj)
3632{
3633 return RBOOL(rb_typeddata_is_kind_of(obj, &thread_data_type));
3634}
3635
3636static VALUE
3637thread_alloc(VALUE klass)
3638{
3639 rb_thread_t *th;
3640 return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
3641}
3642
3643void
3644rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3645{
3646 ec->vm_stack = stack;
3647 ec->vm_stack_size = size;
3648}
3649
3650void
3651rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3652{
3653 rb_ec_set_vm_stack(ec, stack, size);
3654
3655#if VM_CHECK_MODE > 0
3656 MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it
3657#endif
3658
3659 ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3660
3661 vm_push_frame(ec,
3662 NULL /* dummy iseq */,
3663 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
3664 Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
3665 0 /* dummy cref/me */,
3666 0 /* dummy pc */, ec->vm_stack, 0, 0
3667 );
3668}
3669
3670void
3671rb_ec_clear_vm_stack(rb_execution_context_t *ec)
3672{
3673 rb_ec_set_vm_stack(ec, NULL, 0);
3674
3675 // Avoid dangling pointers:
3676 ec->cfp = NULL;
3677}
3678
3679static void
3680th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
3681{
3682 th->self = self;
3683
3684 rb_threadptr_root_fiber_setup(th);
3685
3686 /* All threads are blocking until a non-blocking fiber is scheduled */
3687 th->blocking = 1;
3688 th->scheduler = Qnil;
3689
3690 if (self == 0) {
3691 size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
3692 rb_ec_initialize_vm_stack(th->ec, ALLOC_N(VALUE, size), size);
3693 }
3694 else {
3695 VM_ASSERT(th->ec->cfp == NULL);
3696 VM_ASSERT(th->ec->vm_stack == NULL);
3697 VM_ASSERT(th->ec->vm_stack_size == 0);
3698 }
3699
3700 th->status = THREAD_RUNNABLE;
3701 th->last_status = Qnil;
3702 th->top_wrapper = 0;
3703 th->top_self = vm->top_self; // 0 while self == 0
3704 th->namespaces = 0;
3705 th->ns = 0;
3706 th->value = Qundef;
3707
3708 th->ec->errinfo = Qnil;
3709 th->ec->root_svar = Qfalse;
3710 th->ec->local_storage_recursive_hash = Qnil;
3711 th->ec->local_storage_recursive_hash_for_trace = Qnil;
3712
3713 th->ec->storage = Qnil;
3714
3715#if OPT_CALL_THREADED_CODE
3716 th->retval = Qundef;
3717#endif
3718 th->name = Qnil;
3719 th->report_on_exception = vm->thread_report_on_exception;
3720 th->ext_config.ractor_safe = true;
3721
3722 ccan_list_head_init(&th->interrupt_exec_tasks);
3723 ccan_list_node_init(&th->ractor_waiting.waiting_node);
3724#ifndef RUBY_THREAD_PTHREAD_H
3725 rb_native_cond_initialize(&th->ractor_waiting.cond);
3726#endif
3727
3728#if USE_RUBY_DEBUG_LOG
3729 static rb_atomic_t thread_serial = 1;
3730 th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1);
3731
3732 RUBY_DEBUG_LOG("th:%u", th->serial);
3733#endif
3734}
3735
3736VALUE
3737rb_thread_alloc(VALUE klass)
3738{
3739 rb_namespace_t *ns;
3740 rb_execution_context_t *ec = GET_EC();
3741 VALUE self = thread_alloc(klass);
3742 rb_thread_t *target_th = rb_thread_ptr(self);
3743 target_th->ractor = GET_RACTOR();
3744 th_init(target_th, self, target_th->vm = GET_VM());
3745 if ((ns = rb_ec_thread_ptr(ec)->ns) == 0) {
3746 ns = rb_main_namespace();
3747 }
3748 target_th->ns = ns;
3749 return self;
3750}
3751
3752#define REWIND_CFP(expr) do { \
3753 rb_execution_context_t *ec__ = GET_EC(); \
3754 VALUE *const curr_sp = (ec__->cfp++)->sp; \
3755 VALUE *const saved_sp = ec__->cfp->sp; \
3756 ec__->cfp->sp = curr_sp; \
3757 expr; \
3758 (ec__->cfp--)->sp = saved_sp; \
3759} while (0)
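/* REWIND_CFP evaluates `expr` with the caller's frame as the current one: it
 * temporarily pops the current control frame (carrying its sp over so the
 * operands stay addressable), runs `expr`, then restores the frame and the
 * saved sp. Usage mirrors the helpers below:
 *
 *   REWIND_CFP({
 *       rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
 *   });
 */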
3760
3761static VALUE
3762m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
3763{
3764 REWIND_CFP({
3765 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
3766 });
3767 return Qnil;
3768}
3769
3770static VALUE
3771m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
3772{
3773 REWIND_CFP({
3774 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
3775 });
3776 return Qnil;
3777}
3778
3779static VALUE
3780m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
3781{
3782 REWIND_CFP({
3783 ID mid = SYM2ID(sym);
3784 rb_undef(cbase, mid);
3785 rb_clear_method_cache(self, mid);
3786 });
3787 return Qnil;
3788}
3789
3790static VALUE
3791m_core_set_postexe(VALUE self)
3792{
3793 rb_set_end_proc(rb_call_end_proc, rb_block_proc());
3794 return Qnil;
3795}
3796
3797static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
3798
3799static VALUE
3800core_hash_merge(VALUE hash, long argc, const VALUE *argv)
3801{
3802 Check_Type(hash, T_HASH);
3803 VM_ASSERT(argc % 2 == 0);
3804 rb_hash_bulk_insert(argc, argv, hash);
3805 return hash;
3806}
3807
3808static VALUE
3809m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
3810{
3811 VALUE hash = argv[0];
3812
3813 REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
3814
3815 return hash;
3816}
3817
3818static int
3819kwmerge_i(VALUE key, VALUE value, VALUE hash)
3820{
3821 rb_hash_aset(hash, key, value);
3822 return ST_CONTINUE;
3823}
3824
3825static VALUE
3826m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
3827{
3828 if (!NIL_P(kw)) {
3829 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
3830 }
3831 return hash;
3832}
3833
3834static VALUE
3835m_core_make_shareable(VALUE recv, VALUE obj)
3836{
3837 return rb_ractor_make_shareable(obj);
3838}
3839
3840static VALUE
3841m_core_make_shareable_copy(VALUE recv, VALUE obj)
3842{
3843 return rb_ractor_make_shareable_copy(obj);
3844}
3845
3846static VALUE
3847m_core_ensure_shareable(VALUE recv, VALUE obj, VALUE name)
3848{
3849 return rb_ractor_ensure_shareable(obj, name);
3850}
3851
3852static VALUE
3853core_hash_merge_kwd(VALUE hash, VALUE kw)
3854{
3855 rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
3856 return hash;
3857}
3858
3859extern VALUE *rb_gc_stack_start;
3860extern size_t rb_gc_stack_maxsize;
3861
3862/* debug functions */
3863
3864/* :nodoc: */
3865static VALUE
3866sdr(VALUE self)
3867{
3868 rb_vm_bugreport(NULL, stderr);
3869 return Qnil;
3870}
3871
3872/* :nodoc: */
3873static VALUE
3874nsdr(VALUE self)
3875{
3876 VALUE ary = rb_ary_new();
3877#ifdef HAVE_BACKTRACE
3878#include <execinfo.h>
3879#define MAX_NATIVE_TRACE 1024
3880 static void *trace[MAX_NATIVE_TRACE];
3881 int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
3882 char **syms = backtrace_symbols(trace, n);
3883 int i;
3884
3885 if (syms == 0) {
3886 rb_memerror();
3887 }
3888
3889 for (i=0; i<n; i++) {
3890 rb_ary_push(ary, rb_str_new2(syms[i]));
3891 }
3892 free(syms); /* OK */
3893#endif
3894 return ary;
3895}
3896
3897#if VM_COLLECT_USAGE_DETAILS
3898static VALUE usage_analysis_insn_start(VALUE self);
3899static VALUE usage_analysis_operand_start(VALUE self);
3900static VALUE usage_analysis_register_start(VALUE self);
3901static VALUE usage_analysis_insn_stop(VALUE self);
3902static VALUE usage_analysis_operand_stop(VALUE self);
3903static VALUE usage_analysis_register_stop(VALUE self);
3904static VALUE usage_analysis_insn_running(VALUE self);
3905static VALUE usage_analysis_operand_running(VALUE self);
3906static VALUE usage_analysis_register_running(VALUE self);
3907static VALUE usage_analysis_insn_clear(VALUE self);
3908static VALUE usage_analysis_operand_clear(VALUE self);
3909static VALUE usage_analysis_register_clear(VALUE self);
3910#endif
3911
3912static VALUE
3913f_raise(int c, VALUE *v, VALUE _)
3914{
3915 return rb_f_raise(c, v);
3916}
3917
3918static VALUE
3919f_proc(VALUE _)
3920{
3921 return rb_block_proc();
3922}
3923
3924static VALUE
3925f_lambda(VALUE _)
3926{
3927 return rb_block_lambda();
3928}
3929
3930static VALUE
3931f_sprintf(int c, const VALUE *v, VALUE _)
3932{
3933 return rb_f_sprintf(c, v);
3934}
3935
3936/* :nodoc: */
3937static VALUE
3938vm_mtbl(VALUE self, VALUE obj, VALUE sym)
3939{
3940 vm_mtbl_dump(CLASS_OF(obj), RTEST(sym) ? SYM2ID(sym) : 0);
3941 return Qnil;
3942}
3943
3944/* :nodoc: */
3945static VALUE
3946vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
3947{
3948 vm_mtbl_dump(obj, RTEST(sym) ? SYM2ID(sym) : 0);
3949 return Qnil;
3950}
3951
3952/*
3953 * call-seq:
3954 * RubyVM.keep_script_lines -> true or false
3955 *
3956 * Returns the current +keep_script_lines+ status. Currently it only
3957 * returns +true+ or +false+, but it may return other objects in the future.
3958 *
3959 * Note that this is an API for ruby internal use, debugging,
3960 * and research. Do not use this for any other purpose.
3961 * The compatibility is not guaranteed.
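 *
 * For example (+false+ is the initial value unless it was enabled earlier):
 *
 *     RubyVM.keep_script_lines   #=> false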
3962 */
3963static VALUE
3964vm_keep_script_lines(VALUE self)
3965{
3966 return RBOOL(ruby_vm_keep_script_lines);
3967}
3968
3969/*
3970 * call-seq:
3971 * RubyVM.keep_script_lines = true / false
3972 *
3973 * Sets the +keep_script_lines+ flag. If the flag is set, all
3974 * loaded scripts are recorded in the interpreter process.
3975 *
3976 * Note that this is an API for ruby internal use, debugging,
3977 * and research. Do not use this for any other purpose.
3978 * The compatibility is not guaranteed.
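 *
 * For example:
 *
 *     RubyVM.keep_script_lines = true
 *     RubyVM.keep_script_lines   #=> true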
3979 */
3980static VALUE
3981vm_keep_script_lines_set(VALUE self, VALUE flags)
3982{
3983 ruby_vm_keep_script_lines = RTEST(flags);
3984 return flags;
3985}
3986
3987void
3988Init_VM(void)
3989{
3990 VALUE opts;
3991 VALUE klass;
3992 VALUE fcore;
3993
3994 /*
3995 * Document-class: RubyVM
3996 *
3997 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
3998 * other Ruby implementations such as JRuby and TruffleRuby.
3999 *
4000 * The RubyVM module provides some access to MRI internals.
4001 * This module is for very limited purposes, such as debugging,
4002 * prototyping, and research. Normal users must not use it.
4003 * This module is not portable between Ruby implementations.
4004 */
4005 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
4006 rb_undef_alloc_func(rb_cRubyVM);
4007 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
4008 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
4009 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines", vm_keep_script_lines, 0);
4010 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines=", vm_keep_script_lines_set, 1);
4011
4012#if USE_DEBUG_COUNTER
4013 rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
4014 rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
4015#endif
4016
4017 /* FrozenCore (hidden) */
4018 fcore = rb_class_new(rb_cBasicObject);
4019 rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
4020 rb_vm_register_global_object(rb_class_path_cached(fcore));
4021 RB_FL_UNSET_RAW(fcore, T_MASK);
4022 RB_FL_SET_RAW(fcore, T_ICLASS);
4023 klass = rb_singleton_class(fcore);
4024 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
4025 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
4026 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
4027 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
4028 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
4029 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
4030 rb_define_method_id(klass, id_core_raise, f_raise, -1);
4031 rb_define_method_id(klass, id_core_sprintf, f_sprintf, -1);
4032 rb_define_method_id(klass, idProc, f_proc, 0);
4033 rb_define_method_id(klass, idLambda, f_lambda, 0);
4034 rb_define_method(klass, "make_shareable", m_core_make_shareable, 1);
4035 rb_define_method(klass, "make_shareable_copy", m_core_make_shareable_copy, 1);
4036 rb_define_method(klass, "ensure_shareable", m_core_ensure_shareable, 2);
4037 rb_obj_freeze(fcore);
4038 RBASIC_CLEAR_CLASS(klass);
4039 rb_obj_freeze(klass);
4040 rb_vm_register_global_object(fcore);
4041 rb_mRubyVMFrozenCore = fcore;
4042
4043 /*
4044 * Document-class: Thread
4045 *
4046 * Threads are the Ruby implementation for a concurrent programming model.
4047 *
4048 * Programs that require multiple threads of execution are a perfect
4049 * candidate for Ruby's Thread class.
4050 *
4051 * For example, we can create a new thread separate from the main thread's
4052 * execution using ::new.
4053 *
4054 * thr = Thread.new { puts "What's the big deal" }
4055 *
4056 * Then we are able to pause the execution of the main thread and allow
4057 * our new thread to finish, using #join:
4058 *
4059 *     thr.join # waits for thr; "What's the big deal" is printed
4060 *
4061 * If we don't call +thr.join+ before the main thread terminates, then all
4062 * other threads including +thr+ will be killed.
4063 *
4064 * Alternatively, you can use an array for handling multiple threads at
4065 * once, like in the following example:
4066 *
4067 * threads = []
4068 * threads << Thread.new { puts "What's the big deal" }
4069 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
4070 *
4071 * After creating a few threads we wait for them all to finish
4072 * consecutively.
4073 *
4074 * threads.each { |thr| thr.join }
4075 *
4076 * To retrieve the last value of a thread, use #value
4077 *
4078 * thr = Thread.new { sleep 1; "Useful value" }
4079 * thr.value #=> "Useful value"
4080 *
4081 * === Thread initialization
4082 *
4083 * In order to create new threads, Ruby provides ::new, ::start, and
4084 * ::fork. A block must be provided with each of these methods, otherwise
4085 * a ThreadError will be raised.
4086 *
4087 * When subclassing the Thread class, the +initialize+ method of your
4088 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
4089 * call super in your +initialize+ method.
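 *
 * For example, creating a thread without a block raises ThreadError:
 *
 *     Thread.new    #=> raises ThreadError (must be called with a block)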
4090 *
4091 * === Thread termination
4092 *
4093 * For terminating threads, Ruby provides a variety of ways to do this.
4094 *
4095 * The class method ::kill is meant to exit a given thread:
4096 *
4097 * thr = Thread.new { sleep }
4098 * Thread.kill(thr) # sends exit() to thr
4099 *
4100 * Alternatively, you can use the instance method #exit, or any of its
4101 * aliases #kill or #terminate.
4102 *
4103 * thr.exit
4104 *
4105 * === Thread status
4106 *
4107 * Ruby provides a few instance methods for querying the state of a given
4108 * thread. To get a string with the current thread's state use #status
4109 *
4110 * thr = Thread.new { sleep }
4111 * thr.status # => "sleep"
4112 * thr.exit
4113 * thr.status # => false
4114 *
4115 * You can also use #alive? to tell if the thread is running or sleeping,
4116 * and #stop? if the thread is dead or sleeping.
4117 *
4118 * === Thread variables and scope
4119 *
4120 * Since threads are created with blocks, the same variable-scope rules
4121 * apply as for other Ruby blocks. Any local variables created within the
4122 * block are accessible only to this thread.
4123 *
4124 * ==== Fiber-local vs. Thread-local
4125 *
4126 * Each fiber has its own bucket for Thread#[] storage. When you set a
4127 * new fiber-local it is only accessible within this Fiber. To illustrate:
4128 *
4129 * Thread.new {
4130 * Thread.current[:foo] = "bar"
4131 * Fiber.new {
4132 * p Thread.current[:foo] # => nil
4133 * }.resume
4134 * }.join
4135 *
4136 * This example uses #[] for getting and #[]= for setting fiber-locals;
4137 * you can also use #keys to list the fiber-locals for a given
4138 * thread and #key? to check if a fiber-local exists.
4139 *
4140 * When it comes to thread-locals, they are accessible within the entire
4141 * scope of the thread. Given the following example:
4142 *
4143 * Thread.new{
4144 * Thread.current.thread_variable_set(:foo, 1)
4145 * p Thread.current.thread_variable_get(:foo) # => 1
4146 * Fiber.new{
4147 * Thread.current.thread_variable_set(:foo, 2)
4148 * p Thread.current.thread_variable_get(:foo) # => 2
4149 * }.resume
4150 * p Thread.current.thread_variable_get(:foo) # => 2
4151 * }.join
4152 *
4153 * You can see that the thread-local +:foo+ carried over into the fiber
4154 * and was changed to +2+ by the end of the thread.
4155 *
4156 * This example makes use of #thread_variable_set to create new
4157 * thread-locals, and #thread_variable_get to reference them.
4158 *
4159 * There is also #thread_variables to list all thread-locals, and
4160 * #thread_variable? to check if a given thread-local exists.
4161 *
4162 * === Exception handling
4163 *
4164 * When an unhandled exception is raised inside a thread, it will
4165 * terminate. By default, this exception will not propagate to other
4166 * threads. The exception is stored and when another thread calls #value
4167 * or #join, the exception will be re-raised in that thread.
4168 *
4169 * t = Thread.new{ raise 'something went wrong' }
4170 * t.value #=> RuntimeError: something went wrong
4171 *
4172 * An exception can be raised from outside the thread using the
4173 * Thread#raise instance method, which takes the same parameters as
4174 * Kernel#raise.
4175 *
4176 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
4177 * true, or $DEBUG = true will cause a subsequent unhandled exception
4178 * raised in a thread to be automatically re-raised in the main thread.
4179 *
4180 * With the addition of the class method ::handle_interrupt, you can now
4181 * handle exceptions asynchronously with threads.
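 *
 * A minimal sketch of deferring an asynchronous Thread#raise:
 *
 *     th = Thread.new do
 *       Thread.handle_interrupt(RuntimeError => :never) do
 *         # a RuntimeError sent via th.raise is deferred here ...
 *         Thread.handle_interrupt(RuntimeError => :immediate) do
 *           # ... and delivered as soon as this block is entered
 *         end
 *       end
 *     end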
4182 *
4183 * === Scheduling
4184 *
4185 * Ruby provides a few ways to support scheduling threads in your program.
4186 *
4187 * The first way is by using the class method ::stop, to put the current
4188 * running thread to sleep and schedule the execution of another thread.
4189 *
4190 * Once a thread is asleep, you can use the instance method #wakeup to
4191 * mark your thread as eligible for scheduling.
4192 *
4193 * You can also try ::pass, which attempts to pass execution to another
4194 * thread; whether the running thread actually switches is up to the OS.
4195 * The same goes for #priority, which lets you hint to the thread
4196 * scheduler which threads should take precedence when passing
4197 * execution. This method is also OS-dependent and may be ignored
4198 * on some platforms.
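 *
 * For example (a sketch; it relies on timing, but prints "abc"):
 *
 *     a = Thread.new { print "a"; Thread.stop; print "c" }
 *     sleep 0.1 while a.status != 'sleep'
 *     print "b"
 *     a.wakeup
 *     a.join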
4199 *
4200 */
4201 rb_cThread = rb_define_class("Thread", rb_cObject);
4202 rb_undef_alloc_func(rb_cThread);
4203
4204#if VM_COLLECT_USAGE_DETAILS
4205 /* ::RubyVM::USAGE_ANALYSIS_* */
4206#define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
4207 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
4208 define_usage_analysis_hash(INSN);
4209 define_usage_analysis_hash(REGS);
4210 define_usage_analysis_hash(INSN_BIGRAM);
4211
4212 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
4213 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
4214 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
4215 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
4216 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
4217 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
4218 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
4219 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
4220 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
4221 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
4222 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
4223 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
4224#endif
4225
4226 /* ::RubyVM::OPTS
4227 * An Array of VM build options.
4228 * This constant is MRI specific.
4229 */
4230 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
4231
4232#if OPT_DIRECT_THREADED_CODE
4233 rb_ary_push(opts, rb_str_new2("direct threaded code"));
4234#elif OPT_TOKEN_THREADED_CODE
4235 rb_ary_push(opts, rb_str_new2("token threaded code"));
4236#elif OPT_CALL_THREADED_CODE
4237 rb_ary_push(opts, rb_str_new2("call threaded code"));
4238#endif
4239
4240#if OPT_OPERANDS_UNIFICATION
4241 rb_ary_push(opts, rb_str_new2("operands unification"));
4242#endif
4243#if OPT_INSTRUCTIONS_UNIFICATION
4244 rb_ary_push(opts, rb_str_new2("instructions unification"));
4245#endif
4246#if OPT_INLINE_METHOD_CACHE
4247 rb_ary_push(opts, rb_str_new2("inline method cache"));
4248#endif
4249
4250 /* ::RubyVM::INSTRUCTION_NAMES
4251 * A list of bytecode instruction names in MRI.
4252 * This constant is MRI specific.
4253 */
4254 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
4255
4256 /* ::RubyVM::DEFAULT_PARAMS
4257 * This constant exposes the VM's default parameters.
4258 * Note that changing these values does not affect VM execution.
4259 * Specification is not stable and you should not depend on this value.
4260 * Of course, this constant is MRI specific.
4261 */
4262 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
4263
4264 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
4265#if VMDEBUG
4266 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
4267 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
4268 rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
4269 rb_define_singleton_method(rb_cRubyVM, "mtbl2", vm_mtbl2, 2);
4270#else
4271 (void)sdr;
4272 (void)nsdr;
4273 (void)vm_mtbl;
4274 (void)vm_mtbl2;
4275#endif
4276
4277 /* VM bootstrap: phase 2 */
4278 {
4279 rb_vm_t *vm = ruby_current_vm_ptr;
4280 rb_thread_t *th = GET_THREAD();
4281 VALUE filename = rb_fstring_lit("<main>");
4282 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
4283
4284 // Ractor setup
4285 rb_ractor_main_setup(vm, th->ractor, th);
4286
4287 /* create vm object */
4288 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
4289
4290 /* create main thread */
4291 th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
4292 vm->ractor.main_thread = th;
4293 vm->ractor.main_ractor = th->ractor;
4294 th->vm = vm;
4295 th->top_wrapper = 0;
4296 th->top_self = rb_vm_top_self();
4297 th->namespaces = 0;
4298 th->ns = 0;
4299
4300 rb_vm_register_global_object((VALUE)iseq);
4301 th->ec->cfp->iseq = iseq;
4302 th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
4303 th->ec->cfp->self = th->top_self;
4304
4305 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
4306 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE));
4307
4308 /*
4309 * The Binding of the top level scope
4310 */
4311 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
4312
4313#ifdef _WIN32
4314 rb_objspace_gc_enable(vm->gc.objspace);
4315#endif
4316 }
4317 vm_init_redefined_flag();
4318
4319 rb_block_param_proxy = rb_obj_alloc(rb_cObject);
4320 rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
4321 OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
4322 rb_obj_freeze(rb_block_param_proxy);
4323 rb_vm_register_global_object(rb_block_param_proxy);
4324
4325 /* vm_backtrace.c */
4326 Init_vm_backtrace();
4327}
4328
4329void
4330rb_vm_set_progname(VALUE filename)
4331{
4332 rb_thread_t *th = GET_VM()->ractor.main_thread;
4333 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
4334 --cfp;
4335
4336 filename = rb_str_new_frozen(filename);
4337 rb_iseq_pathobj_set(cfp->iseq, filename, rb_iseq_realpath(cfp->iseq));
4338}
4339
4340extern const struct st_hash_type rb_fstring_hash_type;
4341
4342void
4343Init_BareVM(void)
4344{
4345 /* VM bootstrap: phase 1 */
4346 rb_vm_t *vm = ruby_mimcalloc(1, sizeof(*vm));
4347 rb_thread_t *th = ruby_mimcalloc(1, sizeof(*th));
4348 if (!vm || !th) {
4349 fputs("[FATAL] failed to allocate memory\n", stderr);
4350 exit(EXIT_FAILURE);
4351 }
4352
4353 // setup the VM
4354 vm_init2(vm);
4355
4356 rb_vm_postponed_job_queue_init(vm);
4357 ruby_current_vm_ptr = vm;
4358 rb_objspace_alloc();
4359 vm->negative_cme_table = rb_id_table_create(16);
4360 vm->overloaded_cme_table = st_init_numtable();
4361 vm->constant_cache = rb_id_table_create(0);
4362 vm->unused_block_warning_table = set_init_numtable();
4363
4364 // setup main thread
4365 th->nt = ZALLOC(struct rb_native_thread);
4366 th->vm = vm;
4367 th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
4368 Init_native_thread(th);
4369 rb_jit_cont_init();
4370 th_init(th, 0, vm);
4371
4372 rb_ractor_set_current_ec(th->ractor, th->ec);
4373 /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */
4374 ruby_thread_init_stack(th, native_main_thread_stack_top);
4375
4376 // setup ractor system
4377 rb_native_mutex_initialize(&vm->ractor.sync.lock);
4378 rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
4379
4380 vm_opt_method_def_table = st_init_numtable();
4381 vm_opt_mid_table = st_init_numtable();
4382
4383#ifdef RUBY_THREAD_WIN32_H
4384 rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
4385#endif
4386}
4387
4388void
4389ruby_init_stack(void *addr)
4390{
4391 native_main_thread_stack_top = addr;
4392}
4393
4394#ifndef _WIN32
4395#include <unistd.h>
4396#include <sys/mman.h>
4397#endif
4398
4399
4400#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
4401#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
4402#endif
4403
4404struct pin_array_list {
4405 VALUE next;
4406 long len;
4407 VALUE *array;
4408};
4409
4410static void
4411pin_array_list_mark(void *data)
4412{
4413 struct pin_array_list *array = (struct pin_array_list *)data;
4414 rb_gc_mark_movable(array->next);
4415
4416 rb_gc_mark_vm_stack_values(array->len, array->array);
4417}
4418
4419static void
4420pin_array_list_free(void *data)
4421{
4422 struct pin_array_list *array = (struct pin_array_list *)data;
4423 xfree(array->array);
4424}
4425
4426static size_t
4427pin_array_list_memsize(const void *data)
4428{
4429 return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE));
4430}
4431
4432static void
4433pin_array_list_update_references(void *data)
4434{
4435 struct pin_array_list *array = (struct pin_array_list *)data;
4436 array->next = rb_gc_location(array->next);
4437}
4438
4439static const rb_data_type_t pin_array_list_type = {
4440 .wrap_struct_name = "VM/pin_array_list",
4441 .function = {
4442 .dmark = pin_array_list_mark,
4443 .dfree = pin_array_list_free,
4444 .dsize = pin_array_list_memsize,
4445 .dcompact = pin_array_list_update_references,
4446 },
4447 .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
4448};
4449
4450static VALUE
4451pin_array_list_new(VALUE next)
4452{
4453 struct pin_array_list *array_list;
4454 VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list);
4455 RB_OBJ_WRITE(obj, &array_list->next, next);
4456 array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE);
4457 return obj;
4458}
4459
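/* Appends item to the head bucket of the pin list.  When the bucket is
 * full, a fresh bucket is prepended to the chain and returned as the new
 * head, so callers must store the returned VALUE back. */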
4460static VALUE
4461pin_array_list_append(VALUE obj, VALUE item)
4462{
4463 struct pin_array_list *array_list;
4464 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4465
4466 if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) {
4467 obj = pin_array_list_new(obj);
4468 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4469 }
4470
4471 RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item);
4472 array_list->len++;
4473 return obj;
4474}
4475
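/* Registers obj as a VM-global GC root: it stays marked (and pinned in
 * mark_object_ary) for the lifetime of the VM.  Classes and modules are
 * flagged RCLASS_IS_ROOT first so each is registered at most once. */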
4476void
4477rb_vm_register_global_object(VALUE obj)
4478{
4479 RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
4480 if (RB_SPECIAL_CONST_P(obj)) {
4481 return;
4482 }
4483
4484 switch (RB_BUILTIN_TYPE(obj)) {
4485 case T_CLASS:
4486 case T_MODULE:
4487 if (FL_TEST(obj, RCLASS_IS_ROOT)) {
4488 return;
4489 }
4490 FL_SET(obj, RCLASS_IS_ROOT);
4491 break;
4492 default:
4493 break;
4494 }
4495 RB_VM_LOCK_ENTER();
4496 {
4497 VALUE list = GET_VM()->mark_object_ary;
4498 VALUE head = pin_array_list_append(list, obj);
4499 if (head != list) {
4500 GET_VM()->mark_object_ary = head;
4501 }
4502 RB_GC_GUARD(obj);
4503 }
4504 RB_VM_LOCK_LEAVE();
4505}
4506
4507void
4508Init_vm_objects(void)
4509{
4510 rb_vm_t *vm = GET_VM();
4511
4512 /* initialize mark object array, hash */
4513 vm->mark_object_ary = pin_array_list_new(Qnil);
4514 vm->loading_table = st_init_strtable();
4515 vm->ci_table = st_init_table(&vm_ci_hashtype);
4516}
4517
4518// Stub for builtin function when not building YJIT units
4519#if !USE_YJIT
4520void Init_builtin_yjit(void) {}
4521#endif
4522
4523// Whether YJIT is enabled or not, we load yjit_hook.rb to remove Kernel#with_yjit.
4524#include "yjit_hook.rbinc"
4525
4526// Stub for builtin function when not building ZJIT units
4527#if !USE_ZJIT
4528void Init_builtin_zjit(void) {}
4529#endif
4530
4531/* top self */
4532
4533static VALUE
4534main_to_s(VALUE obj)
4535{
4536 return rb_str_new2("main");
4537}
4538
4539VALUE
4540rb_vm_top_self(void)
4541{
4542 return GET_VM()->top_self;
4543}
4544
4545void
4546Init_top_self(void)
4547{
4548 rb_vm_t *vm = GET_VM();
4549
4550 vm->top_self = rb_obj_alloc(rb_cObject);
4551 rb_define_singleton_method(rb_vm_top_self(), "to_s", main_to_s, 0);
4552 rb_define_alias(rb_singleton_class(rb_vm_top_self()), "inspect", "to_s");
4553}
4554
4555VALUE *
4556rb_ruby_verbose_ptr(void)
4557{
4558 rb_ractor_t *cr = GET_RACTOR();
4559 return &cr->verbose;
4560}
4561
4562VALUE *
4563rb_ruby_debug_ptr(void)
4564{
4565 rb_ractor_t *cr = GET_RACTOR();
4566 return &cr->debug;
4567}
4568
4569bool rb_free_at_exit = false;
4570
4571bool
4572ruby_free_at_exit_p(void)
4573{
4574 return rb_free_at_exit;
4575}
4576
4577/* iseq.c */
4578VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
4579 VALUE insn, int op_no, VALUE op,
4580 int len, size_t pos, VALUE *pnop, VALUE child);
4581
4582#if VM_COLLECT_USAGE_DETAILS
4583
4584#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
4585
4586/* uh = {
4587 * insn(Fixnum) => ihash(Hash)
4588 * }
4589 * ihash = {
4590 * -1(Fixnum) => count, # insn usage
4591 * 0(Fixnum) => ophash, # operand usage
4592 * }
4593 * ophash = {
4594 * val(interned string) => count(Fixnum)
4595 * }
4596 */
4597static void
4598vm_analysis_insn(int insn)
4599{
4600 ID usage_hash;
4601 ID bigram_hash;
4602 static int prev_insn = -1;
4603
4604 VALUE uh;
4605 VALUE ihash;
4606 VALUE cv;
4607
4608 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4609 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4610 uh = rb_const_get(rb_cRubyVM, usage_hash);
4611 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4612 ihash = rb_hash_new();
4613 HASH_ASET(uh, INT2FIX(insn), ihash);
4614 }
4615 if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) {
4616 cv = INT2FIX(0);
4617 }
4618 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
4619
4620 /* calc bigram */
4621 if (prev_insn != -1) {
4622 VALUE bi;
4623 VALUE ary[2];
4624 VALUE cv;
4625
4626 ary[0] = INT2FIX(prev_insn);
4627 ary[1] = INT2FIX(insn);
4628 bi = rb_ary_new4(2, &ary[0]);
4629
4630 uh = rb_const_get(rb_cRubyVM, bigram_hash);
4631 if (NIL_P(cv = rb_hash_aref(uh, bi))) {
4632 cv = INT2FIX(0);
4633 }
4634 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
4635 }
4636 prev_insn = insn;
4637}
4638
4639static void
4640vm_analysis_operand(int insn, int n, VALUE op)
4641{
4642 ID usage_hash;
4643
4644 VALUE uh;
4645 VALUE ihash;
4646 VALUE ophash;
4647 VALUE valstr;
4648 VALUE cv;
4649
4650 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4651
4652 uh = rb_const_get(rb_cRubyVM, usage_hash);
4653 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4654 ihash = rb_hash_new();
4655 HASH_ASET(uh, INT2FIX(insn), ihash);
4656 }
4657 if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) {
4658 ophash = rb_hash_new();
4659 HASH_ASET(ihash, INT2FIX(n), ophash);
4660 }
4661 /* intern */
4662 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
4663
4664 /* set count */
4665 if (NIL_P(cv = rb_hash_aref(ophash, valstr))) {
4666 cv = INT2FIX(0);
4667 }
4668 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
4669}
4670
4671static void
4672vm_analysis_register(int reg, int isset)
4673{
4674 ID usage_hash;
4675 VALUE uh;
4676 VALUE valstr;
4677 static const char regstrs[][5] = {
4678 "pc", /* 0 */
4679 "sp", /* 1 */
4680 "ep", /* 2 */
4681 "cfp", /* 3 */
4682 "self", /* 4 */
4683 "iseq", /* 5 */
4684 };
4685 static const char getsetstr[][4] = {
4686 "get",
4687 "set",
4688 };
4689 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
4690
4691 VALUE cv;
4692
4693 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4694 if (syms[0][0] == 0) { /* lazily intern the register symbols once */
4695 char buff[0x10];
4696 int i;
4697
4698 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
4699 int j;
4700 for (j = 0; j < 2; j++) {
4701 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
4702 syms[i][j] = ID2SYM(rb_intern(buff));
4703 }
4704 }
4705 }
4706 valstr = syms[reg][isset];
4707
4708 uh = rb_const_get(rb_cRubyVM, usage_hash);
4709 if (NIL_P(cv = rb_hash_aref(uh, valstr))) {
4710 cv = INT2FIX(0);
4711 }
4712 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
4713}
4714
4715#undef HASH_ASET
4716
4717static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
4718static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
4719static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
4720
4721/* :nodoc: */
4722static VALUE
4723usage_analysis_insn_start(VALUE self)
4724{
4725 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
4726 return Qnil;
4727}
4728
4729/* :nodoc: */
4730static VALUE
4731usage_analysis_operand_start(VALUE self)
4732{
4733 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
4734 return Qnil;
4735}
4736
4737/* :nodoc: */
4738static VALUE
4739usage_analysis_register_start(VALUE self)
4740{
4741 ruby_vm_collect_usage_func_register = vm_analysis_register;
4742 return Qnil;
4743}
4744
4745/* :nodoc: */
4746static VALUE
4747usage_analysis_insn_stop(VALUE self)
4748{
4749 ruby_vm_collect_usage_func_insn = 0;
4750 return Qnil;
4751}
4752
4753/* :nodoc: */
4754static VALUE
4755usage_analysis_operand_stop(VALUE self)
4756{
4757 ruby_vm_collect_usage_func_operand = 0;
4758 return Qnil;
4759}
4760
4761/* :nodoc: */
4762static VALUE
4763usage_analysis_register_stop(VALUE self)
4764{
4765 ruby_vm_collect_usage_func_register = 0;
4766 return Qnil;
4767}
4768
4769/* :nodoc: */
4770static VALUE
4771usage_analysis_insn_running(VALUE self)
4772{
4773 return RBOOL(ruby_vm_collect_usage_func_insn != 0);
4774}
4775
4776/* :nodoc: */
4777static VALUE
4778usage_analysis_operand_running(VALUE self)
4779{
4780 return RBOOL(ruby_vm_collect_usage_func_operand != 0);
4781}
4782
4783/* :nodoc: */
4784static VALUE
4785usage_analysis_register_running(VALUE self)
4786{
4787 return RBOOL(ruby_vm_collect_usage_func_register != 0);
4788}
4789
4790static VALUE
4791usage_analysis_clear(VALUE self, ID usage_hash)
4792{
4793 VALUE uh;
4794 uh = rb_const_get(self, usage_hash);
4795 rb_hash_clear(uh);
4796
4797 return Qtrue;
4798}
4799
4800
4801/* :nodoc: */
4802static VALUE
4803usage_analysis_insn_clear(VALUE self)
4804{
4805 ID usage_hash;
4806 ID bigram_hash;
4807
4808 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4809 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4810 usage_analysis_clear(rb_cRubyVM, usage_hash);
4811 return usage_analysis_clear(rb_cRubyVM, bigram_hash);
4812}
4813
4814/* :nodoc: */
4815static VALUE
4816usage_analysis_operand_clear(VALUE self)
4817{
4818 ID usage_hash;
4819
4820 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4821 return usage_analysis_clear(self, usage_hash);
4822}
4823
4824/* :nodoc: */
4825static VALUE
4826usage_analysis_register_clear(VALUE self)
4827{
4828 ID usage_hash;
4829
4830 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4831 return usage_analysis_clear(self, usage_hash);
4832}
4833
4834#else
4835
4836MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = 0;
4837MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = 0;
4838MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = 0;
4839
4840#endif
4841
4842#if VM_COLLECT_USAGE_DETAILS
4843/* @param insn instruction number */
4844static void
4845vm_collect_usage_insn(int insn)
4846{
4847 if (RUBY_DTRACE_INSN_ENABLED()) {
4848 RUBY_DTRACE_INSN(rb_insns_name(insn));
4849 }
4850 if (ruby_vm_collect_usage_func_insn)
4851 (*ruby_vm_collect_usage_func_insn)(insn);
4852}
4853
4854/* @param insn instruction number
4855 * @param n n-th operand
4856 * @param op operand value
4857 */
4858static void
4859vm_collect_usage_operand(int insn, int n, VALUE op)
4860{
4861 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
4862 VALUE valstr;
4863
4864 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
4865
4866 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
4867 RB_GC_GUARD(valstr);
4868 }
4869 if (ruby_vm_collect_usage_func_operand)
4870 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
4871}
4872
4873/* @param reg register id. see code of vm_analysis_register() */
4874/* @param isset 0: read, 1: write */
4875static void
4876vm_collect_usage_register(int reg, int isset)
4877{
4878 if (ruby_vm_collect_usage_func_register)
4879 (*ruby_vm_collect_usage_func_register)(reg, isset);
4880}
4881#endif
4882
4883const struct rb_callcache *
4884rb_vm_empty_cc(void)
4885{
4886 return &vm_empty_cc;
4887}
4888
4889const struct rb_callcache *
4890rb_vm_empty_cc_for_super(void)
4891{
4892 return &vm_empty_cc_for_super;
4893}
4894
4895#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */