Ruby 4.1.0dev (2025-12-29 revision 44e762a99c2234756594382f36fc64db1d6c31d0)
vm.c (44e762a99c2234756594382f36fc64db1d6c31d0)
1/**********************************************************************
2
3 vm.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11#define vm_exec rb_vm_exec
12
13#include "eval_intern.h"
14#include "internal.h"
15#include "internal/box.h"
16#include "internal/class.h"
17#include "internal/compile.h"
18#include "internal/cont.h"
19#include "internal/error.h"
20#include "internal/encoding.h"
21#include "internal/eval.h"
22#include "internal/gc.h"
23#include "internal/inits.h"
24#include "internal/missing.h"
25#include "internal/object.h"
26#include "internal/proc.h"
27#include "internal/re.h"
28#include "internal/ruby_parser.h"
29#include "internal/symbol.h"
30#include "internal/thread.h"
31#include "internal/transcode.h"
32#include "internal/vm.h"
33#include "internal/sanitizers.h"
34#include "internal/variable.h"
35#include "iseq.h"
36#include "symbol.h" // This includes a macro for a more performant rb_id2sym.
37#include "yjit.h"
38#include "insns.inc"
39#include "zjit.h"
40#include "ruby/st.h"
41#include "ruby/vm.h"
42#include "vm_core.h"
43#include "vm_callinfo.h"
44#include "vm_debug.h"
45#include "vm_exec.h"
46#include "vm_insnhelper.h"
47#include "ractor_core.h"
48#include "vm_sync.h"
49#include "shape.h"
50
51#include "builtin.h"
52
53#include "probes.h"
54#include "probes_helper.h"
55
56#ifdef RUBY_ASSERT_CRITICAL_SECTION
57int ruby_assert_critical_section_entered = 0;
58#endif
59
60static void *native_main_thread_stack_top;
61
62bool ruby_vm_during_cleanup = false;
63
64VALUE rb_str_concat_literals(size_t, const VALUE*);
65
67
68extern const char *const rb_debug_counter_names[];
69
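// VM_EP_LEP: follow the VM_ENV_PREV_EP chain from a block-level environment
// pointer up to the "local" environment (the ep with VM_ENV_FLAG_LOCAL set),
// i.e. the environment of the method/class/top frame the block ultimately
// belongs to.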
70PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
71static inline const VALUE *
72VM_EP_LEP(const VALUE *ep)
73{
74 while (!VM_ENV_LOCAL_P(ep)) {
75 ep = VM_ENV_PREV_EP(ep);
76 }
77 return ep;
78}
79
80static inline const rb_control_frame_t *
81rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
82{
83 if (!ep) {
84 return NULL;
85 }
86 else {
87 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
88
89 while (cfp < eocfp) {
90 if (cfp->ep == ep) {
91 return cfp;
92 }
93 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
94 }
95
96 return NULL;
97 }
98}
99
100#if VM_CHECK_MODE > 0
101// ruby_box_crashed defined in internal/box.h
102#define VM_BOX_CRASHED() {ruby_box_crashed = true;}
103#define VM_BOX_ASSERT(expr, msg) \
104 if (!(expr)) { ruby_box_crashed = true; rb_bug(msg); }
105#else
106#define VM_BOX_CRASHED() {}
107#define VM_BOX_ASSERT(expr, msg) ((void)0)
108#endif
109
110static const VALUE *
111VM_EP_RUBY_LEP(const rb_execution_context_t *ec, const rb_control_frame_t *current_cfp)
112{
113 // rb_vmdebug_box_env_dump_raw() simulates this function
114 const VALUE *ep = current_cfp->ep;
115 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
116 const rb_control_frame_t *cfp = current_cfp;
117
118 if (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_IFUNC)) {
119 ep = VM_EP_LEP(current_cfp->ep);
148 VM_ASSERT(VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC));
149 return ep;
150 }
151
152 while (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC)) {
153 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
154
155 VM_BOX_ASSERT(cfp, "CFUNC should have a valid previous control frame");
156 VM_BOX_ASSERT(cfp < eocfp, "CFUNC should have a valid caller frame");
157 if (!cfp || cfp >= eocfp) {
158 return NULL;
159 }
160
161 VM_BOX_ASSERT(cfp->ep, "CFUNC should have a valid caller frame with env");
162 ep = cfp->ep;
163 if (!ep) {
164 return NULL;
165 }
166 }
167
168 while (!VM_ENV_LOCAL_P(ep)) {
169 ep = VM_ENV_PREV_EP(ep);
170 }
171
172 return ep;
173}
174
175const VALUE *
176rb_vm_ep_local_ep(const VALUE *ep)
177{
178 return VM_EP_LEP(ep);
179}
180
181PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
182static inline const VALUE *
183VM_CF_LEP(const rb_control_frame_t * const cfp)
184{
185 return VM_EP_LEP(cfp->ep);
186}
187
188static inline const VALUE *
189VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
190{
191 return VM_ENV_PREV_EP(cfp->ep);
192}
193
194PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
195static inline VALUE
196VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
197{
198 const VALUE *ep;
199 if (VM_ENV_BOXED_P(cfp->ep)) {
200 VM_ASSERT(VM_ENV_LOCAL_P(cfp->ep));
201 /* Never set block_handler for VM_FRAME_MAGIC_TOP or VM_FRAME_MAGIC_CLASS
202 * and the specval is used for boxes (rb_box_t) in these cases
203 */
204 return VM_BLOCK_HANDLER_NONE;
205 }
206 ep = VM_CF_LEP(cfp);
207 return VM_ENV_BLOCK_HANDLER(ep);
208}
209
210int
211rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
212{
213 return VM_FRAME_CFRAME_KW_P(cfp);
214}
215
216VALUE
217rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
218{
219 return VM_CF_BLOCK_HANDLER(cfp);
220}
221
222#if VM_CHECK_MODE > 0
223static int
224VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
225{
226 const VALUE *start = ec->vm_stack;
227 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
228 VM_ASSERT(start != NULL);
229
230 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
231 return FALSE;
232 }
233 else {
234 return TRUE;
235 }
236}
237
238static int
239VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
240{
241 const VALUE *start = ec->vm_stack;
242 const VALUE *end = (VALUE *)ec->cfp;
243 VM_ASSERT(start != NULL);
244
245 if (start <= ep && ep < end) {
246 return FALSE;
247 }
248 else {
249 return TRUE;
250 }
251}
252
253static int
254vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
255{
256 if (VM_EP_IN_HEAP_P(ec, ep)) {
257 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
258
259 if (!UNDEF_P(envval)) {
260 const rb_env_t *env = (const rb_env_t *)envval;
261
262 VM_ASSERT(imemo_type_p(envval, imemo_env));
263 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
264 VM_ASSERT(env->ep == ep);
265 }
266 return TRUE;
267 }
268 else {
269 return FALSE;
270 }
271}
272
273int
274rb_vm_ep_in_heap_p(const VALUE *ep)
275{
276 const rb_execution_context_t *ec = GET_EC();
277 if (ec->vm_stack == NULL) return TRUE;
278 return vm_ep_in_heap_p_(ec, ep);
279}
280#endif
281
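// A struct rb_captured_block (self, ep, code) overlays the corresponding
// fields of rb_control_frame_t starting at &cfp->self; the two helpers below
// convert between the two views with pointer arithmetic (the "- 3" skips the
// pc, sp and iseq slots that precede cfp->self).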
282static struct rb_captured_block *
283VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
284{
285 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
286 return (struct rb_captured_block *)&cfp->self;
287}
288
289static rb_control_frame_t *
290VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
291{
292 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
293 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
294 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
295 return cfp;
296}
297
298static int
299VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
300{
301 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
302 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
303}
304
305static VALUE
306vm_passed_block_handler(rb_execution_context_t *ec)
307{
308 VALUE block_handler = ec->passed_block_handler;
309 ec->passed_block_handler = VM_BLOCK_HANDLER_NONE;
310 vm_block_handler_verify(block_handler);
311 return block_handler;
312}
313
314static rb_cref_t *
315vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev, int singleton)
316{
317 VALUE refinements = Qnil;
318 int omod_shared = FALSE;
319
320 /* scope */
321 rb_scope_visibility_t scope_visi;
322 scope_visi.method_visi = visi;
323 scope_visi.module_func = module_func;
324
325 /* refinements */
326 if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
327 refinements = CREF_REFINEMENTS(prev_cref);
328
329 if (!NIL_P(refinements)) {
330 omod_shared = TRUE;
331 CREF_OMOD_SHARED_SET(prev_cref);
332 }
333 }
334
335 VM_ASSERT(singleton || klass);
336
337 rb_cref_t *cref = SHAREABLE_IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
338 cref->klass_or_self = klass;
339 cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
340 *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi;
341
342 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
343 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
344 if (singleton) CREF_SINGLETON_SET(cref);
345
346 return cref;
347}
348
349static rb_cref_t *
350vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int singleton)
351{
352 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE, singleton);
353}
354
355static rb_cref_t *
356vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
357{
358 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE, FALSE);
359}
360
361static int
362ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
363{
364 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
365}
366
367static rb_cref_t *
368vm_cref_dup(const rb_cref_t *cref)
369{
370 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
371 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
372 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
373 int singleton = CREF_SINGLETON(cref);
374
375 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
376
377 if (!NIL_P(CREF_REFINEMENTS(cref))) {
378 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
379 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
380 CREF_REFINEMENTS_SET(new_cref, ref);
381 CREF_OMOD_SHARED_UNSET(new_cref);
382 }
383
384 return new_cref;
385}
386
387
388rb_cref_t *
389rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
390{
391 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
392 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
393 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
394 int singleton = CREF_SINGLETON(cref);
395
396 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
397
398 if (!NIL_P(CREF_REFINEMENTS(cref))) {
399 CREF_REFINEMENTS_SET(new_cref, Qnil);
400 CREF_OMOD_SHARED_UNSET(new_cref);
401 }
402
403 return new_cref;
404}
405
406static rb_cref_t *
407vm_cref_new_toplevel(rb_execution_context_t *ec)
408{
409 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE, FALSE);
410 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
411
412 if (top_wrapper) {
413 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE);
414 }
415
416 return cref;
417}
418
419rb_cref_t *
420rb_vm_cref_new_toplevel(void)
421{
422 return vm_cref_new_toplevel(GET_EC());
423}
424
425static void
426vm_cref_dump(const char *mesg, const rb_cref_t *cref)
427{
428 ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
429
430 while (cref) {
431 ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
432 cref = CREF_NEXT(cref);
433 }
434}
435
436void
437rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
438{
439 *((const VALUE **)&dst->as.captured.ep) = ep;
440 RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
441}
442
443static void
444vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
445{
446 const rb_env_t *env = (rb_env_t *)envval;
447 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
448 rb_vm_block_ep_update(bindval, &bind->block, env->ep);
449}
450
451#if VM_COLLECT_USAGE_DETAILS
452static void vm_collect_usage_operand(int insn, int n, VALUE op);
453static void vm_collect_usage_insn(int insn);
454static void vm_collect_usage_register(int reg, int isset);
455#endif
456
457static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
458static VALUE vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
459 int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
460 const rb_callable_method_entry_t *me);
461static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
462
463#if USE_YJIT
464// Counter to serve as a proxy for execution time, total number of calls
465static uint64_t yjit_total_entry_hits = 0;
466
467// Number of calls used to estimate how hot an ISEQ is
468#define YJIT_CALL_COUNT_INTERV 20u
469
471static inline bool
472rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls)
473{
474 yjit_total_entry_hits += 1;
475
476 // Record the number of calls at the beginning of the interval
477 if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) {
478 iseq->body->yjit_calls_at_interv = yjit_total_entry_hits;
479 }
480
481 // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ
482 // This gives us a ratio of how hot/cold this ISEQ is
483 if (entry_calls == rb_yjit_call_threshold) {
484 // We expect threshold 1 to compile everything immediately
485 if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) {
486 return true;
487 }
488
489 uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv;
490
491 // Reject ISEQs that don't get called often enough
492 if (num_calls > rb_yjit_cold_threshold) {
493 rb_yjit_incr_counter("cold_iseq_entry");
494 return false;
495 }
496
497 return true;
498 }
499
500 return false;
501}
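// Worked example (assuming --yjit-call-threshold=30): at this ISEQ's 10th
// entry call (30 - YJIT_CALL_COUNT_INTERV) the global yjit_total_entry_hits is
// recorded. At its 30th call, the difference between the current global count
// and the recorded one is how many ISEQ entries the whole VM performed while
// this ISEQ collected its last 20 calls; if that exceeds rb_yjit_cold_threshold,
// the ISEQ is considered cold and is not compiled.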
502#else
503#define rb_yjit_threshold_hit(iseq, entry_calls) false
504#endif
505
506#if USE_YJIT
507// Generate JIT code that supports the following kinds of ISEQ entries:
508// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
509// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
510// The current vm_exec stops if JIT code returns a non-Qundef value.
511// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
512// blocks called by a Ruby frame that isn't compiled or side-exited).
513// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
514// vm_exec does NOT stop whether JIT code returns Qundef or not.
515static inline rb_jit_func_t
516yjit_compile(rb_execution_context_t *ec)
517{
518 const rb_iseq_t *iseq = ec->cfp->iseq;
519 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
520
521 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
522 if (body->jit_entry == NULL) {
523 body->jit_entry_calls++;
524 if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
525 rb_yjit_compile_iseq(iseq, ec, false);
526 }
527 }
528 return body->jit_entry;
529}
530#else
531# define yjit_compile(ec) ((rb_jit_func_t)0)
532#endif
533
534#if USE_ZJIT
535static inline rb_jit_func_t
536zjit_compile(rb_execution_context_t *ec)
537{
538 const rb_iseq_t *iseq = ec->cfp->iseq;
539 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
540
541 if (body->jit_entry == NULL) {
542 body->jit_entry_calls++;
543
544 // At profile-threshold, rewrite some of the YARV instructions
545 // to zjit_* instructions to profile these instructions.
546 if (body->jit_entry_calls == rb_zjit_profile_threshold) {
547 rb_zjit_profile_enable(iseq);
548 }
549
550 // At call-threshold, compile the ISEQ with ZJIT.
551 if (body->jit_entry_calls == rb_zjit_call_threshold) {
552 rb_zjit_compile_iseq(iseq, false);
553 }
554 }
555 return body->jit_entry;
556}
557#else
558# define zjit_compile(ec) ((rb_jit_func_t)0)
559#endif
560
561// Execute JIT code compiled by yjit_compile() or zjit_compile()
562static inline VALUE
563jit_exec(rb_execution_context_t *ec)
564{
565#if USE_YJIT
566 if (rb_yjit_enabled_p) {
567 rb_jit_func_t func = yjit_compile(ec);
568 if (func) {
569 return func(ec, ec->cfp);
570 }
571 return Qundef;
572 }
573#endif
574
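    // If YJIT is enabled at runtime, the branch above always returns, so the
    // ZJIT path below is only reached when YJIT is disabled or not built in.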
575#if USE_ZJIT
576 void *zjit_entry = rb_zjit_entry;
577 if (zjit_entry) {
578 rb_jit_func_t func = zjit_compile(ec);
579 if (func) {
580 return ((rb_zjit_func_t)zjit_entry)(ec, ec->cfp, func);
581 }
582 }
583#endif
584 return Qundef;
585}
586
587#if USE_YJIT || USE_ZJIT
588// Generate JIT code that supports the following kind of ISEQ entry:
589// * The first ISEQ pushed by vm_exec_handle_exception. The frame would
590// point to a location specified by a catch table, and it doesn't have
591// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
592// a non-Qundef value. So you should not return a non-Qundef value
593// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
594static inline rb_jit_func_t
595jit_compile_exception(rb_execution_context_t *ec)
596{
597 const rb_iseq_t *iseq = ec->cfp->iseq;
598 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
599
600#if USE_ZJIT
601 if (body->jit_exception == NULL && rb_zjit_enabled_p) {
602 body->jit_exception_calls++;
603
604 // At profile-threshold, rewrite some of the YARV instructions
605 // to zjit_* instructions to profile these instructions.
606 if (body->jit_exception_calls == rb_zjit_profile_threshold) {
607 rb_zjit_profile_enable(iseq);
608 }
609
610 // At call-threshold, compile the ISEQ with ZJIT.
611 if (body->jit_exception_calls == rb_zjit_call_threshold) {
612 rb_zjit_compile_iseq(iseq, true);
613 }
614 }
615#endif
616
617#if USE_YJIT
618 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
619 if (body->jit_exception == NULL && rb_yjit_enabled_p) {
620 body->jit_exception_calls++;
621 if (body->jit_exception_calls == rb_yjit_call_threshold) {
622 rb_yjit_compile_iseq(iseq, ec, true);
623 }
624 }
625#endif
626 return body->jit_exception;
627}
628
629// Execute JIT code compiled by jit_compile_exception()
630static inline VALUE
631jit_exec_exception(rb_execution_context_t *ec)
632{
633 rb_jit_func_t func = jit_compile_exception(ec);
634 if (func) {
635 // Call the JIT code
636 return func(ec, ec->cfp);
637 }
638 else {
639 return Qundef;
640 }
641}
642#else
643# define jit_compile_exception(ec) ((rb_jit_func_t)0)
644# define jit_exec_exception(ec) Qundef
645#endif
646
647static void add_opt_method_entry(const rb_method_entry_t *me);
648
649#define RB_TYPE_2_P(obj, type1, type2) \
650 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2))
651#define RB_TYPE_3_P(obj, type1, type2, type3) \
652 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2) || RB_TYPE_P(obj, type3))
653
654#define VM_ASSERT_TYPE(obj, type) \
655 VM_ASSERT(RB_TYPE_P(obj, type), #obj ": %s", rb_obj_info(obj))
656#define VM_ASSERT_TYPE2(obj, type1, type2) \
657 VM_ASSERT(RB_TYPE_2_P(obj, type1, type2), #obj ": %s", rb_obj_info(obj))
658#define VM_ASSERT_TYPE3(obj, type1, type2, type3) \
659 VM_ASSERT(RB_TYPE_3_P(obj, type1, type2, type3), #obj ": %s", rb_obj_info(obj))
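// Usage example: VM_ASSERT_TYPE2(hash, T_HASH, T_NIL) asserts that `hash` is a
// Hash or nil and includes rb_obj_info(hash) in the failure message. Like
// other VM_ASSERTs, these checks are only active in VM_CHECK_MODE builds.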
660
661#include "vm_insnhelper.c"
662
663#include "vm_exec.c"
664
665#include "vm_method.c"
666#include "vm_eval.c"
667
668#define PROCDEBUG 0
669
670VALUE rb_cRubyVM;
672VALUE rb_mRubyVMFrozenCore;
673VALUE rb_block_param_proxy;
674
675VALUE ruby_vm_const_missing_count = 0;
676rb_vm_t *ruby_current_vm_ptr = NULL;
677rb_ractor_t *ruby_single_main_ractor;
678bool ruby_vm_keep_script_lines;
679
680#ifdef RB_THREAD_LOCAL_SPECIFIER
681RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
682
683#ifdef RUBY_NT_SERIAL
684RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
685#endif
686
687// no-inline decl on vm_core.h
688rb_execution_context_t *
689rb_current_ec_noinline(void)
690{
691 return ruby_current_ec;
692}
693
694void
695rb_current_ec_set(rb_execution_context_t *ec)
696{
697 ruby_current_ec = ec;
698}
699
700
701#ifdef RB_THREAD_CURRENT_EC_NOINLINE
702rb_execution_context_t *
703rb_current_ec(void)
704{
705 return ruby_current_ec;
706}
707
708#endif
709#else
710native_tls_key_t ruby_current_ec_key;
711
712// no-inline decl on vm_core.h
713rb_execution_context_t *
714rb_current_ec_noinline(void)
715{
716 return native_tls_get(ruby_current_ec_key);
717}
718
719#endif
720
721rb_event_flag_t ruby_vm_event_flags = 0;
722rb_event_flag_t ruby_vm_event_enabled_global_flags = 0;
723unsigned int ruby_vm_c_events_enabled = 0;
724unsigned int ruby_vm_iseq_events_enabled = 0;
725
726rb_serial_t ruby_vm_constant_cache_invalidations = 0;
727rb_serial_t ruby_vm_constant_cache_misses = 0;
728rb_serial_t ruby_vm_global_cvar_state = 1;
729
730static const struct rb_callcache vm_empty_cc = {
731 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
732 .klass = Qundef,
733 .cme_ = NULL,
734 .call_ = vm_call_general,
735 .aux_ = {
736 .v = Qfalse,
737 }
738};
739
740static const struct rb_callcache vm_empty_cc_for_super = {
741 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
742 .klass = Qundef,
743 .cme_ = NULL,
744 .call_ = vm_call_super_method,
745 .aux_ = {
746 .v = Qfalse,
747 }
748};
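// These two statically-allocated call caches serve as shared fallbacks: klass
// is Qundef and cme_ is NULL, so they never match a receiver class and their
// handlers (vm_call_general / vm_call_super_method) perform a full method
// lookup. VM_CALLCACHE_UNMARKABLE keeps the GC from treating them as heap
// objects.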
749
750static void thread_free(void *ptr);
751
752void
753rb_vm_inc_const_missing_count(void)
754{
755 ruby_vm_const_missing_count +=1;
756}
757
758int
759rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
760 struct ruby_dtrace_method_hook_args *args)
761{
762 enum ruby_value_type type;
763 if (!klass) {
764 if (!ec) ec = GET_EC();
765 if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
766 return FALSE;
767 }
768 if (RB_TYPE_P(klass, T_ICLASS)) {
769 klass = RBASIC(klass)->klass;
770 }
771 else if (RCLASS_SINGLETON_P(klass)) {
772 klass = RCLASS_ATTACHED_OBJECT(klass);
773 if (NIL_P(klass)) return FALSE;
774 }
775 type = BUILTIN_TYPE(klass);
776 if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
777 VALUE name = rb_class_path(klass);
778 const char *classname, *filename;
779 const char *methodname = rb_id2name(id);
780 if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
781 if (NIL_P(name) || !(classname = StringValuePtr(name)))
782 classname = "<unknown>";
783 args->classname = classname;
784 args->methodname = methodname;
785 args->filename = filename;
786 args->klass = klass;
787 args->name = name;
788 return TRUE;
789 }
790 }
791 return FALSE;
792}
793
794extern unsigned int redblack_buffer_size;
795
796/*
797 * call-seq:
798 * RubyVM.stat -> Hash
799 * RubyVM.stat(hsh) -> hsh
800 * RubyVM.stat(Symbol) -> Numeric
801 *
802 * Returns a Hash containing implementation-dependent counters inside the VM.
803 *
804 * This hash includes information about method/constant caches:
805 *
806 * {
807 * :constant_cache_invalidations=>2,
808 * :constant_cache_misses=>14,
809 * :global_cvar_state=>27
810 * }
811 *
812 * If <tt>USE_DEBUG_COUNTER</tt> is enabled, debug counters will be included.
813 *
814 * The contents of the hash are implementation specific and may be changed in
815 * the future.
816 *
817 * This method is only expected to work on C Ruby.
818 */
819static VALUE
820vm_stat(int argc, VALUE *argv, VALUE self)
821{
822 static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id;
823 static VALUE sym_shape_cache_size;
824 VALUE arg = Qnil;
825 VALUE hash = Qnil, key = Qnil;
826
827 if (rb_check_arity(argc, 0, 1) == 1) {
828 arg = argv[0];
829 if (SYMBOL_P(arg))
830 key = arg;
831 else if (RB_TYPE_P(arg, T_HASH))
832 hash = arg;
833 else
834 rb_raise(rb_eTypeError, "non-hash or symbol given");
835 }
836 else {
837 hash = rb_hash_new();
838 }
839
840#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
841 S(constant_cache_invalidations);
842 S(constant_cache_misses);
843 S(global_cvar_state);
844 S(next_shape_id);
845 S(shape_cache_size);
846#undef S
847
848#define SET(name, attr) \
849 if (key == sym_##name) \
850 return SERIALT2NUM(attr); \
851 else if (hash != Qnil) \
852 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
853
854 SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
855 SET(constant_cache_misses, ruby_vm_constant_cache_misses);
856 SET(global_cvar_state, ruby_vm_global_cvar_state);
857 SET(next_shape_id, (rb_serial_t)rb_shapes_count());
858 SET(shape_cache_size, (rb_serial_t)rb_shape_tree.cache_size);
859#undef SET
860
861#if USE_DEBUG_COUNTER
862 ruby_debug_counter_show_at_exit(FALSE);
863 for (size_t i = 0; i < RB_DEBUG_COUNTER_MAX; i++) {
864 const VALUE name = rb_sym_intern_ascii_cstr(rb_debug_counter_names[i]);
865 const VALUE boxed_value = SIZET2NUM(rb_debug_counter[i]);
866
867 if (key == name) {
868 return boxed_value;
869 }
870 else if (hash != Qnil) {
871 rb_hash_aset(hash, name, boxed_value);
872 }
873 }
874#endif
875
876 if (!NIL_P(key)) { /* matched key should return above */
877 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
878 }
879
880 return hash;
881}
882
883/* control stack frame */
884
885static void
886vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_box_t *box)
887{
888 if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
889 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
890 }
891
892 /* for return */
893 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
894 rb_ec_thread_ptr(ec)->top_self,
895 GC_GUARDED_PTR(box),
896 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
897 ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
898 ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
899}
900
901static void
902vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
903{
904 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
905 vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
906 (VALUE)cref, /* cref or me */
907 ISEQ_BODY(iseq)->iseq_encoded,
908 ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size,
909 ISEQ_BODY(iseq)->stack_max);
910}
911
912static void
913vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
914{
915 VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
916 rb_binding_t *bind;
917
918 GetBindingPtr(toplevel_binding, bind);
919 RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
920
921 vm_set_eval_stack(ec, iseq, 0, &bind->block);
922
923 /* save binding */
924 if (ISEQ_BODY(iseq)->local_table_size > 0) {
925 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
926 }
927}
928
929rb_control_frame_t *
930rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
931{
932 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
933 if (cfp->iseq) {
934 return (rb_control_frame_t *)cfp;
935 }
936 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
937 }
938 return 0;
939}
940
941rb_control_frame_t *
942rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
943{
944 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
945 if (VM_FRAME_RUBYFRAME_P(cfp)) {
946 return (rb_control_frame_t *)cfp;
947 }
948 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
949 }
950 return 0;
951}
952
953static rb_control_frame_t *
954vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
955{
956 if (VM_FRAME_RUBYFRAME_P(cfp)) {
957 return (rb_control_frame_t *)cfp;
958 }
959
960 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
961
962 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
963 if (VM_FRAME_RUBYFRAME_P(cfp)) {
964 return (rb_control_frame_t *)cfp;
965 }
966
967 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
968 break;
969 }
970 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
971 }
972 return 0;
973}
974
975void
976rb_vm_pop_cfunc_frame(void)
977{
978 rb_execution_context_t *ec = GET_EC();
979 rb_control_frame_t *cfp = ec->cfp;
980 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
981
982 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
983 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
984 vm_pop_frame(ec, cfp, cfp->ep);
985}
986
987void
988rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
989{
990 /* check skipped frame */
991 while (ec->cfp != cfp) {
992#if VMDEBUG
993 printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
994#endif
995 if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
996 rb_vm_pop_frame(ec);
997 }
998 else { /* unlikely path */
999 rb_vm_pop_cfunc_frame();
1000 }
1001 }
1002}
1003
1004/* at exit */
1005
1006void
1007ruby_vm_at_exit(void (*func)(rb_vm_t *))
1008{
1009 rb_vm_t *vm = GET_VM();
1010 rb_at_exit_list *nl = ALLOC(rb_at_exit_list);
1011 nl->func = func;
1012 nl->next = vm->at_exit;
1013 vm->at_exit = nl;
1014}
1015
1016static void
1017ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
1018{
1019 rb_at_exit_list *l = vm->at_exit;
1020
1021 while (l) {
1022 rb_at_exit_list* t = l->next;
1023 rb_vm_at_exit_func *func = l->func;
1024 ruby_xfree(l);
1025 l = t;
1026 (*func)(vm);
1027 }
1028}
1029
1030/* Env */
1031
1032static VALUE check_env_value(const rb_env_t *env);
1033
1034static int
1035check_env(const rb_env_t *env)
1036{
1037 fputs("---\n", stderr);
1038 ruby_debug_printf("envptr: %p\n", (void *)&env->ep[0]);
1039 ruby_debug_printf("envval: %10p ", (void *)env->ep[1]);
1040 dp(env->ep[1]);
1041 ruby_debug_printf("ep: %10p\n", (void *)env->ep);
1042 if (rb_vm_env_prev_env(env)) {
1043 fputs(">>\n", stderr);
1044 check_env_value(rb_vm_env_prev_env(env));
1045 fputs("<<\n", stderr);
1046 }
1047 return 1;
1048}
1049
1050static VALUE
1051check_env_value(const rb_env_t *env)
1052{
1053 if (check_env(env)) {
1054 return (VALUE)env;
1055 }
1056 rb_bug("invalid env");
1057 return Qnil; /* unreachable */
1058}
1059
1060static VALUE
1061vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
1062{
1063 switch (vm_block_handler_type(block_handler)) {
1064 case block_handler_type_ifunc:
1065 case block_handler_type_iseq:
1066 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
1067
1068 case block_handler_type_symbol:
1069 case block_handler_type_proc:
1070 return block_handler;
1071 }
1072 VM_UNREACHABLE(vm_block_handler_escape);
1073 return Qnil;
1074}
1075
1076static VALUE
1077vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
1078{
1079 const VALUE * const ep = cfp->ep;
1080 VALUE *env_body, *env_ep;
1081 int local_size, env_size;
1082
1083 if (VM_ENV_ESCAPED_P(ep)) {
1084 return VM_ENV_ENVVAL(ep);
1085 }
1086
1087 if (!VM_ENV_LOCAL_P(ep)) {
1088 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1089 if (!VM_ENV_ESCAPED_P(prev_ep)) {
1090 rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1091
1092 while (prev_cfp->ep != prev_ep) {
1093 prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
1094 VM_ASSERT(prev_cfp->ep != NULL);
1095 }
1096
1097 vm_make_env_each(ec, prev_cfp);
1098 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
1099 }
1100 }
1101 else {
1102 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1103 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
1104
1105 if (block_handler != VM_BLOCK_HANDLER_NONE) {
1106 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
1107 VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
1108 }
1109 }
1110
1111 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
1112 local_size = VM_ENV_DATA_SIZE;
1113 }
1114 else {
1115 local_size = ISEQ_BODY(cfp->iseq)->local_table_size;
1116 if (ISEQ_BODY(cfp->iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
1117 int ci_offset = local_size - ISEQ_BODY(cfp->iseq)->param.size + VM_ENV_DATA_SIZE;
1118
1119 CALL_INFO ci = (CALL_INFO)VM_CF_LEP(cfp)[-ci_offset];
1120 local_size += vm_ci_argc(ci);
1121 }
1122 local_size += VM_ENV_DATA_SIZE;
1123 }
1124
1125 // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
1126 // This is done before creating the imemo_env because VM_STACK_ENV_WRITE
1127 // below leaves the on-stack ep in a state that is unsafe to GC.
1128 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1129 rb_yjit_invalidate_ep_is_bp(cfp->iseq);
1130 rb_zjit_invalidate_no_ep_escape(cfp->iseq);
1131 }
1132
1133 /*
1134 * # local variables on a stack frame (N == local_size)
1135 * [lvar1, lvar2, ..., lvarN, SPECVAL]
1136 * ^
1137 * ep[0]
1138 *
1139 * # moved local variables
1140 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
1141 * ^ ^
1142 * env->env[0] ep[0]
1143 */
1144
1145 env_size = local_size +
1146 1 /* envval */;
1147
1148 // Careful with order in the following sequence. Each allocation can move objects.
1149 env_body = ALLOC_N(VALUE, env_size);
1150 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0);
1151
1152 // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill())
1153 MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
1154
1155 env_ep = &env_body[local_size - 1 /* specval */];
1156 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1157
1158 env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? cfp->iseq : NULL);
1159 env->ep = env_ep;
1160 env->env = env_body;
1161 env->env_size = env_size;
1162
1163 cfp->ep = env_ep;
1164 VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
1165 VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
1166
1167#if 0
1168 for (i = 0; i < local_size; i++) {
1169 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1170 /* clear value stack for GC */
1171 ep[-local_size + i] = 0;
1172 }
1173 }
1174#endif
1175
1176 return (VALUE)env;
1177}
1178
1179static VALUE
1180vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1181{
1182 VALUE envval = vm_make_env_each(ec, cfp);
1183
1184 if (PROCDEBUG) {
1185 check_env_value((const rb_env_t *)envval);
1186 }
1187
1188 return envval;
1189}
1190
1191void
1192rb_vm_stack_to_heap(rb_execution_context_t *ec)
1193{
1194 rb_control_frame_t *cfp = ec->cfp;
1195 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
1196 vm_make_env_object(ec, cfp);
1197 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1198 }
1199}
1200
1201const rb_env_t *
1202rb_vm_env_prev_env(const rb_env_t *env)
1203{
1204 const VALUE *ep = env->ep;
1205
1206 if (VM_ENV_LOCAL_P(ep)) {
1207 return NULL;
1208 }
1209 else {
1210 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1211 return VM_ENV_ENVVAL_PTR(prev_ep);
1212 }
1213}
1214
1215static int
1216collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
1217{
1218 unsigned int i;
1219 if (!iseq) return 0;
1220 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1221 local_var_list_add(vars, ISEQ_BODY(iseq)->local_table[i]);
1222 }
1223 return 1;
1224}
1225
1226static void
1227collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
1228{
1229 do {
1230 if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
1231 collect_local_variables_in_iseq(env->iseq, vars);
1232 } while ((env = rb_vm_env_prev_env(env)) != NULL);
1233}
1234
1235static int
1236vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
1237{
1238 if (VM_ENV_ESCAPED_P(ep)) {
1239 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
1240 return 1;
1241 }
1242 else {
1243 return 0;
1244 }
1245}
1246
1247VALUE
1248rb_vm_env_local_variables(const rb_env_t *env)
1249{
1250 struct local_var_list vars;
1251 local_var_list_init(&vars);
1252 collect_local_variables_in_env(env, &vars);
1253 return local_var_list_finish(&vars);
1254}
1255
1256VALUE
1257rb_vm_env_numbered_parameters(const rb_env_t *env)
1258{
1259 struct local_var_list vars;
1260 local_var_list_init(&vars);
1261 // if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; // TODO: is this needed?
1262 const rb_iseq_t *iseq = env->iseq;
1263 unsigned int i;
1264 if (!iseq) return 0;
1265 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1266 numparam_list_add(&vars, ISEQ_BODY(iseq)->local_table[i]);
1267 }
1268 return local_var_list_finish(&vars);
1269}
1270
1271VALUE
1272rb_iseq_local_variables(const rb_iseq_t *iseq)
1273{
1274 struct local_var_list vars;
1275 local_var_list_init(&vars);
1276 while (collect_local_variables_in_iseq(iseq, &vars)) {
1277 iseq = ISEQ_BODY(iseq)->parent_iseq;
1278 }
1279 return local_var_list_finish(&vars);
1280}
1281
1282/* Proc */
1283
1284static VALUE
1285vm_proc_create_from_captured(VALUE klass,
1286 const struct rb_captured_block *captured,
1287 enum rb_block_type block_type,
1288 int8_t is_from_method, int8_t is_lambda)
1289{
1290 VALUE procval = rb_proc_alloc(klass);
1291 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1292
1293 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
1294
1295 /* copy block */
1296 RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
1297 RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
1298 rb_vm_block_ep_update(procval, &proc->block, captured->ep);
1299
1300 vm_block_type_set(&proc->block, block_type);
1301 proc->is_from_method = is_from_method;
1302 proc->is_lambda = is_lambda;
1303
1304 return procval;
1305}
1306
1307void
1308rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
1309{
1310 /* copy block */
1311 switch (vm_block_type(src)) {
1312 case block_type_iseq:
1313 case block_type_ifunc:
1314 RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
1315 RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
1316 rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
1317 break;
1318 case block_type_symbol:
1319 RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
1320 break;
1321 case block_type_proc:
1322 RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
1323 break;
1324 }
1325}
1326
1327static VALUE
1328proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
1329{
1330 VALUE procval = rb_proc_alloc(klass);
1331 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1332
1333 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
1334 rb_vm_block_copy(procval, &proc->block, block);
1335 vm_block_type_set(&proc->block, block->type);
1336 proc->is_from_method = is_from_method;
1337 proc->is_lambda = is_lambda;
1338
1339 return procval;
1340}
1341
1342VALUE
1343rb_proc_dup(VALUE self)
1344{
1345 VALUE procval;
1346 rb_proc_t *src;
1347
1348 GetProcPtr(self, src);
1349
1350 switch (vm_block_type(&src->block)) {
1351 case block_type_ifunc:
1352 procval = rb_func_proc_dup(self);
1353 break;
1354 default:
1355 procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
1356 break;
1357 }
1358
1359 if (RB_OBJ_SHAREABLE_P(self)) RB_OBJ_SET_SHAREABLE(procval);
1360 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
1361 return procval;
1362}
1363
1364struct collect_outer_variable_name_data {
1365 VALUE ary;
1366 VALUE read_only;
1367 bool yield;
1368 bool isolate;
1369};
1370
1371static VALUE
1372ID2NUM(ID id)
1373{
1374 if (SIZEOF_VOIDP > SIZEOF_LONG)
1375 return ULL2NUM(id);
1376 else
1377 return ULONG2NUM(id);
1378}
1379
1380static ID
1381NUM2ID(VALUE num)
1382{
1383 if (SIZEOF_VOIDP > SIZEOF_LONG)
1384 return (ID)NUM2ULL(num);
1385 else
1386 return (ID)NUM2ULONG(num);
1387}
1388
1389static enum rb_id_table_iterator_result
1390collect_outer_variable_names(ID id, VALUE val, void *ptr)
1391{
1392 struct collect_outer_variable_name_data *data = (struct collect_outer_variable_name_data *)ptr;
1393
1394 if (id == rb_intern("yield")) {
1395 data->yield = true;
1396 }
1397 else {
1398 VALUE *store;
1399 if (data->isolate ||
1400 val == Qtrue /* write */) {
1401 store = &data->ary;
1402 }
1403 else {
1404 store = &data->read_only;
1405 }
1406 if (*store == Qfalse) *store = rb_ary_new();
1407 rb_ary_push(*store, ID2NUM(id));
1408 }
1409 return ID_TABLE_CONTINUE;
1410}
1411
1412static const rb_env_t *
1413env_copy(const VALUE *src_ep, VALUE read_only_variables)
1414{
1415 const rb_env_t *src_env = (rb_env_t *)VM_ENV_ENVVAL(src_ep);
1416 VM_ASSERT(src_env->ep == src_ep);
1417
1418 VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
1419 VALUE *ep = &env_body[src_env->env_size - 2];
1420 const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
1421
1422 // Copy after allocations above, since they can move objects in src_ep.
1423 VALUE svar_val = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
1424 if (imemo_type_p(svar_val, imemo_svar)) {
1425 const struct vm_svar *svar = (struct vm_svar *)svar_val;
1426
1427 if (svar->cref_or_me) {
1428 svar_val = svar->cref_or_me;
1429 }
1430 else {
1431 svar_val = Qfalse;
1432 }
1433 }
1434 RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], svar_val);
1435
1436 ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
1437 if (!VM_ENV_LOCAL_P(src_ep)) {
1438 VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
1439 }
1440
1441 if (read_only_variables) {
1442 for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1443 ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
1444
1445 const struct rb_iseq_constant_body *body = ISEQ_BODY(src_env->iseq);
1446 for (unsigned int j=0; j<body->local_table_size; j++) {
1447 if (id == body->local_table[j]) {
1448 // check reassignment
1449 if (body->lvar_states[j] == lvar_reassigned) {
1450 VALUE name = rb_id2str(id);
1451 VALUE msg = rb_sprintf("cannot make a shareable Proc because "
1452 "the outer variable '%" PRIsVALUE "' may be reassigned.", name);
1453 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1454 }
1455
1456 // check shareable
1457 VALUE v = src_env->env[j];
1458 if (!rb_ractor_shareable_p(v)) {
1459 VALUE name = rb_id2str(id);
1460 VALUE msg = rb_sprintf("cannot make a shareable Proc because it can refer"
1461 " unshareable object %+" PRIsVALUE " from ", v);
1462 if (name)
1463 rb_str_catf(msg, "variable '%" PRIsVALUE "'", name);
1464 else
1465 rb_str_cat_cstr(msg, "a hidden variable");
1466 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1467 }
1468 RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v);
1469 rb_ary_delete_at(read_only_variables, i);
1470 break;
1471 }
1472 }
1473 }
1474 }
1475
1476 if (!VM_ENV_LOCAL_P(src_ep)) {
1477 const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
1478 const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1479 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
1480 RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env);
1481 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL);
1482 }
1483 else {
1484 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
1485 }
1486
1487 RB_OBJ_SET_SHAREABLE((VALUE)copied_env);
1488 return copied_env;
1489}
1490
1491static void
1492proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
1493{
1494 const struct rb_captured_block *captured = &proc->block.as.captured;
1495 const rb_env_t *env = env_copy(captured->ep, read_only_variables);
1496 *((const VALUE **)&proc->block.as.captured.ep) = env->ep;
1497 RB_OBJ_WRITTEN(self, Qundef, env);
1498}
1499
1500static VALUE
1501proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, const char *message)
1502{
1503 struct collect_outer_variable_name_data data = {
1504 .isolate = isolate,
1505 .ary = Qfalse,
1506 .read_only = Qfalse,
1507 .yield = false,
1508 };
1509 rb_id_table_foreach(outer_variables, collect_outer_variable_names, (void *)&data);
1510
1511 if (data.ary != Qfalse) {
1512 VALUE str = rb_sprintf("can not %s because it accesses outer variables", message);
1513 VALUE ary = data.ary;
1514 const char *sep = " (";
1515 for (long i = 0; i < RARRAY_LEN(ary); i++) {
1516 VALUE name = rb_id2str(NUM2ID(RARRAY_AREF(ary, i)));
1517 if (!name) continue;
1518 rb_str_cat_cstr(str, sep);
1519 sep = ", ";
1520 rb_str_append(str, name);
1521 }
1522 if (*sep == ',') rb_str_cat_cstr(str, ")");
1523 rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : ".");
1524 rb_exc_raise(rb_exc_new_str(rb_eArgError, str));
1525 }
1526 else if (data.yield) {
1527 rb_raise(rb_eArgError, "can not %s because it uses 'yield'.", message);
1528 }
1529
1530 return data.read_only;
1531}
1532
1533VALUE
1534rb_proc_isolate_bang(VALUE self, VALUE replace_self)
1535{
1536 const rb_iseq_t *iseq = vm_proc_iseq(self);
1537
1538 if (iseq) {
1539 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1540
1541 if (!UNDEF_P(replace_self)) {
1542 VM_ASSERT(rb_ractor_shareable_p(replace_self));
1543 RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
1544 }
1545
1546 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1547
1548 if (ISEQ_BODY(iseq)->outer_variables) {
1549 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, true, "isolate a Proc");
1550 }
1551
1552 proc_isolate_env(self, proc, Qfalse);
1553 proc->is_isolated = TRUE;
1554 RB_OBJ_WRITE(self, &proc->block.as.captured.self, Qnil);
1555 }
1556
1557 RB_OBJ_SET_SHAREABLE(self);
1558 return self;
1559}
1560
1561VALUE
1562rb_proc_isolate(VALUE self)
1563{
1564 VALUE dst = rb_proc_dup(self);
1565 rb_proc_isolate_bang(dst, Qundef);
1566 return dst;
1567}
1568
1569VALUE
1570rb_proc_ractor_make_shareable(VALUE self, VALUE replace_self)
1571{
1572 const rb_iseq_t *iseq = vm_proc_iseq(self);
1573
1574 if (iseq) {
1575 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1576
1577 if (!UNDEF_P(replace_self)) {
1578 RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
1579 }
1580
1581 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1582
1583 if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
1584 rb_raise(rb_eRactorIsolationError,
1585 "Proc's self is not shareable: %" PRIsVALUE,
1586 self);
1587 }
1588
1589 VALUE read_only_variables = Qfalse;
1590
1591 if (ISEQ_BODY(iseq)->outer_variables) {
1592 read_only_variables =
1593 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, false, "make a Proc shareable");
1594 }
1595
1596 proc_isolate_env(self, proc, read_only_variables);
1597 proc->is_isolated = TRUE;
1598 }
1599 else {
1600 const struct rb_block *block = vm_proc_block(self);
1601 if (block->type != block_type_symbol) rb_raise(rb_eRuntimeError, "not supported yet");
1602
1603 VALUE proc_self = vm_block_self(block);
1604 if (!rb_ractor_shareable_p(proc_self)) {
1605 rb_raise(rb_eRactorIsolationError,
1606 "Proc's self is not shareable: %" PRIsVALUE,
1607 self);
1608 }
1609 }
1610
1611 RB_OBJ_SET_FROZEN_SHAREABLE(self);
1612 return self;
1613}
1614
1615VALUE
1616rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
1617{
1618 VALUE procval;
1619 enum imemo_type code_type = imemo_type(captured->code.val);
1620
1621 if (!VM_ENV_ESCAPED_P(captured->ep)) {
1622 rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
1623 vm_make_env_object(ec, cfp);
1624 }
1625
1626 VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
1627 VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc);
1628
1629 procval = vm_proc_create_from_captured(klass, captured,
1630 code_type == imemo_iseq ? block_type_iseq : block_type_ifunc,
1631 FALSE, is_lambda);
1632
1633 if (code_type == imemo_ifunc) {
1634 struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val;
1635 if (ifunc->svar_lep) {
1636 VALUE ep0 = ifunc->svar_lep[0];
1637 if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) {
1638 // `ep0 == imemo_env` means this ep is escaped to heap (in env object).
1639 const rb_env_t *env = (const rb_env_t *)ep0;
1640 ifunc->svar_lep = (VALUE *)env->ep;
1641 }
1642 else {
1643 VM_ASSERT(FIXNUM_P(ep0));
1644 if (ep0 & VM_ENV_FLAG_ESCAPED) {
1645 // ok. do nothing
1646 }
1647 else {
1648 ifunc->svar_lep = NULL;
1649 }
1650 }
1651 }
1652 }
1653
1654 return procval;
1655}
1656
1657/* Binding */
1658
1659VALUE
1660rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
1661{
1662 rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(ec, src_cfp);
1663 rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
1664 VALUE bindval, envval;
1665 rb_binding_t *bind;
1666
1667 if (cfp == 0 || ruby_level_cfp == 0) {
1668 rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
1669 }
1670 if (!VM_FRAME_RUBYFRAME_P(src_cfp) &&
1671 !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) {
1672 rb_raise(rb_eRuntimeError, "Cannot create Binding object for non-Ruby caller");
1673 }
1674
1675 envval = vm_make_env_object(ec, cfp);
1676 bindval = rb_binding_alloc(rb_cBinding);
1677 GetBindingPtr(bindval, bind);
1678 vm_bind_update_env(bindval, bind, envval);
1679 RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
1680 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, cfp->iseq);
1681 RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(ruby_level_cfp->iseq)->location.pathobj);
1682 bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
1683
1684 return bindval;
1685}
1686
1687const VALUE *
1688rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
1689{
1690 VALUE envval, pathobj = bind->pathobj;
1691 VALUE path = pathobj_path(pathobj);
1692 VALUE realpath = pathobj_realpath(pathobj);
1693 const struct rb_block *base_block;
1694 const rb_env_t *env;
1695 rb_execution_context_t *ec = GET_EC();
1696 const rb_iseq_t *base_iseq, *iseq;
1697 rb_node_scope_t tmp_node;
1698
1699 if (dyncount < 0) return 0;
1700
1701 base_block = &bind->block;
1702 base_iseq = vm_block_iseq(base_block);
1703
1704 VALUE idtmp = 0;
1705 rb_ast_id_table_t *dyns = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + dyncount * sizeof(ID));
1706 dyns->size = dyncount;
1707 MEMCPY(dyns->ids, dynvars, ID, dyncount);
1708
1709 rb_node_init(RNODE(&tmp_node), NODE_SCOPE);
1710 tmp_node.nd_tbl = dyns;
1711 tmp_node.nd_body = 0;
1712 tmp_node.nd_parent = NULL;
1713 tmp_node.nd_args = 0;
1714
1715 VALUE ast_value = rb_ruby_ast_new(RNODE(&tmp_node));
1716
1717 if (base_iseq) {
1718 iseq = rb_iseq_new(ast_value, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1719 }
1720 else {
1721 VALUE tempstr = rb_fstring_lit("<temp>");
1722 iseq = rb_iseq_new_top(ast_value, tempstr, tempstr, tempstr, NULL);
1723 }
1724 tmp_node.nd_tbl = 0; /* reset table */
1725 ALLOCV_END(idtmp);
1726
1727 vm_set_eval_stack(ec, iseq, 0, base_block);
1728 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1729 rb_vm_pop_frame(ec);
1730
1731 env = (const rb_env_t *)envval;
1732 return env->env;
1733}
1734
1735/* C -> Ruby: block */
1736
1737static inline void
1738invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1739{
1740 int arg_size = ISEQ_BODY(iseq)->param.size;
1741
1742 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1743 VM_GUARDED_PREV_EP(captured->ep),
1744 (VALUE)cref, /* cref or method */
1745 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1746 ec->cfp->sp + arg_size,
1747 ISEQ_BODY(iseq)->local_table_size - arg_size,
1748 ISEQ_BODY(iseq)->stack_max);
1749}
1750
1751static inline void
1752invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1753{
1754 /* bmethod call from outside the VM */
1755 int arg_size = ISEQ_BODY(iseq)->param.size;
1756
1757 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
1758
1759 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1760 VM_GUARDED_PREV_EP(captured->ep),
1761 (VALUE)me,
1762 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1763 ec->cfp->sp + 1 /* self */ + arg_size,
1764 ISEQ_BODY(iseq)->local_table_size - arg_size,
1765 ISEQ_BODY(iseq)->stack_max);
1766
1767 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1768}
1769
1770ALWAYS_INLINE(static VALUE
1771 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1772 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1773 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1774
1775static inline VALUE
1776invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1777 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1778 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1779{
1780 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1781 int opt_pc;
1782 VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
1783 rb_control_frame_t *cfp = ec->cfp;
1784 VALUE *sp = cfp->sp;
1785 int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
1786 VALUE *use_argv = (VALUE *)argv;
1787 VALUE av[2];
1788
1789 stack_check(ec);
1790
1791 if (UNLIKELY(argc > VM_ARGC_STACK_MAX) &&
1792 (VM_ARGC_STACK_MAX >= 1 ||
1793 /* Skip ruby array for potential autosplat case */
1794 (argc != 1 || is_lambda))) {
1795 use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
1796 }
1797
1798 CHECK_VM_STACK_OVERFLOW(cfp, argc + 1);
1799 vm_check_canary(ec, sp);
1800
1801 VALUE *stack_argv = sp;
1802 if (me) {
1803 *sp = self; // bmethods need `self` on the VM stack
1804 stack_argv++;
1805 }
1806 cfp->sp = stack_argv + argc;
1807 MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space
1808
1809 opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler,
1810 (is_lambda ? arg_setup_method : arg_setup_block));
1811 cfp->sp = sp;
1812
1813 if (me == NULL) {
1814 invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1815 }
1816 else {
1817 invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1818 }
1819
1820 return vm_exec(ec);
1821}
1822
1823static VALUE
1824invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1825 int argc, const VALUE *argv,
1826 int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1827 int is_lambda, int force_blockarg)
1828{
1829 again:
1830 switch (vm_block_handler_type(block_handler)) {
1831 case block_handler_type_iseq:
1832 {
1833 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1834 return invoke_iseq_block_from_c(ec, captured, captured->self,
1835 argc, argv, kw_splat, passed_block_handler,
1836 cref, is_lambda, NULL);
1837 }
1838 case block_handler_type_ifunc:
1839 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1840 VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1841 argc, argv, kw_splat, passed_block_handler, NULL);
1842 case block_handler_type_symbol:
1843 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1844 argc, argv, kw_splat, passed_block_handler);
1845 case block_handler_type_proc:
1846 if (force_blockarg == FALSE) {
1847 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1848 }
1849 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1850 goto again;
1851 }
1852 VM_UNREACHABLE(invoke_block_from_c_splattable);
1853 return Qundef;
1854}
1855
1856static inline VALUE
1857check_block_handler(rb_execution_context_t *ec)
1858{
1859 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1860 vm_block_handler_verify(block_handler);
1861 if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1862 rb_vm_localjump_error("no block given", Qnil, 0);
1863 }
1864
1865 return block_handler;
1866}
1867
1868static VALUE
1869vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1870{
1871 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1872 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1873 cref, is_lambda, FALSE);
1874}
1875
1876static VALUE
1877vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1878{
1879 return vm_yield_with_cref(ec, argc, argv, kw_splat, NULL, FALSE);
1880}
1881
1882static VALUE
1883vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1884{
1885 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1886 argc, argv, kw_splat, block_handler,
1887 NULL, FALSE, FALSE);
1888}
1889
1890static VALUE
1891vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1892{
1893 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1894 RB_NO_KEYWORDS, VM_BLOCK_HANDLER_NONE, NULL, FALSE, TRUE);
1895}
1896
1897ALWAYS_INLINE(static VALUE
1898 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1899 VALUE self, int argc, const VALUE *argv,
1900 int kw_splat, VALUE passed_block_handler, int is_lambda,
1901 const rb_callable_method_entry_t *me));
1902
1903static inline VALUE
1904invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1905 VALUE self, int argc, const VALUE *argv,
1906 int kw_splat, VALUE passed_block_handler, int is_lambda,
1907 const rb_callable_method_entry_t *me)
1908{
1909 const struct rb_block *block = &proc->block;
1910
1911 again:
1912 switch (vm_block_type(block)) {
1913 case block_type_iseq:
1914 return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1915 case block_type_ifunc:
1916 if (kw_splat == 1) {
1917 VALUE keyword_hash = argv[argc-1];
1918 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
1919 keyword_hash = rb_to_hash_type(keyword_hash);
1920 }
1921 if (RHASH_EMPTY_P(keyword_hash)) {
1922 argc--;
1923 }
1924 else {
1925 ((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
1926 }
1927 }
1928 return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1929 case block_type_symbol:
1930 return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1931 case block_type_proc:
1932 is_lambda = block_proc_is_lambda(block->as.proc);
1933 block = vm_proc_block(block->as.proc);
1934 goto again;
1935 }
1936 VM_UNREACHABLE(invoke_block_from_c_proc);
1937 return Qundef;
1938}
1939
1940static VALUE
1941vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1942 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1943{
1944 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1945}
1946
1947static VALUE
1948vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1949 int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1950{
1951 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1952}
1953
1954VALUE
1955rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
1956 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1957{
1958 VALUE self = vm_block_self(&proc->block);
1959 vm_block_handler_verify(passed_block_handler);
1960
1961 if (proc->is_from_method) {
1962 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1963 }
1964 else {
1965 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1966 }
1967}
1968
1969VALUE
1970rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1971 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1972{
1973 vm_block_handler_verify(passed_block_handler);
1974
1975 if (proc->is_from_method) {
1976 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1977 }
1978 else {
1979 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1980 }
1981}
1982
1983/* special variable */
1984
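/* Locate the local EP that holds the special variables ($~, $_) for the given
 * frame: frames without pc/iseq are skipped, IFUNC frames return the svar_lep
 * recorded in the ifunc, and NULL is returned if the search runs off the top
 * of the VM stack. */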
1985VALUE *
1986rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1987{
1988 while (cfp->pc == 0 || cfp->iseq == 0) {
1989 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) {
1990 struct vm_ifunc *ifunc = (struct vm_ifunc *)cfp->iseq;
1991 return ifunc->svar_lep;
1992 }
1993 else {
1994 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1995 }
1996
1997 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
1998 return NULL;
1999 }
2000 }
2001
2002 return (VALUE *)VM_CF_LEP(cfp);
2003}
2004
2005static VALUE
2006vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
2007{
2008 return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key);
2009}
2010
2011static void
2012vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
2013{
2014 lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val);
2015}
2016
2017static VALUE
2018vm_svar_get(const rb_execution_context_t *ec, VALUE key)
2019{
2020 return vm_cfp_svar_get(ec, ec->cfp, key);
2021}
2022
2023static void
2024vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
2025{
2026 vm_cfp_svar_set(ec, ec->cfp, key, val);
2027}
2028
2029VALUE
2030rb_backref_get(void)
2031{
2032 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
2033}
2034
2035void
2036rb_backref_set(VALUE val)
2037{
2038 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
2039}
2040
2041VALUE
2042rb_lastline_get(void)
2043{
2044 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
2045}
2046
2047void
2048rb_lastline_set(VALUE val)
2049{
2050 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
2051}
2052
2053void
2054rb_lastline_set_up(VALUE val, unsigned int up)
2055{
2056 rb_control_frame_t * cfp = GET_EC()->cfp;
2057
2058 for(unsigned int i = 0; i < up; i++) {
2059 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2060 }
2061 vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val);
2062}
2063
2064/* misc */
2065
2066const char *
2067rb_sourcefile(void)
2068{
2069 const rb_execution_context_t *ec = GET_EC();
2070 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2071
2072 if (cfp) {
2073 return RSTRING_PTR(rb_iseq_path(cfp->iseq));
2074 }
2075 else {
2076 return 0;
2077 }
2078}
2079
2080int
2081rb_sourceline(void)
2082{
2083 const rb_execution_context_t *ec = GET_EC();
2084 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2085
2086 if (cfp) {
2087 return rb_vm_get_sourceline(cfp);
2088 }
2089 else {
2090 return 0;
2091 }
2092}
2093
2094VALUE
2095rb_source_location(int *pline)
2096{
2097 const rb_execution_context_t *ec = GET_EC();
2098 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2099
2100 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
2101 if (pline) *pline = rb_vm_get_sourceline(cfp);
2102 return rb_iseq_path(cfp->iseq);
2103 }
2104 else {
2105 if (pline) *pline = 0;
2106 return Qnil;
2107 }
2108}
2109
2110const char *
2111rb_source_location_cstr(int *pline)
2112{
2113 VALUE path = rb_source_location(pline);
2114 if (NIL_P(path)) return NULL;
2115 return RSTRING_PTR(path);
2116}
2117
2118rb_cref_t *
2119rb_vm_cref(void)
2120{
2121 const rb_execution_context_t *ec = GET_EC();
2122 return vm_ec_cref(ec);
2123}
2124
2125rb_cref_t *
2126rb_vm_cref_replace_with_duplicated_cref(void)
2127{
2128 const rb_execution_context_t *ec = GET_EC();
2129 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2130 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
2131 ASSUME(cref);
2132 return cref;
2133}
2134
2135const rb_cref_t *
2136rb_vm_cref_in_context(VALUE self, VALUE cbase)
2137{
2138 const rb_execution_context_t *ec = GET_EC();
2139 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2140 const rb_cref_t *cref;
2141 if (!cfp || cfp->self != self) return NULL;
2142 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
2143 cref = vm_get_cref(cfp->ep);
2144 if (CREF_CLASS(cref) != cbase) return NULL;
2145 return cref;
2146}
2147
2148#if 0
2149void
2150debug_cref(rb_cref_t *cref)
2151{
2152 while (cref) {
2153 dp(CREF_CLASS(cref));
2154 printf("%ld\n", CREF_VISI(cref));
2155 cref = CREF_NEXT(cref);
2156 }
2157}
2158#endif
2159
2160VALUE
2161rb_vm_cbase(void)
2162{
2163 const rb_execution_context_t *ec = GET_EC();
2164 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2165
2166 if (cfp == 0) {
2167 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
2168 }
2169 return vm_get_cbase(cfp->ep);
2170}
2171
2172/* jump */
2173
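/* Build a LocalJumpError whose @exit_value is the thrown value and whose
 * @reason symbol (:break, :redo, :retry, :next, :return or :noreason)
 * reflects the tag that caused the jump. */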
2174static VALUE
2175make_localjump_error(const char *mesg, VALUE value, int reason)
2176{
2177 extern VALUE rb_eLocalJumpError;
2178 VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
2179 ID id;
2180
2181 switch (reason) {
2182 case TAG_BREAK:
2183 CONST_ID(id, "break");
2184 break;
2185 case TAG_REDO:
2186 CONST_ID(id, "redo");
2187 break;
2188 case TAG_RETRY:
2189 CONST_ID(id, "retry");
2190 break;
2191 case TAG_NEXT:
2192 CONST_ID(id, "next");
2193 break;
2194 case TAG_RETURN:
2195 CONST_ID(id, "return");
2196 break;
2197 default:
2198 CONST_ID(id, "noreason");
2199 break;
2200 }
2201 rb_iv_set(exc, "@exit_value", value);
2202 rb_iv_set(exc, "@reason", ID2SYM(id));
2203 return exc;
2204}
2205
2206void
2207rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
2208{
2209 VALUE exc = make_localjump_error(mesg, value, reason);
2210 rb_exc_raise(exc);
2211}
2212
2213VALUE
2214rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val)
2215{
2216 const char *mesg;
2217
2218 switch (state) {
2219 case TAG_RETURN:
2220 mesg = "unexpected return";
2221 break;
2222 case TAG_BREAK:
2223 mesg = "unexpected break";
2224 break;
2225 case TAG_NEXT:
2226 mesg = "unexpected next";
2227 break;
2228 case TAG_REDO:
2229 mesg = "unexpected redo";
2230 val = Qnil;
2231 break;
2232 case TAG_RETRY:
2233 mesg = "retry outside of rescue clause";
2234 val = Qnil;
2235 break;
2236 default:
2237 return Qnil;
2238 }
2239 if (UNDEF_P(val)) {
2240 val = GET_EC()->tag->retval;
2241 }
2242 return make_localjump_error(mesg, val, state);
2243}
2244
2245void
2246rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state)
2247{
2248 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
2249 if (!NIL_P(exc)) rb_exc_raise(exc);
2250 EC_JUMP_TAG(GET_EC(), state);
2251}
2252
2253static rb_control_frame_t *
2254next_not_local_frame(rb_control_frame_t *cfp)
2255{
2256 while (VM_ENV_LOCAL_P(cfp->ep)) {
2257 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2258 }
2259 return cfp;
2260}
2261
2262NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
2263
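/* Throw TAG_BREAK targeted at the control frame that owns the block's outer
 * environment; if that frame is no longer on the stack, raise an
 * "unexpected break" LocalJumpError instead. */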
2264static void
2265vm_iter_break(rb_execution_context_t *ec, VALUE val)
2266{
2267 rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
2268 const VALUE *ep = VM_CF_PREV_EP(cfp);
2269 const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
2270
2271 if (!target_cfp) {
2272 rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
2273 }
2274
2275 ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
2276 EC_JUMP_TAG(ec, TAG_BREAK);
2277}
2278
2279void
2280rb_iter_break(void)
2281{
2282 vm_iter_break(GET_EC(), Qnil);
2283}
2284
2285void
2286rb_iter_break_value(VALUE val)
2287{
2288 vm_iter_break(GET_EC(), val);
2289}
2290
2291/* optimization: redefine management */
2292
2293short ruby_vm_redefined_flag[BOP_LAST_];
2294static st_table *vm_opt_method_def_table = 0;
2295static st_table *vm_opt_mid_table = 0;
2296
2297void
2298rb_free_vm_opt_tables(void)
2299{
2300 st_free_table(vm_opt_method_def_table);
2301 st_free_table(vm_opt_mid_table);
2302}
2303
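/* Map a core class to its *_REDEFINED_OP_FLAG bit; returns 0 for classes
 * whose basic operations are not tracked for redefinition. */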
2304static int
2305vm_redefinition_check_flag(VALUE klass)
2306{
2307 if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
2308 if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
2309 if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
2310 if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
2311 if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
2312 if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
2313#if 0
2314 if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
2315#endif
2316 if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
2317 if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
2318 if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
2319 if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
2320 if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
2321 return 0;
2322}
2323
2324int
2325rb_vm_check_optimizable_mid(VALUE mid)
2326{
2327 if (!vm_opt_mid_table) {
2328 return FALSE;
2329 }
2330
2331 return st_lookup(vm_opt_mid_table, mid, NULL);
2332}
2333
2334static int
2335vm_redefinition_check_method_type(const rb_method_entry_t *me)
2336{
2337 if (me->called_id != me->def->original_id) {
2338 return FALSE;
2339 }
2340
2341 if (METHOD_ENTRY_BASIC(me)) return TRUE;
2342
2343 const rb_method_definition_t *def = me->def;
2344 switch (def->type) {
2345 case VM_METHOD_TYPE_CFUNC:
2346 case VM_METHOD_TYPE_OPTIMIZED:
2347 return TRUE;
2348 default:
2349 return FALSE;
2350 }
2351}
2352
2353static void
2354rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
2355{
2356 st_data_t bop;
2357 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
2358 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
2359 klass = RBASIC_CLASS(klass);
2360 }
2361 if (vm_redefinition_check_method_type(me)) {
2362 if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
2363 int flag = vm_redefinition_check_flag(klass);
2364 if (flag != 0) {
2365 rb_category_warn(
2366 RB_WARN_CATEGORY_PERFORMANCE,
2367 "Redefining '%s#%s' disables interpreter and JIT optimizations",
2368 rb_class2name(me->owner),
2369 rb_id2name(me->called_id)
2370 );
2371 rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2372 rb_zjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2373 ruby_vm_redefined_flag[bop] |= flag;
2374 }
2375 }
2376 }
2377}
2378
2379static enum rb_id_table_iterator_result
2380check_redefined_method(ID mid, VALUE value, void *data)
2381{
2382 VALUE klass = (VALUE)data;
2383 const rb_method_entry_t *me = (rb_method_entry_t *)value;
2384 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
2385
2386 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
2387
2388 return ID_TABLE_CONTINUE;
2389}
2390
2391void
2392rb_vm_check_redefinition_by_prepend(VALUE klass)
2393{
2394 if (!vm_redefinition_check_flag(klass)) return;
2395 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
2396}
2397
2398static void
2399add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop)
2400{
2401 st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
2402 st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
2403}
2404
2405static void
2406add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop)
2407{
2408 const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
2409
2410 if (me && vm_redefinition_check_method_type(me)) {
2411 add_opt_method_entry_bop(me, mid, bop);
2412 }
2413 else {
2414 rb_bug("undefined optimized method: %s", rb_id2name(mid));
2415 }
2416}
2417
2418static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid);
2419
2420static void
2421add_opt_method_entry(const rb_method_entry_t *me)
2422{
2423 if (me && vm_redefinition_check_method_type(me)) {
2424 ID mid = me->called_id;
2425 enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid);
2426 if ((int)bop >= 0) {
2427 add_opt_method_entry_bop(me, mid, bop);
2428 }
2429 }
2430}
2431
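/* Register every optimized basic operation: OP() selects the method id and
 * basic operator, C() records the matching method of the named core class in
 * vm_opt_method_def_table / vm_opt_mid_table so that a later redefinition can
 * be detected and the corresponding optimization disabled. */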
2432static void
2433vm_init_redefined_flag(void)
2434{
2435 ID mid;
2436 enum ruby_basic_operators bop;
2437
2438#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
2439#define C(k) add_opt_method(rb_c##k, mid, bop)
2440 OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
2441 OP(MINUS, MINUS), (C(Integer), C(Float));
2442 OP(MULT, MULT), (C(Integer), C(Float));
2443 OP(DIV, DIV), (C(Integer), C(Float));
2444 OP(MOD, MOD), (C(Integer), C(Float));
2445 OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
2446 OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
2447 C(NilClass), C(TrueClass), C(FalseClass));
2448 OP(LT, LT), (C(Integer), C(Float));
2449 OP(LE, LE), (C(Integer), C(Float));
2450 OP(GT, GT), (C(Integer), C(Float));
2451 OP(GE, GE), (C(Integer), C(Float));
2452 OP(LTLT, LTLT), (C(String), C(Array));
2453 OP(GTGT, GTGT), (C(Integer));
2454 OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
2455 OP(ASET, ASET), (C(Array), C(Hash));
2456 OP(Length, LENGTH), (C(Array), C(String), C(Hash));
2457 OP(Size, SIZE), (C(Array), C(String), C(Hash));
2458 OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
2459 OP(Succ, SUCC), (C(Integer), C(String));
2460 OP(EqTilde, MATCH), (C(Regexp), C(String));
2461 OP(Freeze, FREEZE), (C(String), C(Array), C(Hash));
2462 OP(UMinus, UMINUS), (C(String));
2463 OP(Max, MAX), (C(Array));
2464 OP(Min, MIN), (C(Array));
2465 OP(Hash, HASH), (C(Array));
2466 OP(Call, CALL), (C(Proc));
2467 OP(And, AND), (C(Integer));
2468 OP(Or, OR), (C(Integer));
2469 OP(NilP, NIL_P), (C(NilClass));
2470 OP(Cmp, CMP), (C(Integer), C(Float), C(String));
2471 OP(Default, DEFAULT), (C(Hash));
2472 OP(IncludeP, INCLUDE_P), (C(Array));
2473#undef C
2474#undef OP
2475}
2476
2477static enum ruby_basic_operators
2478vm_redefinition_bop_for_id(ID mid)
2479{
2480 switch (mid) {
2481#define OP(mid_, bop_) case id##mid_: return BOP_##bop_
2482 OP(PLUS, PLUS);
2483 OP(MINUS, MINUS);
2484 OP(MULT, MULT);
2485 OP(DIV, DIV);
2486 OP(MOD, MOD);
2487 OP(Eq, EQ);
2488 OP(Eqq, EQQ);
2489 OP(LT, LT);
2490 OP(LE, LE);
2491 OP(GT, GT);
2492 OP(GE, GE);
2493 OP(LTLT, LTLT);
2494 OP(AREF, AREF);
2495 OP(ASET, ASET);
2496 OP(Length, LENGTH);
2497 OP(Size, SIZE);
2498 OP(EmptyP, EMPTY_P);
2499 OP(Succ, SUCC);
2500 OP(EqTilde, MATCH);
2501 OP(Freeze, FREEZE);
2502 OP(UMinus, UMINUS);
2503 OP(Max, MAX);
2504 OP(Min, MIN);
2505 OP(Hash, HASH);
2506 OP(Call, CALL);
2507 OP(And, AND);
2508 OP(Or, OR);
2509 OP(NilP, NIL_P);
2510 OP(Cmp, CMP);
2511 OP(Default, DEFAULT);
2512 OP(Pack, PACK);
2513#undef OP
2514 }
2515 return -1;
2516}
2517
2518/* for vm development */
2519
2520#if VMDEBUG
2521static const char *
2522vm_frametype_name(const rb_control_frame_t *cfp)
2523{
2524 switch (VM_FRAME_TYPE(cfp)) {
2525 case VM_FRAME_MAGIC_METHOD: return "method";
2526 case VM_FRAME_MAGIC_BLOCK: return "block";
2527 case VM_FRAME_MAGIC_CLASS: return "class";
2528 case VM_FRAME_MAGIC_TOP: return "top";
2529 case VM_FRAME_MAGIC_CFUNC: return "cfunc";
2530 case VM_FRAME_MAGIC_IFUNC: return "ifunc";
2531 case VM_FRAME_MAGIC_EVAL: return "eval";
2532 case VM_FRAME_MAGIC_RESCUE: return "rescue";
2533 default:
2534 rb_bug("unknown frame");
2535 }
2536}
2537#endif
2538
2539static VALUE
2540frame_return_value(const struct vm_throw_data *err)
2541{
2542 if (THROW_DATA_P(err) &&
2543 THROW_DATA_STATE(err) == TAG_BREAK &&
2544 THROW_DATA_CONSUMED_P(err) == FALSE) {
2545 return THROW_DATA_VAL(err);
2546 }
2547 else {
2548 return Qnil;
2549 }
2550}
2551
2552#if 0
2553/* for debug */
2554static const char *
2555frame_name(const rb_control_frame_t *cfp)
2556{
2557 unsigned long type = VM_FRAME_TYPE(cfp);
2558#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2559 C(METHOD);
2560 C(BLOCK);
2561 C(CLASS);
2562 C(TOP);
2563 C(CFUNC);
2564 C(PROC);
2565 C(IFUNC);
2566 C(EVAL);
2567 C(LAMBDA);
2568 C(RESCUE);
2569 C(DUMMY);
2570#undef C
2571 return "unknown";
2572}
2573#endif
2574
2575// cfp_returning_with_value:
2576// Whether cfp is the last frame in the unwinding process for a non-local return.
2577static void
2578hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err)
2579{
2580 if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
2581 return;
2582 }
2583 else {
2584 const rb_iseq_t *iseq = ec->cfp->iseq;
2585 rb_hook_list_t *local_hooks = NULL;
2586 unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
2587 if (RB_UNLIKELY(local_hooks_cnt > 0)) {
2588 local_hooks = rb_iseq_local_hooks(iseq, rb_ec_ractor_ptr(ec), false);
2589 }
2590
2591 switch (VM_FRAME_TYPE(ec->cfp)) {
2592 case VM_FRAME_MAGIC_METHOD:
2593 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
2594 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2595
2596 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2597 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
2598 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2599 }
2600
2601 THROW_DATA_CONSUMED_SET(err);
2602 break;
2603 case VM_FRAME_MAGIC_BLOCK:
2604 if (VM_FRAME_BMETHOD_P(ec->cfp)) {
2605 VALUE bmethod_return_value = frame_return_value(err);
2606 if (cfp_returning_with_value) {
2607 // Non-local return terminating at a BMETHOD control frame.
2608 bmethod_return_value = THROW_DATA_VAL(err);
2609 }
2610
2611
2612 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
2613 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2614 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2615 ec->cfp->self, 0, 0, 0, bmethod_return_value, TRUE);
2616 }
2617
2618 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
2619
2620 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
2621 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2622 rb_vm_frame_method_entry(ec->cfp)->called_id,
2623 rb_vm_frame_method_entry(ec->cfp)->owner,
2624 bmethod_return_value);
2625
2626 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
2627 unsigned int local_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
2628 if (UNLIKELY(local_hooks_cnt > 0)) {
2629 local_hooks = rb_method_def_local_hooks(me->def, rb_ec_ractor_ptr(ec), false);
2630 if (local_hooks && local_hooks->events & RUBY_EVENT_RETURN) {
2631 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
2632 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2633 rb_vm_frame_method_entry(ec->cfp)->called_id,
2634 rb_vm_frame_method_entry(ec->cfp)->owner,
2635 bmethod_return_value, TRUE);
2636 }
2637 }
2638
2639 THROW_DATA_CONSUMED_SET(err);
2640 }
2641 else {
2642 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2643 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2644 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2645 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2646 }
2647 THROW_DATA_CONSUMED_SET(err);
2648 }
2649 break;
2650 case VM_FRAME_MAGIC_CLASS:
2651 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
2652 break;
2653 }
2654 }
2655}
2656
2657/* evaluator body */
2658
2659/* finish
2660 VMe (h1) finish
2661 VM finish F1 F2
2662 cfunc finish F1 F2 C1
2663 rb_funcall finish F1 F2 C1
2664 VMe finish F1 F2 C1
2665 VM finish F1 F2 C1 F3
2666
2667 F1 - F3 : pushed by VM
2668 C1 : pushed by send insn (CFUNC)
2669
2670 struct CONTROL_FRAME {
2671 VALUE *pc; // cfp[0], program counter
2672 VALUE *sp; // cfp[1], stack pointer
2673 rb_iseq_t *iseq; // cfp[2], iseq
2674 VALUE self; // cfp[3], self
2675 const VALUE *ep; // cfp[4], env pointer
2676 const void *block_code; // cfp[5], block code
2677 };
2678
2679 struct rb_captured_block {
2680 VALUE self;
2681 VALUE *ep;
2682 union code;
2683 };
2684
2685 struct METHOD_ENV {
2686 VALUE param0;
2687 ...
2688 VALUE paramN;
2689 VALUE lvar1;
2690 ...
2691 VALUE lvarM;
2692 VALUE cref; // ep[-2]
2693 VALUE special; // ep[-1]
2694 VALUE flags; // ep[ 0] == lep[0]
2695 };
2696
2697 struct BLOCK_ENV {
2698 VALUE block_param0;
2699 ...
2700 VALUE block_paramN;
2701 VALUE block_lvar1;
2702 ...
2703 VALUE block_lvarM;
2704 VALUE cref; // ep[-2]
2705 VALUE special; // ep[-1]
2706 VALUE flags; // ep[ 0]
2707 };
2708
2709 struct CLASS_ENV {
2710 VALUE class_lvar0;
2711 ...
2712 VALUE class_lvarN;
2713 VALUE cref;
2714 VALUE prev_ep; // for frame jump
2715 VALUE flags;
2716 };
2717
2718 struct C_METHOD_CONTROL_FRAME {
2719 VALUE *pc; // 0
2720 VALUE *sp; // stack pointer
2721 rb_iseq_t *iseq; // cmi
2722 VALUE self; // ?
2723 VALUE *ep; // ep == lep
2724 void *code; //
2725 };
2726
2727 struct C_BLOCK_CONTROL_FRAME {
2728 VALUE *pc; // point only "finish" insn
2729 VALUE *sp; // sp
2730 rb_iseq_t *iseq; // ?
2731 VALUE self; //
2732 VALUE *ep; // ep
2733 void *code; //
2734 };
2735 */
2736
2737static inline VALUE
2738vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo);
2739static inline VALUE
2740vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result);
2741
2742// for non-Emscripten Wasm build, use vm_exec with optimized setjmp for runtime performance
2743#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2744
2745struct rb_vm_exec_context {
2746 rb_execution_context_t *const ec;
2747 struct rb_vm_tag *const tag;
2748
2749 VALUE result;
2750};
2751
2752static void
2753vm_exec_bottom_main(void *context)
2754{
2755 struct rb_vm_exec_context *ctx = context;
2756 rb_execution_context_t *ec = ctx->ec;
2757
2758 ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec));
2759}
2760
2761static void
2762vm_exec_bottom_rescue(void *context)
2763{
2764 struct rb_vm_exec_context *ctx = context;
2765 rb_execution_context_t *ec = ctx->ec;
2766
2767 ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo);
2768}
2769#endif
2770
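/* Main execution entry for the pushed frames: installs a VM tag, lets the JIT
 * (jit_exec) or the interpreter core (vm_exec_core) run, and routes any thrown
 * tag state through vm_exec_loop / vm_exec_handle_exception. */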
2771VALUE
2772vm_exec(rb_execution_context_t *ec)
2773{
2774 VALUE result = Qundef;
2775
2776 EC_PUSH_TAG(ec);
2777
2778 _tag.retval = Qnil;
2779
2780#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2781 struct rb_vm_exec_context ctx = {
2782 .ec = ec,
2783 .tag = &_tag,
2784 };
2785 struct rb_wasm_try_catch try_catch;
2786
2787 EC_REPUSH_TAG();
2788
2789 rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx);
2790
2791 rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf));
2792
2793 result = ctx.result;
2794#else
2795 enum ruby_tag_type state;
2796 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2797 if (UNDEF_P(result = jit_exec(ec))) {
2798 result = vm_exec_core(ec);
2799 }
2800 /* fallback to the VM */
2801 result = vm_exec_loop(ec, TAG_NONE, &_tag, result);
2802 }
2803 else {
2804 result = vm_exec_loop(ec, state, &_tag, ec->errinfo);
2805 }
2806#endif
2807
2808 EC_POP_TAG();
2809 return result;
2810}
2811
2812static inline VALUE
2813vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
2814 struct rb_vm_tag *tag, VALUE result)
2815{
2816 if (state == TAG_NONE) { /* no jumps, result is discarded */
2817 goto vm_loop_start;
2818 }
2819
2820 rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
2821 while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
2822 // caught a jump, exec the handler. JIT code in jit_exec_exception()
2823 // may return Qundef to run remaining frames with vm_exec_core().
2824 if (UNDEF_P(result = jit_exec_exception(ec))) {
2825 result = vm_exec_core(ec);
2826 }
2827 vm_loop_start:
2828 VM_ASSERT(ec->tag == tag);
2829 /* when caught `throw`, `tag.state` is set. */
2830 if ((state = tag->state) == TAG_NONE) break;
2831 tag->state = TAG_NONE;
2832 }
2833
2834 return result;
2835}
2836
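/* Unwind and search for a handler: pop finished C frames, then scan the
 * current iseq's catch table for an entry covering the faulting pc whose type
 * matches the thrown state (rescue/ensure/break/next/redo/retry). When an
 * entry is found, a VM_FRAME_MAGIC_RESCUE frame is pushed and Qundef is
 * returned so the caller re-enters the interpreter; otherwise the frame is
 * popped and the search continues, or the tag is re-thrown at a FINISH frame. */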
2837static inline VALUE
2838vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo)
2839{
2840 struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
2841
2842 for (;;) {
2843 unsigned int i;
2844 const struct iseq_catch_table_entry *entry;
2845 const struct iseq_catch_table *ct;
2846 unsigned long epc, cont_pc, cont_sp;
2847 const rb_iseq_t *catch_iseq;
2848 VALUE type;
2849 const rb_control_frame_t *escape_cfp;
2850
2851 cont_pc = cont_sp = 0;
2852 catch_iseq = NULL;
2853
2854 while (ec->cfp->pc == 0 || ec->cfp->iseq == 0) {
2855 if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
2856 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
2857 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2858 rb_vm_frame_method_entry(ec->cfp)->called_id,
2859 rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
2860 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
2861 rb_vm_frame_method_entry(ec->cfp)->owner,
2862 rb_vm_frame_method_entry(ec->cfp)->def->original_id);
2863 }
2864 rb_vm_pop_frame(ec);
2865 }
2866
2867 rb_control_frame_t *const cfp = ec->cfp;
2868 epc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded;
2869
2870 escape_cfp = NULL;
2871 if (state == TAG_BREAK || state == TAG_RETURN) {
2872 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2873
2874 if (cfp == escape_cfp) {
2875 if (state == TAG_RETURN) {
2876 if (!VM_FRAME_FINISHED_P(cfp)) {
2877 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2878 THROW_DATA_STATE_SET(err, state = TAG_BREAK);
2879 }
2880 else {
2881 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2882 if (ct) for (i = 0; i < ct->size; i++) {
2883 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2884 if (entry->start < epc && entry->end >= epc) {
2885 if (entry->type == CATCH_TYPE_ENSURE) {
2886 catch_iseq = entry->iseq;
2887 cont_pc = entry->cont;
2888 cont_sp = entry->sp;
2889 break;
2890 }
2891 }
2892 }
2893 if (catch_iseq == NULL) {
2894 ec->errinfo = Qnil;
2895 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2896 // cfp == escape_cfp here so calling with cfp_returning_with_value = true
2897 hook_before_rewind(ec, true, state, err);
2898 rb_vm_pop_frame(ec);
2899 return THROW_DATA_VAL(err);
2900 }
2901 }
2902 /* through */
2903 }
2904 else {
2905 /* TAG_BREAK */
2906 *cfp->sp++ = THROW_DATA_VAL(err);
2907 ec->errinfo = Qnil;
2908 return Qundef;
2909 }
2910 }
2911 }
2912
2913 if (state == TAG_RAISE) {
2914 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2915 if (ct) for (i = 0; i < ct->size; i++) {
2916 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2917 if (entry->start < epc && entry->end >= epc) {
2918
2919 if (entry->type == CATCH_TYPE_RESCUE ||
2920 entry->type == CATCH_TYPE_ENSURE) {
2921 catch_iseq = entry->iseq;
2922 cont_pc = entry->cont;
2923 cont_sp = entry->sp;
2924 break;
2925 }
2926 }
2927 }
2928 }
2929 else if (state == TAG_RETRY) {
2930 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2931 if (ct) for (i = 0; i < ct->size; i++) {
2932 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2933 if (entry->start < epc && entry->end >= epc) {
2934
2935 if (entry->type == CATCH_TYPE_ENSURE) {
2936 catch_iseq = entry->iseq;
2937 cont_pc = entry->cont;
2938 cont_sp = entry->sp;
2939 break;
2940 }
2941 else if (entry->type == CATCH_TYPE_RETRY) {
2942 const rb_control_frame_t *escape_cfp;
2943 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2944 if (cfp == escape_cfp) {
2945 cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
2946 ec->errinfo = Qnil;
2947 return Qundef;
2948 }
2949 }
2950 }
2951 }
2952 }
2953 else if ((state == TAG_BREAK && !escape_cfp) ||
2954 (state == TAG_REDO) ||
2955 (state == TAG_NEXT)) {
2956 type = (const enum rb_catch_type[TAG_MASK]) {
2957 [TAG_BREAK] = CATCH_TYPE_BREAK,
2958 [TAG_NEXT] = CATCH_TYPE_NEXT,
2959 [TAG_REDO] = CATCH_TYPE_REDO,
2960 /* otherwise = dontcare */
2961 }[state];
2962
2963 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2964 if (ct) for (i = 0; i < ct->size; i++) {
2965 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2966
2967 if (entry->start < epc && entry->end >= epc) {
2968 if (entry->type == CATCH_TYPE_ENSURE) {
2969 catch_iseq = entry->iseq;
2970 cont_pc = entry->cont;
2971 cont_sp = entry->sp;
2972 break;
2973 }
2974 else if (entry->type == type) {
2975 cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + entry->cont;
2976 cfp->sp = vm_base_ptr(cfp) + entry->sp;
2977
2978 if (state != TAG_REDO) {
2979 *cfp->sp++ = THROW_DATA_VAL(err);
2980 }
2981 ec->errinfo = Qnil;
2982 VM_ASSERT(ec->tag->state == TAG_NONE);
2983 return Qundef;
2984 }
2985 }
2986 }
2987 }
2988 else {
2989 ct = ISEQ_BODY(cfp->iseq)->catch_table;
2990 if (ct) for (i = 0; i < ct->size; i++) {
2991 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2992 if (entry->start < epc && entry->end >= epc) {
2993
2994 if (entry->type == CATCH_TYPE_ENSURE) {
2995 catch_iseq = entry->iseq;
2996 cont_pc = entry->cont;
2997 cont_sp = entry->sp;
2998 break;
2999 }
3000 }
3001 }
3002 }
3003
3004 if (catch_iseq != NULL) { /* found catch table */
3005 /* enter catch scope */
3006 const int arg_size = 1;
3007
3008 rb_iseq_check(catch_iseq);
3009 cfp->sp = vm_base_ptr(cfp) + cont_sp;
3010 cfp->pc = ISEQ_BODY(cfp->iseq)->iseq_encoded + cont_pc;
3011
3012 /* push block frame */
3013 cfp->sp[0] = (VALUE)err;
3014 vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
3015 cfp->self,
3016 VM_GUARDED_PREV_EP(cfp->ep),
3017 0, /* cref or me */
3018 ISEQ_BODY(catch_iseq)->iseq_encoded,
3019 cfp->sp + arg_size /* push value */,
3020 ISEQ_BODY(catch_iseq)->local_table_size - arg_size,
3021 ISEQ_BODY(catch_iseq)->stack_max);
3022
3023 state = 0;
3024 ec->tag->state = TAG_NONE;
3025 ec->errinfo = Qnil;
3026
3027 return Qundef;
3028 }
3029 else {
3030 hook_before_rewind(ec, (cfp == escape_cfp), state, err);
3031
3032 if (VM_FRAME_FINISHED_P(ec->cfp)) {
3033 rb_vm_pop_frame(ec);
3034 ec->errinfo = (VALUE)err;
3035 rb_vm_tag_jmpbuf_deinit(&ec->tag->buf);
3036 ec->tag = ec->tag->prev;
3037 EC_JUMP_TAG(ec, state);
3038 }
3039 else {
3040 rb_vm_pop_frame(ec);
3041 }
3042 }
3043 }
3044}
3045
3046/* misc */
3047
3048VALUE
3049rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box)
3050{
3051 rb_execution_context_t *ec = GET_EC();
3052 VALUE val;
3053 vm_set_top_stack(ec, iseq, box);
3054 val = vm_exec(ec);
3055 return val;
3056}
3057
3058VALUE
3059rb_iseq_eval_main(const rb_iseq_t *iseq)
3060{
3061 rb_execution_context_t *ec = GET_EC();
3062 VALUE val;
3063 vm_set_main_stack(ec, iseq);
3064 val = vm_exec(ec);
3065 return val;
3066}
3067
3068int
3069rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
3070{
3071 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
3072
3073 if (me) {
3074 if (idp) *idp = me->def->original_id;
3075 if (called_idp) *called_idp = me->called_id;
3076 if (klassp) *klassp = me->owner;
3077 return TRUE;
3078 }
3079 else {
3080 return FALSE;
3081 }
3082}
3083
3084int
3085rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
3086{
3087 return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
3088}
3089
3090int
3091rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
3092{
3093 return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
3094}
3095
3096VALUE
3097rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
3098 VALUE block_handler, VALUE filename)
3099{
3100 rb_execution_context_t *ec = GET_EC();
3101 const rb_control_frame_t *reg_cfp = ec->cfp;
3102 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3103 const rb_box_t *box = rb_current_box();
3104 VALUE val;
3105
3106 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
3107 recv, GC_GUARDED_PTR(box),
3108 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
3109 0, reg_cfp->sp, 0, 0);
3110
3111 val = (*func)(arg);
3112
3113 rb_vm_pop_frame(ec);
3114 return val;
3115}
3116
3117/* Ruby::Box */
3118
3119VALUE
3120rb_vm_call_cfunc_in_box(VALUE recv, VALUE (*func)(VALUE, VALUE), VALUE arg1, VALUE arg2,
3121 VALUE filename, const rb_box_t *box)
3122{
3123 rb_execution_context_t *ec = GET_EC();
3124 const rb_control_frame_t *reg_cfp = ec->cfp;
3125 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3126 VALUE val;
3127
3128 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
3129 recv, GC_GUARDED_PTR(box),
3130 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
3131 0, reg_cfp->sp, 0, 0);
3132
3133 val = (*func)(arg1, arg2);
3134
3135 rb_vm_pop_frame(ec);
3136 return val;
3137}
3138
3139void
3140rb_vm_frame_flag_set_box_require(const rb_execution_context_t *ec)
3141{
3142 VM_ASSERT(rb_box_available());
3143 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE);
3144}
3145
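/* Resolve the box active on the given control frame from its Ruby-level local
 * EP: method/cfunc frames take the box recorded in the method definition,
 * TOP/CLASS frames store it in the environment itself, and a DUMMY frame
 * (before the main box is set up) falls back to the main or root box. */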
3146static const rb_box_t *
3147current_box_on_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
3148{
3149 const rb_callable_method_entry_t *cme;
3150 const rb_box_t *box;
3151 const VALUE *lep = VM_EP_RUBY_LEP(ec, cfp);
3152 VM_BOX_ASSERT(lep, "lep should be valid");
3153 VM_BOX_ASSERT(rb_box_available(), "box should be available here");
3154
3155 if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_METHOD) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CFUNC)) {
3156 cme = check_method_entry(lep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
3157 VM_BOX_ASSERT(cme, "cme should be valid");
3158 VM_BOX_ASSERT(cme->def, "cme->def should be valid");
3159 return cme->def->box;
3160 }
3161 else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_TOP) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CLASS)) {
3162 VM_BOX_ASSERT(VM_ENV_LOCAL_P(lep), "lep should be local on MAGIC_TOP or MAGIC_CLASS frames");
3163 return VM_ENV_BOX(lep);
3164 }
3165 else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_DUMMY)) {
3166 // No valid local ep found (just after process boot?)
3167 // return the main box if it is already set up, otherwise the root box
3168 box = rb_main_box();
3169 if (box)
3170 return box;
3171 return rb_root_box();
3172 }
3173 else {
3174 VM_BOX_CRASHED();
3175 rb_bug("BUG: Local ep without cme/box, flags: %08lX", (unsigned long)lep[VM_ENV_DATA_INDEX_FLAGS]);
3176 }
3178}
3179
3180const rb_box_t *
3181rb_vm_current_box(const rb_execution_context_t *ec)
3182{
3183 return current_box_on_cfp(ec, ec->cfp);
3184}
3185
3186static const rb_control_frame_t *
3187find_loader_control_frame(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const rb_control_frame_t *end_cfp)
3188{
3189 while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
3190 if (!VM_ENV_FRAME_TYPE_P(cfp->ep, VM_FRAME_MAGIC_CFUNC))
3191 break;
3192 if (!BOX_ROOT_P(current_box_on_cfp(ec, cfp)))
3193 break;
3194 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3195 }
3196 VM_ASSERT(RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp));
3197 return cfp;
3198}
3199
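/* Determine which box a require/load should run in: walk the frames looking
 * for VM_FRAME_FLAG_BOX_REQUIRE (set by Box#require and friends); when none
 * is flagged, fall back to the box of the current frame. */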
3200const rb_box_t *
3201rb_vm_loading_box(const rb_execution_context_t *ec)
3202{
3203 const rb_control_frame_t *cfp, *current_cfp, *end_cfp;
3204
3205 if (!rb_box_available() || !ec)
3206 return rb_root_box();
3207
3208 cfp = ec->cfp;
3209 current_cfp = cfp;
3210 end_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
3211
3212 while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
3213 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE)) {
3214 if (RTEST(cfp->self) && BOX_OBJ_P(cfp->self)) {
3215 // Box#require, #require_relative, #load
3216 return rb_get_box_t(cfp->self);
3217 }
3218 // Kernel#require, #require_relative, #load
3219 cfp = find_loader_control_frame(ec, cfp, end_cfp);
3220 return current_box_on_cfp(ec, cfp);
3221 }
3222 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3223 }
3224 // no require/load with explicit boxes.
3225 return current_box_on_cfp(ec, current_cfp);
3226}
3227
3228/* vm */
3229
3230void
3231rb_vm_update_references(void *ptr)
3232{
3233 if (ptr) {
3234 rb_vm_t *vm = ptr;
3235
3236 vm->self = rb_gc_location(vm->self);
3237 vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
3238 vm->orig_progname = rb_gc_location(vm->orig_progname);
3239
3240 if (vm->root_box)
3241 rb_box_gc_update_references(vm->root_box);
3242 if (vm->main_box)
3243 rb_box_gc_update_references(vm->main_box);
3244
3245 rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd);
3246
3247 if (vm->coverages) {
3248 vm->coverages = rb_gc_location(vm->coverages);
3249 vm->me2counter = rb_gc_location(vm->me2counter);
3250 }
3251 }
3252}
3253
3254void
3255rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
3256{
3257 if (ptr) {
3258 rb_vm_t *vm = ptr;
3259 rb_ractor_t *r = 0;
3260 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3261 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3262 rb_ractor_status_p(r, ractor_running));
3263 if (r->threads.cnt > 0) {
3264 rb_thread_t *th = 0;
3265 ccan_list_for_each(&r->threads.set, th, lt_node) {
3266 VM_ASSERT(th != NULL);
3267 rb_execution_context_t * ec = th->ec;
3268 if (ec->vm_stack) {
3269 VALUE *p = ec->vm_stack;
3270 VALUE *sp = ec->cfp->sp;
3271 while (p < sp) {
3272 if (!RB_SPECIAL_CONST_P(*p)) {
3273 cb(*p, ctx);
3274 }
3275 p++;
3276 }
3277 }
3278 }
3279 }
3280 }
3281 }
3282}
3283
3284static enum rb_id_table_iterator_result
3285vm_mark_negative_cme(VALUE val, void *dmy)
3286{
3287 rb_gc_mark(val);
3288 return ID_TABLE_CONTINUE;
3289}
3290
3291void rb_thread_sched_mark_zombies(rb_vm_t *vm);
3292
3293void
3294rb_vm_mark(void *ptr)
3295{
3296 RUBY_MARK_ENTER("vm");
3297 RUBY_GC_INFO("-------------------------------------------------\n");
3298 if (ptr) {
3299 rb_vm_t *vm = ptr;
3300 rb_ractor_t *r = 0;
3301 long i;
3302
3303 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3304 // ractor.set only contains blocking or running ractors
3305 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3306 rb_ractor_status_p(r, ractor_running));
3307 rb_gc_mark(rb_ractor_self(r));
3308 }
3309
3310 for (struct global_object_list *list = vm->global_object_list; list; list = list->next) {
3311 rb_gc_mark_maybe(*list->varptr);
3312 }
3313
3314 rb_gc_mark_movable(vm->self);
3315
3316 if (vm->root_box) {
3317 rb_box_entry_mark(vm->root_box);
3318 }
3319 if (vm->main_box) {
3320 rb_box_entry_mark(vm->main_box);
3321 }
3322
3323 rb_gc_mark_movable(vm->mark_object_ary);
3324 rb_gc_mark_movable(vm->orig_progname);
3325 rb_gc_mark_movable(vm->coverages);
3326 rb_gc_mark_movable(vm->me2counter);
3327
3328 rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
3329
3330 rb_hook_list_mark(&vm->global_hooks);
3331
3332 rb_id_table_foreach_values(vm->negative_cme_table, vm_mark_negative_cme, NULL);
3333 rb_mark_tbl_no_pin(vm->overloaded_cme_table);
3334 for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
3335 const struct rb_callcache *cc = vm->global_cc_cache_table[i];
3336
3337 if (cc != NULL) {
3338 if (!vm_cc_invalidated_p(cc)) {
3339 rb_gc_mark((VALUE)cc);
3340 }
3341 else {
3342 vm->global_cc_cache_table[i] = NULL;
3343 }
3344 }
3345 }
3346
3347 rb_thread_sched_mark_zombies(vm);
3348 }
3349
3350 RUBY_MARK_LEAVE("vm");
3351}
3352
3353#undef rb_vm_register_special_exception
3354void
3355rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
3356{
3357 rb_vm_t *vm = GET_VM();
3358 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
3359 OBJ_FREEZE(exc);
3360 ((VALUE *)vm->special_exceptions)[sp] = exc;
3361 rb_vm_register_global_object(exc);
3362}
3363
3364void rb_objspace_free_objects(void *objspace);
3365
3366int
3367ruby_vm_destruct(rb_vm_t *vm)
3368{
3369 RUBY_FREE_ENTER("vm");
3370 ruby_vm_during_cleanup = true;
3371
3372 if (vm) {
3373 rb_thread_t *th = vm->ractor.main_thread;
3374
3375 if (rb_free_at_exit) {
3376 rb_free_encoded_insn_data();
3377 rb_free_global_enc_table();
3378 rb_free_loaded_builtin_table();
3379 rb_free_global_symbol_table();
3380
3381 rb_free_shared_fiber_pool();
3382 rb_free_transcoder_table();
3383 rb_free_vm_opt_tables();
3384 rb_free_warning();
3385 rb_free_rb_global_tbl();
3386
3387 rb_id_table_free(vm->negative_cme_table);
3388 st_free_table(vm->overloaded_cme_table);
3389
3390 // TODO: Is this ignorable for classext->m_tbl ?
3391 // rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl);
3392
3393 st_free_table(vm->static_ext_inits);
3394
3395 rb_vm_postponed_job_free();
3396
3397 rb_id_table_free(vm->constant_cache);
3398 set_free_table(vm->unused_block_warning_table);
3399
3400 rb_thread_free_native_thread(th);
3401
3402#ifndef HAVE_SETPROCTITLE
3403 ruby_free_proctitle();
3404#endif
3405 }
3406 else {
3407 rb_fiber_reset_root_local_storage(th);
3408 thread_free(th);
3409 }
3410
3411 struct rb_objspace *objspace = vm->gc.objspace;
3412
3413 rb_vm_living_threads_init(vm);
3414 ruby_vm_run_at_exit_hooks(vm);
3415 if (vm->ci_table) {
3416 st_free_table(vm->ci_table);
3417 vm->ci_table = NULL;
3418 }
3419 if (vm->cc_refinement_table) {
3420 rb_set_free_table(vm->cc_refinement_table);
3421 vm->cc_refinement_table = NULL;
3422 }
3423 RB_ALTSTACK_FREE(vm->main_altstack);
3424
3425 struct global_object_list *next;
3426 for (struct global_object_list *list = vm->global_object_list; list; list = next) {
3427 next = list->next;
3428 xfree(list);
3429 }
3430
3431 if (objspace) {
3432 if (rb_free_at_exit) {
3433 rb_objspace_free_objects(objspace);
3434 rb_free_generic_fields_tbl_();
3435 rb_free_default_rand_key();
3436
3437 ruby_mimfree(th);
3438 }
3439 rb_objspace_free(objspace);
3440 }
3441 rb_native_mutex_destroy(&vm->workqueue_lock);
3442 /* after freeing objspace, you *can't* use ruby_xfree() */
3443 ruby_mimfree(vm);
3444 ruby_current_vm_ptr = NULL;
3445
3446 if (rb_free_at_exit) {
3447 rb_shape_free_all();
3448#if USE_YJIT
3449 rb_yjit_free_at_exit();
3450#endif
3451 }
3452 }
3453 RUBY_FREE_LEAVE("vm");
3454 return 0;
3455}
3456
3457size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
3458
3459// Used for VM memsize reporting. Iterator that adds the memsize of each
3460// per-constant inline-cache table (set_table) in the VM's constant cache.
3461static enum rb_id_table_iterator_result
3462vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
3463{
3464 *((size_t *) size) += rb_set_memsize((set_table *) ics);
3465 return ID_TABLE_CONTINUE;
3466}
3467
3468// Returns a size_t representing the memory footprint of the VM's constant
3469// cache, which is the memsize of the table as well as the memsize of all of the
3470// nested tables.
3471static size_t
3472vm_memsize_constant_cache(void)
3473{
3474 rb_vm_t *vm = GET_VM();
3475 size_t size = rb_id_table_memsize(vm->constant_cache);
3476
3477 rb_id_table_foreach(vm->constant_cache, vm_memsize_constant_cache_i, &size);
3478 return size;
3479}
3480
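// Used for VM memsize reporting. Returns the size of the at_exit list by
// looping through the linked list and adding up the size of the structs.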
3481static size_t
3482vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
3483{
3484 size_t size = 0;
3485
3486 while (at_exit) {
3487 size += sizeof(rb_at_exit_list);
3488 at_exit = at_exit->next;
3489 }
3490
3491 return size;
3492}
3493
3494// Used for VM memsize reporting. Returns the size of the builtin function
3495// table if it has been defined.
3496static size_t
3497vm_memsize_builtin_function_table(const struct rb_builtin_function *builtin_function_table)
3498{
3499 return builtin_function_table == NULL ? 0 : sizeof(struct rb_builtin_function);
3500}
3501
3502// Reports the memsize of the VM struct object and the structs that are
3503// associated with it.
3504static size_t
3505vm_memsize(const void *ptr)
3506{
3507 rb_vm_t *vm = GET_VM();
3508
3509 return (
3510 sizeof(rb_vm_t) +
3511 rb_vm_memsize_postponed_job_queue() +
3512 rb_vm_memsize_workqueue(&vm->workqueue) +
3513 vm_memsize_at_exit_list(vm->at_exit) +
3514 rb_st_memsize(vm->ci_table) +
3515 vm_memsize_builtin_function_table(vm->builtin_function_table) +
3516 rb_id_table_memsize(vm->negative_cme_table) +
3517 rb_st_memsize(vm->overloaded_cme_table) +
3518 rb_set_memsize(vm->cc_refinement_table) +
3519 vm_memsize_constant_cache()
3520 );
3521
3522 // TODO
3523 // struct { struct ccan_list_head set; } ractor;
3524 // void *main_altstack; #ifdef USE_SIGALTSTACK
3525 // struct rb_objspace *objspace;
3526}
3527
3528static const rb_data_type_t vm_data_type = {
3529 "VM",
3530 {0, 0, vm_memsize,},
3531 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3532};
3533
3534
3535static VALUE
3536vm_default_params(void)
3537{
3538 rb_vm_t *vm = GET_VM();
3539 VALUE result = rb_hash_new_with_size(4);
3540#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
3541 SET(thread_vm_stack_size);
3542 SET(thread_machine_stack_size);
3543 SET(fiber_vm_stack_size);
3544 SET(fiber_machine_stack_size);
3545#undef SET
3546 rb_obj_freeze(result);
3547 return result;
3548}
3549
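/* Read a stack-size parameter from the named environment variable, clamp it
 * to min_value, and round it up to a multiple of RUBY_VM_SIZE_ALIGN;
 * default_value is used when the variable is unset. */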
3550static size_t
3551get_param(const char *name, size_t default_value, size_t min_value)
3552{
3553 const char *envval;
3554 size_t result = default_value;
3555 if ((envval = getenv(name)) != 0) {
3556 long val = atol(envval);
3557 if (val < (long)min_value) {
3558 val = (long)min_value;
3559 }
3560 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
3561 }
3562 if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */
3563
3564 return result;
3565}
3566
3567static void
3568check_machine_stack_size(size_t *sizep)
3569{
3570#ifdef PTHREAD_STACK_MIN
3571 size_t size = *sizep;
3572#endif
3573
3574#ifdef PTHREAD_STACK_MIN
3575 if (size < (size_t)PTHREAD_STACK_MIN) {
3576 *sizep = (size_t)PTHREAD_STACK_MIN * 2;
3577 }
3578#endif
3579}
3580
3581static void
3582vm_default_params_setup(rb_vm_t *vm)
3583{
3584 vm->default_params.thread_vm_stack_size =
3585 get_param("RUBY_THREAD_VM_STACK_SIZE",
3586 RUBY_VM_THREAD_VM_STACK_SIZE,
3587 RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
3588
3589 vm->default_params.thread_machine_stack_size =
3590 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
3591 RUBY_VM_THREAD_MACHINE_STACK_SIZE,
3592 RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
3593
3594 vm->default_params.fiber_vm_stack_size =
3595 get_param("RUBY_FIBER_VM_STACK_SIZE",
3596 RUBY_VM_FIBER_VM_STACK_SIZE,
3597 RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
3598
3599 vm->default_params.fiber_machine_stack_size =
3600 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
3601 RUBY_VM_FIBER_MACHINE_STACK_SIZE,
3602 RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
3603
3604 /* environment dependent check */
3605 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
3606 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
3607}
3608
3609static void
3610vm_init2(rb_vm_t *vm)
3611{
3612 rb_vm_living_threads_init(vm);
3613 vm->thread_report_on_exception = 1;
3614 vm->src_encoding_index = -1;
3615
3616 vm_default_params_setup(vm);
3617}
3618
3619void
3620rb_execution_context_update(rb_execution_context_t *ec)
3621{
3622 /* update VM stack */
3623 if (ec->vm_stack) {
3624 long i;
3625 VM_ASSERT(ec->cfp);
3626 VALUE *p = ec->vm_stack;
3627 VALUE *sp = ec->cfp->sp;
3628 rb_control_frame_t *cfp = ec->cfp;
3629 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3630
3631 for (i = 0; i < (long)(sp - p); i++) {
3632 VALUE ref = p[i];
3633 VALUE update = rb_gc_location(ref);
3634 if (ref != update) {
3635 p[i] = update;
3636 }
3637 }
3638
3639 while (cfp != limit_cfp) {
3640 const VALUE *ep = cfp->ep;
3641 cfp->self = rb_gc_location(cfp->self);
3642 cfp->iseq = (rb_iseq_t *)rb_gc_location((VALUE)cfp->iseq);
3643 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
3644
3645 if (!VM_ENV_LOCAL_P(ep)) {
3646 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3647 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3648 VM_FORCE_WRITE(&prev_ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(prev_ep[VM_ENV_DATA_INDEX_ENV]));
3649 }
3650
3651 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3652 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(ep[VM_ENV_DATA_INDEX_ENV]));
3653 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ME_CREF], rb_gc_location(ep[VM_ENV_DATA_INDEX_ME_CREF]));
3654 }
3655 }
3656
3657 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3658 }
3659 }
3660
3661 ec->storage = rb_gc_location(ec->storage);
3662
3663 ec->gen_fields_cache.obj = rb_gc_location(ec->gen_fields_cache.obj);
3664 ec->gen_fields_cache.fields_obj = rb_gc_location(ec->gen_fields_cache.fields_obj);
3665}
3666
3667static enum rb_id_table_iterator_result
3668mark_local_storage_i(VALUE local, void *data)
3669{
3670 rb_gc_mark(local);
3671 return ID_TABLE_CONTINUE;
3672}
3673
3674void
3675rb_execution_context_mark(const rb_execution_context_t *ec)
3676{
3677 /* mark VM stack */
3678 if (ec->vm_stack) {
3679 VM_ASSERT(ec->cfp);
3680 VALUE *p = ec->vm_stack;
3681 VALUE *sp = ec->cfp->sp;
3682 rb_control_frame_t *cfp = ec->cfp;
3683 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3684
3685 VM_ASSERT(sp == ec->cfp->sp);
3686 rb_gc_mark_vm_stack_values((long)(sp - p), p);
3687
3688 while (cfp != limit_cfp) {
3689 const VALUE *ep = cfp->ep;
3690 VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
3691
3692 rb_gc_mark_movable(cfp->self);
3693 rb_gc_mark_movable((VALUE)cfp->iseq);
3694 rb_gc_mark_movable((VALUE)cfp->block_code);
3695
3696 if (VM_ENV_LOCAL_P(ep) && VM_ENV_BOXED_P(ep)) {
3697 const rb_box_t *box = VM_ENV_BOX(ep);
3698 if (BOX_USER_P(box)) {
3699 rb_gc_mark_movable(box->box_object);
3700 }
3701 }
3702
3703 if (!VM_ENV_LOCAL_P(ep)) {
3704 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3705 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3706 rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
3707 }
3708
3709 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3710 rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
3711 rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
3712 }
3713 }
3714
3715 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3716 }
3717 }
3718
3719 /* mark machine stack */
3720 if (ec->machine.stack_start && ec->machine.stack_end &&
3721 ec != GET_EC() /* marked for current ec at the first stage of marking */
3722 ) {
3723 rb_gc_mark_machine_context(ec);
3724 }
3725
3726 rb_gc_mark(ec->errinfo);
3727 rb_gc_mark(ec->root_svar);
3728 if (ec->local_storage) {
3729 rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
3730 }
3731 rb_gc_mark(ec->local_storage_recursive_hash);
3732 rb_gc_mark(ec->local_storage_recursive_hash_for_trace);
3733 rb_gc_mark(ec->private_const_reference);
3734
3735 rb_gc_mark_movable(ec->storage);
3736}
3737
3738void rb_fiber_mark_self(rb_fiber_t *fib);
3739void rb_fiber_update_self(rb_fiber_t *fib);
3740void rb_threadptr_root_fiber_setup(rb_thread_t *th);
3741void rb_root_fiber_obj_setup(rb_thread_t *th);
3742void rb_threadptr_root_fiber_release(rb_thread_t *th);
3743
3744static void
3745thread_compact(void *ptr)
3746{
3747 rb_thread_t *th = ptr;
3748
3749 th->self = rb_gc_location(th->self);
3750}
3751
3752static void
3753thread_mark(void *ptr)
3754{
3755 rb_thread_t *th = ptr;
3756 RUBY_MARK_ENTER("thread");
3757
3758 // ec is null when setting up the thread in rb_threadptr_root_fiber_setup
3759 if (th->ec) {
3760 rb_fiber_mark_self(th->ec->fiber_ptr);
3761 }
3762
3763 /* mark ruby objects */
3764 switch (th->invoke_type) {
3765 case thread_invoke_type_proc:
3766 case thread_invoke_type_ractor_proc:
3767 rb_gc_mark(th->invoke_arg.proc.proc);
3768 rb_gc_mark(th->invoke_arg.proc.args);
3769 break;
3770 case thread_invoke_type_func:
3771 rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
3772 break;
3773 default:
3774 break;
3775 }
3776
3777 rb_gc_mark(rb_ractor_self(th->ractor));
3778 rb_gc_mark(th->thgroup);
3779 rb_gc_mark(th->value);
3780 rb_gc_mark(th->pending_interrupt_queue);
3781 rb_gc_mark(th->pending_interrupt_mask_stack);
3782 rb_gc_mark(th->top_self);
3783 rb_gc_mark(th->top_wrapper);
3784 if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
3785
3786 RUBY_ASSERT(th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
3787 rb_gc_mark(th->last_status);
3788 rb_gc_mark(th->locking_mutex);
3789 rb_gc_mark(th->name);
3790
3791 rb_gc_mark(th->scheduler);
3792
3793 rb_threadptr_interrupt_exec_task_mark(th);
3794
3795 RUBY_MARK_LEAVE("thread");
3796}
3797
3798void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c
3799
3800static void
3801thread_free(void *ptr)
3802{
3803 rb_thread_t *th = ptr;
3804 RUBY_FREE_ENTER("thread");
3805
3806 rb_threadptr_sched_free(th);
3807
3808 if (th->locking_mutex != Qfalse) {
3809 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
3810 }
3811 if (th->keeping_mutexes != NULL) {
3812 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
3813 }
3814
3815 ruby_xfree(th->specific_storage);
3816
3817 if (th->vm && th->vm->ractor.main_thread == th) {
3818 RUBY_GC_INFO("MRI main thread\n");
3819 }
3820 else {
3821 // ruby_xfree(th->nt);
3822 // TODO: the MN system collects the nt, but without the MN system it should be freed here.
3823 ruby_xfree(th);
3824 }
3825
3826 RUBY_FREE_LEAVE("thread");
3827}
3828
3829static size_t
3830thread_memsize(const void *ptr)
3831{
3832 const rb_thread_t *th = ptr;
3833 size_t size = sizeof(rb_thread_t);
3834
3835 if (!th->root_fiber) {
3836 size += th->ec->vm_stack_size * sizeof(VALUE);
3837 }
3838 if (th->ec->local_storage) {
3839 size += rb_id_table_memsize(th->ec->local_storage);
3840 }
3841 return size;
3842}
3843
3844#define thread_data_type ruby_threadptr_data_type
3845const rb_data_type_t ruby_threadptr_data_type = {
3846 "VM/thread",
3847 {
3848 thread_mark,
3849 thread_free,
3850 thread_memsize,
3851 thread_compact,
3852 },
3853 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3854};
3855
3856VALUE
3857rb_obj_is_thread(VALUE obj)
3858{
3859 return RBOOL(rb_typeddata_is_kind_of(obj, &thread_data_type));
3860}
3861
3862static VALUE
3863thread_alloc(VALUE klass)
3864{
3865 rb_thread_t *th;
3866 return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
3867}
3868
3869void
3870rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3871{
3872 ec->vm_stack = stack;
3873 ec->vm_stack_size = size;
3874}
3875
3876void
3877rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3878{
3879 rb_ec_set_vm_stack(ec, stack, size);
3880
3881#if VM_CHECK_MODE > 0
3882 MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it
3883#endif
3884
3885 ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3886
3887 vm_push_frame(ec,
3888 NULL /* dummy iseq */,
3889 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
3890 Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
3891 0 /* dummy cref/me */,
3892 0 /* dummy pc */, ec->vm_stack, 0, 0
3893 );
3894}
3895
3896void
3897rb_ec_clear_vm_stack(rb_execution_context_t *ec)
3898{
3899 // set cfp to NULL before clearing the stack in case `thread_profile_frames`
3900 // gets called in the middle of `rb_ec_set_vm_stack` via signal handler.
3901 ec->cfp = NULL;
3902 rb_ec_set_vm_stack(ec, NULL, 0);
3903}
3904
3905void
3906rb_ec_close(rb_execution_context_t *ec)
3907{
3908 // Fiber storage is not accessible from outside the running fiber, so it is safe to clear it here.
3909 ec->storage = Qnil;
3910}
3911
3912static void
3913th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
3914{
3915 const rb_box_t *box = rb_current_box();
3916
3917 th->self = self;
3918
3919 ccan_list_head_init(&th->interrupt_exec_tasks);
3920
3921 rb_threadptr_root_fiber_setup(th);
3922
3923 /* All threads are blocking until a non-blocking fiber is scheduled */
3924 th->blocking = 1;
3925 th->scheduler = Qnil;
3926
3927 if (self == 0) {
3928 size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
3929 VALUE *stack = ALLOC_N(VALUE, size);
3930 rb_ec_initialize_vm_stack(th->ec, stack, size);
3931 rb_thread_malloc_stack_set(th, stack);
3932 }
3933 else {
3934 VM_ASSERT(th->ec->cfp == NULL);
3935 VM_ASSERT(th->ec->vm_stack == NULL);
3936 VM_ASSERT(th->ec->vm_stack_size == 0);
3937 }
3938
3939 th->status = THREAD_RUNNABLE;
3940 th->last_status = Qnil;
3941 th->top_wrapper = 0;
3942 if (box->top_self) {
3943 th->top_self = box->top_self;
3944 }
3945 else {
3946 th->top_self = 0;
3947 }
3948 th->value = Qundef;
3949
3950 th->ec->errinfo = Qnil;
3951 th->ec->root_svar = Qfalse;
3952 th->ec->local_storage_recursive_hash = Qnil;
3953 th->ec->local_storage_recursive_hash_for_trace = Qnil;
3954
3955 th->ec->storage = Qnil;
3956 th->ec->ractor_id = rb_ractor_id(th->ractor);
3957
3958#if OPT_CALL_THREADED_CODE
3959 th->retval = Qundef;
3960#endif
3961 th->name = Qnil;
3962 th->report_on_exception = vm->thread_report_on_exception;
3963 th->ext_config.ractor_safe = true;
3964
3965#if USE_RUBY_DEBUG_LOG
3966 static rb_atomic_t thread_serial = 1;
3967 th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1);
3968
3969 RUBY_DEBUG_LOG("th:%u", th->serial);
3970#endif
3971}
3972
3973VALUE
3974rb_thread_alloc(VALUE klass)
3975{
3976 VALUE self = thread_alloc(klass);
3977 rb_thread_t *target_th = rb_thread_ptr(self);
3978 target_th->ractor = GET_RACTOR();
3979 th_init(target_th, self, target_th->vm = GET_VM());
3980 rb_root_fiber_obj_setup(target_th);
3981 return self;
3982}
3983
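/*
 * REWIND_CFP temporarily rewinds ec->cfp to the caller's control frame
 * (treating the current FrozenCore C method frame as popped) and lends it the
 * current frame's sp while `expr` runs, then restores both pointers.  This
 * lets the m_core_* helpers below behave as if they ran directly in the
 * calling iseq's frame.
 */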
3984#define REWIND_CFP(expr) do { \
3985 rb_execution_context_t *ec__ = GET_EC(); \
3986 VALUE *const curr_sp = (ec__->cfp++)->sp; \
3987 VALUE *const saved_sp = ec__->cfp->sp; \
3988 ec__->cfp->sp = curr_sp; \
3989 expr; \
3990 (ec__->cfp--)->sp = saved_sp; \
3991} while (0)
3992
3993static VALUE
3994m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
3995{
3996 REWIND_CFP({
3997 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
3998 });
3999 return Qnil;
4000}
4001
4002static VALUE
4003m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
4004{
4005 REWIND_CFP({
4006 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
4007 });
4008 return Qnil;
4009}
4010
4011static VALUE
4012m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
4013{
4014 REWIND_CFP({
4015 ID mid = SYM2ID(sym);
4016 rb_undef(cbase, mid);
4017 rb_clear_method_cache(self, mid);
4018 });
4019 return Qnil;
4020}
4021
4022static VALUE
4023m_core_set_postexe(VALUE self)
4024{
4025 rb_set_end_proc(rb_call_end_proc, rb_block_proc());
4026 return Qnil;
4027}
4028
4029static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
4030
4031static VALUE
4032core_hash_merge(VALUE hash, long argc, const VALUE *argv)
4033{
4034 Check_Type(hash, T_HASH);
4035 VM_ASSERT(argc % 2 == 0);
4036 rb_hash_bulk_insert(argc, argv, hash);
4037 return hash;
4038}
4039
4040static VALUE
4041m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
4042{
4043 VALUE hash = argv[0];
4044
4045 REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
4046
4047 return hash;
4048}
4049
4050static int
4051kwmerge_i(VALUE key, VALUE value, VALUE hash)
4052{
4053 rb_hash_aset(hash, key, value);
4054 return ST_CONTINUE;
4055}
4056
4057static VALUE
4058m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
4059{
4060 if (!NIL_P(kw)) {
4061 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
4062 }
4063 return hash;
4064}
4065
4066static VALUE
4067m_core_make_shareable(VALUE recv, VALUE obj)
4068{
4069 return rb_ractor_make_shareable(obj);
4070}
4071
4072static VALUE
4073m_core_make_shareable_copy(VALUE recv, VALUE obj)
4074{
4076}
4077
4078static VALUE
4079m_core_ensure_shareable(VALUE recv, VALUE obj, VALUE name)
4080{
4081 return rb_ractor_ensure_shareable(obj, name);
4082}
4083
4084static VALUE
4085core_hash_merge_kwd(VALUE hash, VALUE kw)
4086{
4087 rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
4088 return hash;
4089}
4090
4091extern VALUE *rb_gc_stack_start;
4092extern size_t rb_gc_stack_maxsize;
4093
4094/* debug functions */
4095
4096/* :nodoc: */
4097static VALUE
4098sdr(VALUE self)
4099{
4100 rb_vm_bugreport(NULL, stderr);
4101 return Qnil;
4102}
4103
4104/* :nodoc: */
4105static VALUE
4106nsdr(VALUE self)
4107{
4108 VALUE ary = rb_ary_new();
4109#ifdef HAVE_BACKTRACE
4110#include <execinfo.h>
4111#define MAX_NATIVE_TRACE 1024
4112 static void *trace[MAX_NATIVE_TRACE];
4113 int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
4114 char **syms = backtrace_symbols(trace, n);
4115 int i;
4116
4117 if (syms == 0) {
4118 rb_memerror();
4119 }
4120
4121 for (i=0; i<n; i++) {
4122 rb_ary_push(ary, rb_str_new2(syms[i]));
4123 }
4124 free(syms); /* OK */
4125#endif
4126 return ary;
4127}
4128
4129#if VM_COLLECT_USAGE_DETAILS
4130static VALUE usage_analysis_insn_start(VALUE self);
4131static VALUE usage_analysis_operand_start(VALUE self);
4132static VALUE usage_analysis_register_start(VALUE self);
4133static VALUE usage_analysis_insn_stop(VALUE self);
4134static VALUE usage_analysis_operand_stop(VALUE self);
4135static VALUE usage_analysis_register_stop(VALUE self);
4136static VALUE usage_analysis_insn_running(VALUE self);
4137static VALUE usage_analysis_operand_running(VALUE self);
4138static VALUE usage_analysis_register_running(VALUE self);
4139static VALUE usage_analysis_insn_clear(VALUE self);
4140static VALUE usage_analysis_operand_clear(VALUE self);
4141static VALUE usage_analysis_register_clear(VALUE self);
4142#endif
4143
4144static VALUE
4145f_raise(int c, VALUE *v, VALUE _)
4146{
4147 return rb_f_raise(c, v);
4148}
4149
4150static VALUE
4151f_proc(VALUE _)
4152{
4153 return rb_block_proc();
4154}
4155
4156static VALUE
4157f_lambda(VALUE _)
4158{
4159 return rb_block_lambda();
4160}
4161
4162static VALUE
4163f_sprintf(int c, const VALUE *v, VALUE _)
4164{
4165 return rb_f_sprintf(c, v);
4166}
4167
4168/* :nodoc: */
4169static VALUE
4170vm_mtbl(VALUE self, VALUE obj, VALUE sym)
4171{
4172 vm_mtbl_dump(CLASS_OF(obj), RTEST(sym) ? SYM2ID(sym) : 0);
4173 return Qnil;
4174}
4175
4176/* :nodoc: */
4177static VALUE
4178vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
4179{
4180 vm_mtbl_dump(obj, RTEST(sym) ? SYM2ID(sym) : 0);
4181 return Qnil;
4182}
4183
4184/*
4185 * call-seq:
4186 * RubyVM.keep_script_lines -> true or false
4187 *
4188 * Return current +keep_script_lines+ status. Now it only returns
4189 * +true+ or +false+, but it can return other objects in the future.
4190 *
4191 * Note that this is an API for ruby internal use, debugging,
4192 * and research. Do not use this for any other purpose.
4193 * The compatibility is not guaranteed.
4194 */
4195static VALUE
4196vm_keep_script_lines(VALUE self)
4197{
4198 return RBOOL(ruby_vm_keep_script_lines);
4199}
4200
4201/*
4202 * call-seq:
4203 * RubyVM.keep_script_lines = true / false
4204 *
4205 * It sets the +keep_script_lines+ flag. If the flag is set, all
4206 * loaded scripts are recorded in the interpreter process.
4207 *
4208 * Note that this is an API for ruby internal use, debugging,
4209 * and research. Do not use this for any other purpose.
4210 * The compatibility is not guaranteed.
4211 */
4212static VALUE
4213vm_keep_script_lines_set(VALUE self, VALUE flags)
4214{
4215 ruby_vm_keep_script_lines = RTEST(flags);
4216 return flags;
4217}
4218
4219void
4220Init_VM(void)
4221{
4222 VALUE opts;
4223 VALUE klass;
4224 VALUE fcore;
4225
4226 /*
4227 * Document-class: RubyVM
4228 *
4229 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
4230 * other Ruby implementations such as JRuby and TruffleRuby.
4231 *
4232 * The RubyVM module provides some access to MRI internals.
4233 * This module is for very limited purposes, such as debugging,
4234 * prototyping, and research. Normal users must not use it.
4235 * This module is not portable between Ruby implementations.
4236 */
4237 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
4238 rb_undef_alloc_func(rb_cRubyVM);
4239 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
4240 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
4241 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines", vm_keep_script_lines, 0);
4242 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines=", vm_keep_script_lines_set, 1);
4243
4244#if USE_DEBUG_COUNTER
4245 rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
4246 rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
4247#endif
4248
4249 /* FrozenCore (hidden) */
4250 fcore = rb_class_new(rb_cBasicObject);
4251 rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
4252 rb_vm_register_global_object(rb_class_path_cached(fcore));
4253 klass = rb_singleton_class(fcore);
4254 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
4255 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
4256 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
4257 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
4258 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
4259 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
4260 rb_define_method_id(klass, id_core_raise, f_raise, -1);
4261 rb_define_method_id(klass, id_core_sprintf, f_sprintf, -1);
4262 rb_define_method_id(klass, idProc, f_proc, 0);
4263 rb_define_method_id(klass, idLambda, f_lambda, 0);
4264 rb_define_method(klass, "make_shareable", m_core_make_shareable, 1);
4265 rb_define_method(klass, "make_shareable_copy", m_core_make_shareable_copy, 1);
4266 rb_define_method(klass, "ensure_shareable", m_core_ensure_shareable, 2);
4267 rb_obj_freeze(fcore);
4268 RBASIC_CLEAR_CLASS(klass);
4269 rb_obj_freeze(klass);
4270 rb_vm_register_global_object(fcore);
4271 rb_mRubyVMFrozenCore = fcore;
4272
4273 /*
4274 * Document-class: Thread
4275 *
4276 * Threads are the Ruby implementation for a concurrent programming model.
4277 *
4278 * Programs that require multiple threads of execution are a perfect
4279 * candidate for Ruby's Thread class.
4280 *
4281 * For example, we can create a new thread separate from the main thread's
4282 * execution using ::new.
4283 *
4284 * thr = Thread.new { puts "What's the big deal" }
4285 *
4286 * Then we are able to pause the execution of the main thread and allow
4287 * our new thread to finish, using #join:
4288 *
4289 * thr.join #=> "What's the big deal"
4290 *
4291 * If we don't call +thr.join+ before the main thread terminates, then all
4292 * other threads including +thr+ will be killed.
4293 *
4294 * Alternatively, you can use an array for handling multiple threads at
4295 * once, like in the following example:
4296 *
4297 * threads = []
4298 * threads << Thread.new { puts "What's the big deal" }
4299 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
4300 *
4301 * After creating a few threads we wait for them all to finish
4302 * consecutively.
4303 *
4304 * threads.each { |thr| thr.join }
4305 *
4306 * To retrieve the last value of a thread, use #value
4307 *
4308 * thr = Thread.new { sleep 1; "Useful value" }
4309 * thr.value #=> "Useful value"
4310 *
4311 * === Thread initialization
4312 *
4313 * In order to create new threads, Ruby provides ::new, ::start, and
4314 * ::fork. A block must be provided with each of these methods, otherwise
4315 * a ThreadError will be raised.
4316 *
4317 * When subclassing the Thread class, the +initialize+ method of your
4318 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
4319 * call super in your +initialize+ method.
4320 *
4321 * === Thread termination
4322 *
4323 * For terminating threads, Ruby provides a variety of ways to do this.
4324 *
4325 * The class method ::kill, is meant to exit a given thread:
4326 *
4327 * thr = Thread.new { sleep }
4328 * Thread.kill(thr) # sends exit() to thr
4329 *
4330 * Alternatively, you can use the instance method #exit, or any of its
4331 * aliases #kill or #terminate.
4332 *
4333 * thr.exit
4334 *
4335 * === Thread status
4336 *
4337 * Ruby provides a few instance methods for querying the state of a given
4338 * thread. To get a string with the current thread's state use #status
4339 *
4340 * thr = Thread.new { sleep }
4341 * thr.status # => "sleep"
4342 * thr.exit
4343 * thr.status # => false
4344 *
4345 * You can also use #alive? to tell if the thread is running or sleeping,
4346 * and #stop? if the thread is dead or sleeping.
4347 *
4348 * === Thread variables and scope
4349 *
4350 * Since threads are created with blocks, the same rules apply to other
4351 * Ruby blocks for variable scope. Any local variables created within this
4352 * block are accessible to only this thread.
4353 *
4354 * ==== Fiber-local vs. Thread-local
4355 *
4356 * Each fiber has its own bucket for Thread#[] storage. When you set a
4357 * new fiber-local it is only accessible within this Fiber. To illustrate:
4358 *
4359 * Thread.new {
4360 * Thread.current[:foo] = "bar"
4361 * Fiber.new {
4362 * p Thread.current[:foo] # => nil
4363 * }.resume
4364 * }.join
4365 *
4366 * This example uses #[] for getting and #[]= for setting fiber-locals;
4367 * you can also use #keys to list the fiber-locals for a given
4368 * thread and #key? to check if a fiber-local exists.
4369 *
4370 * When it comes to thread-locals, they are accessible within the entire
4371 * scope of the thread. Given the following example:
4372 *
4373 * Thread.new{
4374 * Thread.current.thread_variable_set(:foo, 1)
4375 * p Thread.current.thread_variable_get(:foo) # => 1
4376 * Fiber.new{
4377 * Thread.current.thread_variable_set(:foo, 2)
4378 * p Thread.current.thread_variable_get(:foo) # => 2
4379 * }.resume
4380 * p Thread.current.thread_variable_get(:foo) # => 2
4381 * }.join
4382 *
4383 * You can see that the thread-local +:foo+ carried over into the fiber
4384 * and was changed to +2+ by the end of the thread.
4385 *
4386 * This example makes use of #thread_variable_set to create new
4387 * thread-locals, and #thread_variable_get to reference them.
4388 *
4389 * There is also #thread_variables to list all thread-locals, and
4390 * #thread_variable? to check if a given thread-local exists.
4391 *
4392 * === Exception handling
4393 *
4394 * When an unhandled exception is raised inside a thread, it will
4395 * terminate. By default, this exception will not propagate to other
4396 * threads. The exception is stored and when another thread calls #value
4397 * or #join, the exception will be re-raised in that thread.
4398 *
4399 * t = Thread.new{ raise 'something went wrong' }
4400 * t.value #=> RuntimeError: something went wrong
4401 *
4402 * An exception can be raised from outside the thread using the
4403 * Thread#raise instance method, which takes the same parameters as
4404 * Kernel#raise.
4405 *
4406 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
4407 * true, or $DEBUG = true will cause a subsequent unhandled exception
4408 * raised in a thread to be automatically re-raised in the main thread.
4409 *
4410 * With the addition of the class method ::handle_interrupt, you can now
4411 * handle exceptions asynchronously with threads.
4412 *
4413 * === Scheduling
4414 *
4415 * Ruby provides a few ways to support scheduling threads in your program.
4416 *
4417 * The first way is by using the class method ::stop, to put the current
4418 * running thread to sleep and schedule the execution of another thread.
4419 *
4420 * Once a thread is asleep, you can use the instance method #wakeup to
4421 * mark your thread as eligible for scheduling.
4422 *
4423 * You can also try ::pass, which attempts to pass execution to another
4424 * thread, but it is up to the OS whether the running thread will switch
4425 * or not. The same goes for #priority, which lets you hint to the thread
4426 * scheduler which threads you want to take precedence when passing
4427 * execution. This method is also dependent on the OS and may be ignored
4428 * on some platforms.
4429 *
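 *     A minimal illustrative sketch combining ::stop and #wakeup:
 *
 *         th = Thread.new { Thread.stop; puts "resumed" }
 *         sleep 0.1 until th.stop?
 *         th.wakeup
 *         th.join
 *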
4430 */
4431 rb_cThread = rb_define_class("Thread", rb_cObject);
4433
4434#if VM_COLLECT_USAGE_DETAILS
4435 /* ::RubyVM::USAGE_ANALYSIS_* */
4436#define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
4437 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
4438 define_usage_analysis_hash(INSN);
4439 define_usage_analysis_hash(REGS);
4440 define_usage_analysis_hash(INSN_BIGRAM);
4441
4442 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
4443 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
4444 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
4445 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
4446 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
4447 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
4448 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
4449 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
4450 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
4451 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
4452 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
4453 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
4454#endif
4455
4456 /* ::RubyVM::OPTS
4457 * An Array of VM build options.
4458 * This constant is MRI specific.
4459 */
4460 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
4461
4462#if OPT_DIRECT_THREADED_CODE
4463 rb_ary_push(opts, rb_str_new2("direct threaded code"));
4464#elif OPT_TOKEN_THREADED_CODE
4465 rb_ary_push(opts, rb_str_new2("token threaded code"));
4466#elif OPT_CALL_THREADED_CODE
4467 rb_ary_push(opts, rb_str_new2("call threaded code"));
4468#endif
4469
4470#if OPT_OPERANDS_UNIFICATION
4471 rb_ary_push(opts, rb_str_new2("operands unification"));
4472#endif
4473#if OPT_INSTRUCTIONS_UNIFICATION
4474 rb_ary_push(opts, rb_str_new2("instructions unification"));
4475#endif
4476#if OPT_INLINE_METHOD_CACHE
4477 rb_ary_push(opts, rb_str_new2("inline method cache"));
4478#endif
4479
4480 /* ::RubyVM::INSTRUCTION_NAMES
4481 * A list of bytecode instruction names in MRI.
4482 * This constant is MRI specific.
4483 */
4484 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
4485
4486 /* ::RubyVM::DEFAULT_PARAMS
4487 * This constant exposes the VM's default parameters.
4488 * Note that changing these values does not affect VM execution.
4489 * Specification is not stable and you should not depend on this value.
4490 * Of course, this constant is MRI specific.
4491 */
4492 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
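    /*
     * Illustrative Ruby-level inspection of the three MRI-specific constants
     * defined above (actual contents vary by build configuration):
     *
     *     RubyVM::OPTS                                   # e.g. ["direct threaded code", ...]
     *     RubyVM::INSTRUCTION_NAMES.first                # e.g. "nop"
     *     RubyVM::DEFAULT_PARAMS[:thread_vm_stack_size]  # e.g. 1048576
     */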
4493
4494 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
4495#if VMDEBUG
4496 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
4497 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
4498 rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
4499 rb_define_singleton_method(rb_cRubyVM, "mtbl2", vm_mtbl2, 2);
4500#else
4501 (void)sdr;
4502 (void)nsdr;
4503 (void)vm_mtbl;
4504 (void)vm_mtbl2;
4505#endif
4506
4507 /* VM bootstrap: phase 2 */
4508 {
4509 rb_vm_t *vm = ruby_current_vm_ptr;
4510 rb_thread_t *th = GET_THREAD();
4511 VALUE filename = rb_fstring_lit("<main>");
4512 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
4513
4514 // Ractor setup
4515 rb_ractor_main_setup(vm, th->ractor, th);
4516
4517 /* create vm object */
4518 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
4519
4520 /* create main thread */
4521 th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
4522 vm->ractor.main_thread = th;
4523 vm->ractor.main_ractor = th->ractor;
4524 th->vm = vm;
4525 th->top_wrapper = 0;
4526 th->top_self = rb_vm_top_self();
4527
4528 rb_root_fiber_obj_setup(th);
4529
4530 rb_vm_register_global_object((VALUE)iseq);
4531 th->ec->cfp->iseq = iseq;
4532 th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
4533 th->ec->cfp->self = th->top_self;
4534
4535 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
4536 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE));
4537
4538 /*
4539 * The Binding of the top level scope
4540 */
4541 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
4542
4543#ifdef _WIN32
4544 rb_objspace_gc_enable(vm->gc.objspace);
4545#endif
4546 }
4547 vm_init_redefined_flag();
4548
4549 rb_block_param_proxy = rb_obj_alloc(rb_cObject);
4550 rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
4551 OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
4552 rb_obj_freeze(rb_block_param_proxy);
4553 rb_vm_register_global_object(rb_block_param_proxy);
4554
4555 /* vm_backtrace.c */
4556 Init_vm_backtrace();
4557}
4558
4559void
4560rb_vm_set_progname(VALUE filename)
4561{
4562 rb_thread_t *th = GET_VM()->ractor.main_thread;
4563 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
4564 --cfp;
4565
4566 filename = rb_str_new_frozen(filename);
4567 rb_iseq_pathobj_set(cfp->iseq, filename, rb_iseq_realpath(cfp->iseq));
4568}
4569
4570extern const struct st_hash_type rb_fstring_hash_type;
4571
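/*
 * VM bootstrap is split into two phases: Init_BareVM (phase 1) allocates and
 * wires up the raw rb_vm_t / rb_thread_t structures before any Ruby objects
 * exist, while Init_VM (phase 2, above) later wraps them in Ruby-level
 * objects and sets up the main iseq frame.
 */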
4572void
4573Init_BareVM(void)
4574{
4575 /* VM bootstrap: phase 1 */
4576 rb_vm_t *vm = ruby_mimcalloc(1, sizeof(*vm));
4577 rb_thread_t *th = ruby_mimcalloc(1, sizeof(*th));
4578 if (!vm || !th) {
4579 fputs("[FATAL] failed to allocate memory\n", stderr);
4580 exit(EXIT_FAILURE);
4581 }
4582
4583 // setup the VM
4584 vm_init2(vm);
4585
4586 rb_vm_postponed_job_queue_init(vm);
4587 ruby_current_vm_ptr = vm;
4588 rb_objspace_alloc();
4589 vm->negative_cme_table = rb_id_table_create(16);
4590 vm->overloaded_cme_table = st_init_numtable();
4591 vm->constant_cache = rb_id_table_create(0);
4592 vm->unused_block_warning_table = set_init_numtable();
4593 vm->global_hooks.type = hook_list_type_global;
4594
4595 // setup main thread
4596 th->nt = ZALLOC(struct rb_native_thread);
4597 th->vm = vm;
4598 th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
4599 Init_native_thread(th);
4600 rb_jit_cont_init();
4601 th_init(th, 0, vm);
4602
4603 rb_ractor_set_current_ec(th->ractor, th->ec);
4604
4605 /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */
4606 ruby_thread_init_stack(th, native_main_thread_stack_top);
4607
4608 // setup ractor system
4609 rb_native_mutex_initialize(&vm->ractor.sync.lock);
4610 rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
4611
4612 vm_opt_method_def_table = st_init_numtable();
4613 vm_opt_mid_table = st_init_numtable();
4614
4615#ifdef RUBY_THREAD_WIN32_H
4616 rb_native_cond_initialize(&vm->ractor.sync.barrier_complete_cond);
4617 rb_native_cond_initialize(&vm->ractor.sync.barrier_release_cond);
4618#endif
4619}
4620
4621void
4623{
4624 native_main_thread_stack_top = addr;
4625}
4626
4627#ifndef _WIN32
4628#include <unistd.h>
4629#include <sys/mman.h>
4630#endif
4631
4632
4633#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
4634#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
4635#endif
4636
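/*
 * mark_object_ary is kept as a singly linked list of fixed-size buckets.
 * Each pin_array_list pins up to MARK_OBJECT_ARY_BUCKET_SIZE VALUEs (they are
 * marked via rb_gc_mark_vm_stack_values and never relocated by compaction;
 * only the `next` link is updated), and a new bucket is prepended once the
 * head bucket fills up.
 */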
4637 struct pin_array_list {
4638 VALUE next;
4639 long len;
4640 VALUE *array;
4641};
4642
4643static void
4644pin_array_list_mark(void *data)
4645{
4646 struct pin_array_list *array = (struct pin_array_list *)data;
4647 rb_gc_mark_movable(array->next);
4648
4649 rb_gc_mark_vm_stack_values(array->len, array->array);
4650}
4651
4652static void
4653pin_array_list_free(void *data)
4654{
4655 struct pin_array_list *array = (struct pin_array_list *)data;
4656 xfree(array->array);
4657}
4658
4659static size_t
4660pin_array_list_memsize(const void *data)
4661{
4662 return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE));
4663}
4664
4665static void
4666pin_array_list_update_references(void *data)
4667{
4668 struct pin_array_list *array = (struct pin_array_list *)data;
4669 array->next = rb_gc_location(array->next);
4670}
4671
4672static const rb_data_type_t pin_array_list_type = {
4673 .wrap_struct_name = "VM/pin_array_list",
4674 .function = {
4675 .dmark = pin_array_list_mark,
4676 .dfree = pin_array_list_free,
4677 .dsize = pin_array_list_memsize,
4678 .dcompact = pin_array_list_update_references,
4679 },
4680 .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
4681};
4682
4683static VALUE
4684pin_array_list_new(VALUE next)
4685{
4686 struct pin_array_list *array_list;
4687 VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list);
4688 RB_OBJ_WRITE(obj, &array_list->next, next);
4689 array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE);
4690 return obj;
4691}
4692
4693static VALUE
4694pin_array_list_append(VALUE obj, VALUE item)
4695{
4696 struct pin_array_list *array_list;
4697 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4698
4699 if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) {
4700 obj = pin_array_list_new(obj);
4701 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4702 }
4703
4704 RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item);
4705 array_list->len++;
4706 return obj;
4707}
4708
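/*
 * Registers obj as a permanent GC root.  Special constants need no marking;
 * classes and modules use the RCLASS_IS_ROOT flag so they are appended to
 * mark_object_ary at most once; everything else is pinned into the
 * pin_array_list buckets while holding the VM lock.
 */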
4709void
4710rb_vm_register_global_object(VALUE obj)
4711{
4711 {
4713 if (RB_SPECIAL_CONST_P(obj)) {
4714 return;
4715 }
4716
4717 switch (RB_BUILTIN_TYPE(obj)) {
4718 case T_CLASS:
4719 case T_MODULE:
4720 if (FL_TEST(obj, RCLASS_IS_ROOT)) {
4721 return;
4722 }
4723 FL_SET(obj, RCLASS_IS_ROOT);
4724 break;
4725 default:
4726 break;
4727 }
4728 RB_VM_LOCKING() {
4729 VALUE list = GET_VM()->mark_object_ary;
4730 VALUE head = pin_array_list_append(list, obj);
4731 if (head != list) {
4732 GET_VM()->mark_object_ary = head;
4733 }
4734 RB_GC_GUARD(obj);
4735 }
4736}
4737
4738void
4739Init_vm_objects(void)
4740{
4741 rb_vm_t *vm = GET_VM();
4742
4743 /* initialize mark object array, hash */
4744 vm->mark_object_ary = pin_array_list_new(Qnil);
4745 vm->ci_table = st_init_table(&vm_ci_hashtype);
4746 vm->cc_refinement_table = rb_set_init_numtable();
4747}
4748
4749// Whether JIT is enabled or not, we need to load/undef `#with_jit` for other builtins.
4750#include "jit_hook.rbinc"
4751#include "jit_undef.rbinc"
4752
4753// Stub for builtin function when not building YJIT units
4754#if !USE_YJIT
4755void Init_builtin_yjit(void) {}
4756#endif
4757
4758// Stub for builtin function when not building ZJIT units
4759#if !USE_ZJIT
4760void Init_builtin_zjit(void) {}
4761#endif
4762
4763/* top self */
4764
4765static VALUE
4766main_to_s(VALUE obj)
4767{
4768 return rb_str_new2("main");
4769}
4770
4771VALUE
4772rb_vm_top_self(void)
4773{
4774 const rb_box_t *box = rb_current_box();
4775 VM_ASSERT(box);
4776 VM_ASSERT(box->top_self);
4777 return box->top_self;
4778}
4779
4780void
4781Init_top_self(void)
4782{
4783 rb_vm_t *vm = GET_VM();
4784 vm->root_box = (rb_box_t *)rb_root_box();
4785 vm->root_box->top_self = rb_obj_alloc(rb_cObject);
4786 rb_define_singleton_method(vm->root_box->top_self, "to_s", main_to_s, 0);
4787 rb_define_alias(rb_singleton_class(vm->root_box->top_self), "inspect", "to_s");
4788}
4789
4790VALUE *
4792{
4793 rb_ractor_t *cr = GET_RACTOR();
4794 return &cr->verbose;
4795}
4796
4797VALUE *
4799{
4800 rb_ractor_t *cr = GET_RACTOR();
4801 return &cr->debug;
4802}
4803
4804bool rb_free_at_exit = false;
4805
4806bool
4807ruby_free_at_exit_p(void)
4808{
4809 return rb_free_at_exit;
4810}
4811
4812/* iseq.c */
4813VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
4814 VALUE insn, int op_no, VALUE op,
4815 int len, size_t pos, VALUE *pnop, VALUE child);
4816
4817#if VM_COLLECT_USAGE_DETAILS
4818
4819#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
4820
4821/* uh = {
4822 * insn(Fixnum) => ihash(Hash)
4823 * }
4824 * ihash = {
4825 * -1(Fixnum) => count, # insn usage
4826 * 0(Fixnum) => ophash, # operand usage
4827 * }
4828 * ophash = {
4829 * val(interned string) => count(Fixnum)
4830 * }
4831 */
4832static void
4833vm_analysis_insn(int insn)
4834{
4835 ID usage_hash;
4836 ID bigram_hash;
4837 static int prev_insn = -1;
4838
4839 VALUE uh;
4840 VALUE ihash;
4841 VALUE cv;
4842
4843 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4844 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4845 uh = rb_const_get(rb_cRubyVM, usage_hash);
4846 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4847 ihash = rb_hash_new();
4848 HASH_ASET(uh, INT2FIX(insn), ihash);
4849 }
4850 if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) {
4851 cv = INT2FIX(0);
4852 }
4853 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
4854
4855 /* calc bigram */
4856 if (prev_insn != -1) {
4857 VALUE bi;
4858 VALUE ary[2];
4859 VALUE cv;
4860
4861 ary[0] = INT2FIX(prev_insn);
4862 ary[1] = INT2FIX(insn);
4863 bi = rb_ary_new4(2, &ary[0]);
4864
4865 uh = rb_const_get(rb_cRubyVM, bigram_hash);
4866 if (NIL_P(cv = rb_hash_aref(uh, bi))) {
4867 cv = INT2FIX(0);
4868 }
4869 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
4870 }
4871 prev_insn = insn;
4872}
4873
4874static void
4875vm_analysis_operand(int insn, int n, VALUE op)
4876{
4877 ID usage_hash;
4878
4879 VALUE uh;
4880 VALUE ihash;
4881 VALUE ophash;
4882 VALUE valstr;
4883 VALUE cv;
4884
4885 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4886
4887 uh = rb_const_get(rb_cRubyVM, usage_hash);
4888 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4889 ihash = rb_hash_new();
4890 HASH_ASET(uh, INT2FIX(insn), ihash);
4891 }
4892 if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) {
4893 ophash = rb_hash_new();
4894 HASH_ASET(ihash, INT2FIX(n), ophash);
4895 }
4896 /* intern */
4897 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
4898
4899 /* set count */
4900 if (NIL_P(cv = rb_hash_aref(ophash, valstr))) {
4901 cv = INT2FIX(0);
4902 }
4903 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
4904}
4905
4906static void
4907vm_analysis_register(int reg, int isset)
4908{
4909 ID usage_hash;
4910 VALUE uh;
4911 VALUE valstr;
4912 static const char regstrs[][5] = {
4913 "pc", /* 0 */
4914 "sp", /* 1 */
4915 "ep", /* 2 */
4916 "cfp", /* 3 */
4917 "self", /* 4 */
4918 "iseq", /* 5 */
4919 };
4920 static const char getsetstr[][4] = {
4921 "get",
4922 "set",
4923 };
4924 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
4925
4926 VALUE cv;
4927
4928 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4929 if (syms[0] == 0) {
4930 char buff[0x10];
4931 int i;
4932
4933 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
4934 int j;
4935 for (j = 0; j < 2; j++) {
4936 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
4937 syms[i][j] = ID2SYM(rb_intern(buff));
4938 }
4939 }
4940 }
4941 valstr = syms[reg][isset];
4942
4943 uh = rb_const_get(rb_cRubyVM, usage_hash);
4944 if (NIL_P(cv = rb_hash_aref(uh, valstr))) {
4945 cv = INT2FIX(0);
4946 }
4947 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
4948}
4949
4950#undef HASH_ASET
4951
4952static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
4953static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
4954static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
4955
4956/* :nodoc: */
4957static VALUE
4958usage_analysis_insn_start(VALUE self)
4959{
4960 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
4961 return Qnil;
4962}
4963
4964/* :nodoc: */
4965static VALUE
4966usage_analysis_operand_start(VALUE self)
4967{
4968 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
4969 return Qnil;
4970}
4971
4972/* :nodoc: */
4973static VALUE
4974usage_analysis_register_start(VALUE self)
4975{
4976 ruby_vm_collect_usage_func_register = vm_analysis_register;
4977 return Qnil;
4978}
4979
4980/* :nodoc: */
4981static VALUE
4982usage_analysis_insn_stop(VALUE self)
4983{
4984 ruby_vm_collect_usage_func_insn = 0;
4985 return Qnil;
4986}
4987
4988/* :nodoc: */
4989static VALUE
4990usage_analysis_operand_stop(VALUE self)
4991{
4992 ruby_vm_collect_usage_func_operand = 0;
4993 return Qnil;
4994}
4995
4996/* :nodoc: */
4997static VALUE
4998usage_analysis_register_stop(VALUE self)
4999{
5000 ruby_vm_collect_usage_func_register = 0;
5001 return Qnil;
5002}
5003
5004/* :nodoc: */
5005static VALUE
5006usage_analysis_insn_running(VALUE self)
5007{
5008 return RBOOL(ruby_vm_collect_usage_func_insn != 0);
5009}
5010
5011/* :nodoc: */
5012static VALUE
5013usage_analysis_operand_running(VALUE self)
5014{
5015 return RBOOL(ruby_vm_collect_usage_func_operand != 0);
5016}
5017
5018/* :nodoc: */
5019static VALUE
5020usage_analysis_register_running(VALUE self)
5021{
5022 return RBOOL(ruby_vm_collect_usage_func_register != 0);
5023}
5024
5025static VALUE
5026usage_analysis_clear(VALUE self, ID usage_hash)
5027{
5028 VALUE uh;
5029 uh = rb_const_get(self, usage_hash);
5030 rb_hash_clear(uh);
5031
5032 return Qtrue;
5033}
5034
5035
5036/* :nodoc: */
5037static VALUE
5038usage_analysis_insn_clear(VALUE self)
5039{
5040 ID usage_hash;
5041 ID bigram_hash;
5042
5043 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
5044 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
5045 usage_analysis_clear(rb_cRubyVM, usage_hash);
5046 return usage_analysis_clear(rb_cRubyVM, bigram_hash);
5047}
5048
5049/* :nodoc: */
5050static VALUE
5051usage_analysis_operand_clear(VALUE self)
5052{
5053 ID usage_hash;
5054
5055 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
5056 return usage_analysis_clear(self, usage_hash);
5057}
5058
5059/* :nodoc: */
5060static VALUE
5061usage_analysis_register_clear(VALUE self)
5062{
5063 ID usage_hash;
5064
5065 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
5066 return usage_analysis_clear(self, usage_hash);
5067}
5068
5069#else
5070
5071MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = 0;
5072MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = 0;
5073MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = 0;
5074
5075#endif
5076
5077#if VM_COLLECT_USAGE_DETAILS
5078/* @param insn instruction number */
5079static void
5080vm_collect_usage_insn(int insn)
5081{
5082 if (RUBY_DTRACE_INSN_ENABLED()) {
5083 RUBY_DTRACE_INSN(rb_insns_name(insn));
5084 }
5085 if (ruby_vm_collect_usage_func_insn)
5086 (*ruby_vm_collect_usage_func_insn)(insn);
5087}
5088
5089/* @param insn instruction number
5090 * @param n n-th operand
5091 * @param op operand value
5092 */
5093static void
5094vm_collect_usage_operand(int insn, int n, VALUE op)
5095{
5096 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
5097 VALUE valstr;
5098
5099 valstr = rb_insn_operand_intern(GET_EC()->cfp->iseq, insn, n, op, 0, 0, 0, 0);
5100
5101 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
5102 RB_GC_GUARD(valstr);
5103 }
5104 if (ruby_vm_collect_usage_func_operand)
5105 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
5106}
5107
5108/* @param reg register id. see code of vm_analysis_register() */
5109/* @param isset 0: read, 1: write */
5110static void
5111vm_collect_usage_register(int reg, int isset)
5112{
5113 if (ruby_vm_collect_usage_func_register)
5114 (*ruby_vm_collect_usage_func_register)(reg, isset);
5115}
5116#endif
5117
5118const struct rb_callcache *
5119rb_vm_empty_cc(void)
5120{
5121 return &vm_empty_cc;
5122}
5123
5124const struct rb_callcache *
5125rb_vm_empty_cc_for_super(void)
5126{
5127 return &vm_empty_cc_for_super;
5128}
5129
5130#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */