Ruby 4.1.0dev (2026-04-25 revision 97c070c244ebd76cbf4d9a5b81dca08f4bda6f05)
vm.c (97c070c244ebd76cbf4d9a5b81dca08f4bda6f05)
1/**********************************************************************
2
3 vm.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11#define vm_exec rb_vm_exec
12
13#include "eval_intern.h"
14#include "internal.h"
15#include "internal/box.h"
16#include "internal/class.h"
17#include "internal/compile.h"
18#include "internal/cont.h"
19#include "internal/error.h"
20#include "internal/encoding.h"
21#include "internal/eval.h"
22#include "internal/gc.h"
23#include "internal/inits.h"
24#include "internal/missing.h"
25#include "internal/object.h"
26#include "internal/proc.h"
27#include "internal/re.h"
28#include "internal/ruby_parser.h"
29#include "internal/st.h"
30#include "internal/symbol.h"
31#include "internal/thread.h"
32#include "internal/transcode.h"
33#include "internal/vm.h"
34#include "internal/sanitizers.h"
35#include "internal/variable.h"
36#include "iseq.h"
37#include "symbol.h" // This includes a macro for a more performant rb_id2sym.
38#include "yjit.h"
39#include "insns.inc"
40#include "zjit.h"
41#include "ruby/st.h"
42#include "ruby/vm.h"
43#include "vm_core.h"
44#include "vm_callinfo.h"
45#include "vm_debug.h"
46#include "vm_exec.h"
47#include "vm_insnhelper.h"
48#include "ractor_core.h"
49#include "vm_sync.h"
50#include "shape.h"
51
52#include "builtin.h"
53
54#include "probes.h"
55#include "probes_helper.h"
56
57#ifdef RUBY_ASSERT_CRITICAL_SECTION
58int ruby_assert_critical_section_entered = 0;
59#endif
60
61static void *native_main_thread_stack_top;
62
63bool ruby_vm_during_cleanup = false;
64
65VALUE rb_str_concat_literals(size_t, const VALUE*);
66
68
69extern const char *const rb_debug_counter_names[];
70
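/* Follow VM_ENV_PREV_EP links from ep until VM_ENV_LOCAL_P holds, i.e. find the
 * local (method-level) environment pointer that ep belongs to. */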
71PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
72static inline const VALUE *
73VM_EP_LEP(const VALUE *ep)
74{
75 while (!VM_ENV_LOCAL_P(ep)) {
76 ep = VM_ENV_PREV_EP(ep);
77 }
78 return ep;
79}
80
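/* Linearly scan the control frame stack of ec for the frame whose ep matches the
 * given ep. Returns NULL when ep is NULL or when no frame owns it. */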
81static inline const rb_control_frame_t *
82rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
83{
84 if (!ep) {
85 return NULL;
86 }
87 else {
88 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
89
90 while (cfp < eocfp) {
91 if (cfp->ep == ep) {
92 return cfp;
93 }
94 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
95 }
96
97 return NULL;
98 }
99}
100
101#if VM_CHECK_MODE > 0
102// ruby_box_crashed defined in internal/box.h
103#define VM_BOX_CRASHED() {ruby_box_crashed = true;}
104#define VM_BOX_ASSERT(expr, msg) \
105 if (!(expr)) { ruby_box_crashed = true; rb_bug(msg); }
106#else
107#define VM_BOX_CRASHED() {}
108#define VM_BOX_ASSERT(expr, msg) ((void)0)
109#endif
110
111static const VALUE *
112VM_EP_RUBY_LEP(const rb_execution_context_t *ec, const rb_control_frame_t *current_cfp)
113{
114 // rb_vmdebug_box_env_dump_raw() simulates this function
115 const VALUE *ep = current_cfp->ep;
116 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
117 const rb_control_frame_t *cfp = current_cfp;
118
119 if (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_IFUNC)) {
120 ep = VM_EP_LEP(current_cfp->ep);
149 VM_ASSERT(VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC));
150 return ep;
151 }
152
153 while (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC)) {
154 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
155
156 VM_BOX_ASSERT(cfp, "CFUNC should have a valid previous control frame");
157 VM_BOX_ASSERT(cfp < eocfp, "CFUNC should have a valid caller frame");
158 if (!cfp || cfp >= eocfp) {
159 return NULL;
160 }
161
162 VM_BOX_ASSERT(cfp->ep, "CFUNC should have a valid caller frame with env");
163 ep = cfp->ep;
164 if (!ep) {
165 return NULL;
166 }
167 }
168
169 while (!VM_ENV_LOCAL_P(ep)) {
170 ep = VM_ENV_PREV_EP(ep);
171 }
172
173 return ep;
174}
175
176const VALUE *
177rb_vm_ep_local_ep(const VALUE *ep)
178{
179 return VM_EP_LEP(ep);
180}
181
182PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
183static inline const VALUE *
184VM_CF_LEP(const rb_control_frame_t * const cfp)
185{
186 return VM_EP_LEP(cfp->ep);
187}
188
189static inline const VALUE *
190VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
191{
192 return VM_ENV_PREV_EP(cfp->ep);
193}
194
195PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
196static inline VALUE
197VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
198{
199 const VALUE *ep;
200 if (VM_ENV_BOXED_P(cfp->ep)) {
201 VM_ASSERT(VM_ENV_LOCAL_P(cfp->ep));
202 /* Never set block_handler for VM_FRAME_MAGIC_TOP or VM_FRAME_MAGIC_CLASS;
203 * the specval is used for boxes (rb_box_t) in these cases.
204 */
205 return VM_BLOCK_HANDLER_NONE;
206 }
207 ep = VM_CF_LEP(cfp);
208 return VM_ENV_BLOCK_HANDLER(ep);
209}
210
211int
212rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
213{
214 return VM_FRAME_CFRAME_KW_P(cfp);
215}
216
217VALUE
218rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
219{
220 return VM_CF_BLOCK_HANDLER(cfp);
221}
222
223#if VM_CHECK_MODE > 0
224static int
225VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
226{
227 const VALUE *start = ec->vm_stack;
228 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
229 VM_ASSERT(start != NULL);
230
231 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
232 return FALSE;
233 }
234 else {
235 return TRUE;
236 }
237}
238
239static int
240VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
241{
242 const VALUE *start = ec->vm_stack;
243 const VALUE *end = (VALUE *)ec->cfp;
244 VM_ASSERT(start != NULL);
245
246 if (start <= ep && ep < end) {
247 return FALSE;
248 }
249 else {
250 return TRUE;
251 }
252}
253
254static int
255vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
256{
257 if (VM_EP_IN_HEAP_P(ec, ep)) {
258 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
259
260 if (!UNDEF_P(envval)) {
261 const rb_env_t *env = (const rb_env_t *)envval;
262
263 VM_ASSERT(imemo_type_p(envval, imemo_env));
264 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
265 VM_ASSERT(env->ep == ep);
266 }
267 return TRUE;
268 }
269 else {
270 return FALSE;
271 }
272}
273
274int
275rb_vm_ep_in_heap_p(const VALUE *ep)
276{
277 const rb_execution_context_t *ec = GET_EC();
278 if (ec->vm_stack == NULL) return TRUE;
279 return vm_ep_in_heap_p_(ec, ep);
280}
281#endif
282
283static struct rb_captured_block *
284VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
285{
286 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
287 return (struct rb_captured_block *)&cfp->self;
288}
289
290static rb_control_frame_t *
291VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
292{
293 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
294 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
295 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + VM_DEBUG_BP_CHECK ? 1 : 0);
296 return cfp;
297}
298
299static int
300VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
301{
302 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
303 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
304}
305
306static VALUE
307vm_passed_block_handler(rb_execution_context_t *ec)
308{
309 VALUE block_handler = ec->passed_block_handler;
310 ec->passed_block_handler = VM_BLOCK_HANDLER_NONE;
311 vm_block_handler_verify(block_handler);
312 return block_handler;
313}
314
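/* Allocate a new cref (lexical class reference) imemo recording klass, the scope
 * visibility, and the refinements carried over from prev_cref. prev_cref becomes
 * the next link in the chain (or its own next link, when use_prev_prev is set). */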
315static rb_cref_t *
316vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev, int singleton)
317{
318 VALUE refinements = Qnil;
319 int omod_shared = FALSE;
320
321 /* scope */
322 rb_scope_visibility_t scope_visi;
323 scope_visi.method_visi = visi;
324 scope_visi.module_func = module_func;
325
326 /* refinements */
327 if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why is CREF_NEXT(cref) 1? */) {
328 refinements = CREF_REFINEMENTS(prev_cref);
329
330 if (!NIL_P(refinements)) {
331 omod_shared = TRUE;
332 CREF_OMOD_SHARED_SET(prev_cref);
333 }
334 }
335
336 VM_ASSERT(singleton || klass);
337
338 rb_cref_t *cref = SHAREABLE_IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
339 cref->klass_or_self = klass;
340 cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
341 *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi;
342
343 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
344 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
345 if (singleton) CREF_SINGLETON_SET(cref);
346
347 return cref;
348}
349
350static rb_cref_t *
351vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int singleton)
352{
353 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE, singleton);
354}
355
356static rb_cref_t *
357vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
358{
359 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE, FALSE);
360}
361
362static int
363ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
364{
365 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
366}
367
368static rb_cref_t *
369vm_cref_dup(const rb_cref_t *cref)
370{
371 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
372 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
373 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
374 int singleton = CREF_SINGLETON(cref);
375
376 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
377
378 if (!NIL_P(CREF_REFINEMENTS(cref))) {
379 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
380 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
381 CREF_REFINEMENTS_SET(new_cref, ref);
382 CREF_OMOD_SHARED_UNSET(new_cref);
383 }
384
385 return new_cref;
386}
387
388
389rb_cref_t *
390rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
391{
392 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
393 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
394 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
395 int singleton = CREF_SINGLETON(cref);
396
397 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
398
399 if (!NIL_P(CREF_REFINEMENTS(cref))) {
400 CREF_REFINEMENTS_SET(new_cref, Qnil);
401 CREF_OMOD_SHARED_UNSET(new_cref);
402 }
403
404 return new_cref;
405}
406
407static rb_cref_t *
408vm_cref_new_toplevel(rb_execution_context_t *ec)
409{
410 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE, FALSE);
411 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
412
413 if (top_wrapper) {
414 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE);
415 }
416
417 return cref;
418}
419
420rb_cref_t *
421rb_vm_cref_new_toplevel(void)
422{
423 return vm_cref_new_toplevel(GET_EC());
424}
425
426static void
427vm_cref_dump(const char *mesg, const rb_cref_t *cref)
428{
429 ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
430
431 while (cref) {
432 ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
433 cref = CREF_NEXT(cref);
434 }
435}
436
437void
438rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
439{
440 *((const VALUE **)&dst->as.captured.ep) = ep;
441 RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
442}
443
444static void
445vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
446{
447 const rb_env_t *env = (rb_env_t *)envval;
448 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
449 rb_vm_block_ep_update(bindval, &bind->block, env->ep);
450}
451
452#if VM_COLLECT_USAGE_DETAILS
453static void vm_collect_usage_operand(int insn, int n, VALUE op);
454static void vm_collect_usage_insn(int insn);
455static void vm_collect_usage_register(int reg, int isset);
456#endif
457
458static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
459static VALUE vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
460 int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
461 const rb_callable_method_entry_t *me);
462static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
463
464#if USE_YJIT
465// Counter to serve as a proxy for execution time, total number of calls
466static uint64_t yjit_total_entry_hits = 0;
467
468// Number of calls used to estimate how hot an ISEQ is
469#define YJIT_CALL_COUNT_INTERV 20u
470
472static inline bool
473rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls)
474{
475 yjit_total_entry_hits += 1;
476
477 // Record the number of calls at the beginning of the interval
478 if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) {
479 iseq->body->yjit_calls_at_interv = yjit_total_entry_hits;
480 }
481
482 // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ
483 // This gives us a ratio of how hot/cold this ISEQ is
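 // Illustrative walkthrough (assuming rb_yjit_call_threshold is 30): yjit_calls_at_interv
 // is recorded when entry_calls reaches 10 (30 - 20). When entry_calls then reaches 30,
 // the ISEQ is compiled only if at most rb_yjit_cold_threshold total entry hits elapsed
 // while it accumulated those last 20 calls; otherwise it is counted as a cold_iseq_entry
 // and skipped.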
484 if (entry_calls == rb_yjit_call_threshold) {
485 // We expect threshold 1 to compile everything immediately
486 if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) {
487 return true;
488 }
489
490 uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv;
491
492 // Reject ISEQs that don't get called often enough
493 if (num_calls > rb_yjit_cold_threshold) {
494 rb_yjit_incr_counter("cold_iseq_entry");
495 return false;
496 }
497
498 return true;
499 }
500
501 return false;
502}
503#else
504#define rb_yjit_threshold_hit(iseq, entry_calls) false
505#endif
506
507#if USE_YJIT
508// Generate JIT code that supports the following kinds of ISEQ entries:
509// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
510// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
511// The current vm_exec stops if JIT code returns a non-Qundef value.
512// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
513// blocks called by a Ruby frame that isn't compiled or side-exited).
514// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
515// vm_exec does NOT stop whether JIT code returns Qundef or not.
516static inline rb_jit_func_t
517yjit_compile(rb_execution_context_t *ec)
518{
519 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
520 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
521
522 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
523 if (body->jit_entry == NULL) {
524 body->jit_entry_calls++;
525 if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
526 rb_yjit_compile_iseq(iseq, ec, false);
527 }
528 }
529 return body->jit_entry;
530}
531#else
532# define yjit_compile(ec) ((rb_jit_func_t)0)
533#endif
534
535#if USE_ZJIT
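// ZJIT counterpart of yjit_compile(): profiling instrumentation is enabled once
// jit_entry_calls reaches rb_zjit_profile_threshold, and the ISEQ is compiled with
// ZJIT once it reaches rb_zjit_call_threshold.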
536static inline rb_jit_func_t
537zjit_compile(rb_execution_context_t *ec)
538{
539 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
540 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
541
542 if (body->jit_entry == NULL) {
543 body->jit_entry_calls++;
544
545 // At profile-threshold, rewrite some of the YARV instructions
546 // to zjit_* instructions to profile these instructions.
547 if (body->jit_entry_calls == rb_zjit_profile_threshold) {
548 rb_zjit_profile_enable(iseq);
549 }
550
551 // At call-threshold, compile the ISEQ with ZJIT.
552 if (body->jit_entry_calls == rb_zjit_call_threshold) {
553 rb_zjit_compile_iseq(iseq, ec, false);
554 }
555 }
556 return body->jit_entry;
557}
558#else
559# define zjit_compile(ec) ((rb_jit_func_t)0)
560#endif
561
562static inline void zjit_materialize_frames(rb_control_frame_t *cfp);
563
564#if USE_YJIT || USE_ZJIT
565// Execute JIT code compiled by yjit_compile() or zjit_compile()
566static inline VALUE
567jit_exec(rb_execution_context_t *ec)
568{
569#if USE_YJIT
570 if (rb_yjit_enabled_p) {
571 rb_jit_func_t func = yjit_compile(ec);
572 if (func) {
573 return func(ec, ec->cfp);
574 }
575 return Qundef;
576 }
577#endif
578
579#if USE_ZJIT
580 void *zjit_entry = rb_zjit_entry;
581 if (zjit_entry) {
582 rb_jit_func_t func = zjit_compile(ec);
583 if (func) {
584 VALUE result = ((rb_zjit_func_t)zjit_entry)(ec, ec->cfp, func);
585 // Materialize any remaining lightweight ZJIT frames on side exit.
586 // This is done here (once per JIT entry) instead of in each side exit
587 // to reduce generated code size.
588 if (UNDEF_P(result)) {
589 ec->cfp->jit_return = 0;
590 zjit_materialize_frames(ec->cfp);
591 }
592 return result;
593 }
594 }
595#endif
596 return Qundef;
597}
598
599// Generate JIT code that supports the following kind of ISEQ entry:
600// * The first ISEQ pushed by vm_exec_handle_exception. The frame would
601// point to a location specified by a catch table, and it doesn't have
602// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
603// a non-Qundef value. So you should not return a non-Qundef value
604// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
605static inline rb_jit_func_t
606jit_compile_exception(rb_execution_context_t *ec)
607{
608 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
609 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
610
611#if USE_ZJIT
612 if (body->jit_exception == NULL && rb_zjit_enabled_p) {
613 body->jit_exception_calls++;
614
615 // At profile-threshold, rewrite some of the YARV instructions
616 // to zjit_* instructions to profile these instructions.
617 if (body->jit_exception_calls == rb_zjit_profile_threshold) {
618 rb_zjit_profile_enable(iseq);
619 }
620
621 // At call-threshold, compile the ISEQ with ZJIT.
622 if (body->jit_exception_calls == rb_zjit_call_threshold) {
623 rb_zjit_compile_iseq(iseq, ec, true);
624 }
625 }
626#endif
627
628#if USE_YJIT
629 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
630 if (body->jit_exception == NULL && rb_yjit_enabled_p) {
631 body->jit_exception_calls++;
632 if (body->jit_exception_calls == rb_yjit_call_threshold) {
633 rb_yjit_compile_iseq(iseq, ec, true);
634 }
635 }
636#endif
637 return body->jit_exception;
638}
639
640// Execute JIT code compiled by jit_compile_exception()
641static inline VALUE
642jit_exec_exception(rb_execution_context_t *ec)
643{
644 rb_jit_func_t func = jit_compile_exception(ec);
645 if (func) {
646 // Call the JIT code
647 return func(ec, ec->cfp);
648 }
649 else {
650 return Qundef;
651 }
652}
653#else
654# define jit_compile_exception(ec) ((rb_jit_func_t)0)
655# define jit_exec(ec) Qundef
656# define jit_exec_exception(ec) Qundef
657#endif
658
659static void add_opt_method_entry(const rb_method_entry_t *me);
660
661#define RB_TYPE_2_P(obj, type1, type2) \
662 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2))
663#define RB_TYPE_3_P(obj, type1, type2, type3) \
664 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2) || RB_TYPE_P(obj, type3))
665
666#define VM_ASSERT_TYPE(obj, type) \
667 VM_ASSERT(RB_TYPE_P(obj, type), #obj ": %s", rb_obj_info(obj))
668#define VM_ASSERT_TYPE2(obj, type1, type2) \
669 VM_ASSERT(RB_TYPE_2_P(obj, type1, type2), #obj ": %s", rb_obj_info(obj))
670#define VM_ASSERT_TYPE3(obj, type1, type2, type3) \
671 VM_ASSERT(RB_TYPE_3_P(obj, type1, type2, type3), #obj ": %s", rb_obj_info(obj))
672
673#include "vm_insnhelper.c"
674
675#include "vm_exec.c"
676
677#include "vm_method.c"
678#include "vm_eval.c"
679
680#define PROCDEBUG 0
681
682VALUE rb_cRubyVM;
684VALUE rb_mRubyVMFrozenCore;
685VALUE rb_block_param_proxy;
686
687VALUE ruby_vm_const_missing_count = 0;
688rb_vm_t *ruby_current_vm_ptr = NULL;
689rb_ractor_t *ruby_single_main_ractor;
690bool ruby_vm_keep_script_lines;
691
692#ifdef RB_THREAD_LOCAL_SPECIFIER
693RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
694
695#ifdef RUBY_NT_SERIAL
696RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
697#endif
698
699// no-inline decl in vm_core.h
700rb_execution_context_t *
701rb_current_ec_noinline(void)
702{
703 return ruby_current_ec;
704}
705
706void
707rb_current_ec_set(rb_execution_context_t *ec)
708{
709 ruby_current_ec = ec;
710}
711
712
713#ifdef RB_THREAD_CURRENT_EC_NOINLINE
714rb_execution_context_t *
715rb_current_ec(void)
716{
717 return ruby_current_ec;
718}
719
720#endif
721#else
722native_tls_key_t ruby_current_ec_key;
723
724// no-inline decl in vm_core.h
725rb_execution_context_t *
726rb_current_ec_noinline(void)
727{
728 return native_tls_get(ruby_current_ec_key);
729}
730
731#endif
732
733rb_event_flag_t ruby_vm_event_flags = 0;
734rb_event_flag_t ruby_vm_event_enabled_global_flags = 0;
735unsigned int ruby_vm_c_events_enabled = 0;
736unsigned int ruby_vm_iseq_events_enabled = 0;
737
738rb_serial_t ruby_vm_constant_cache_invalidations = 0;
739rb_serial_t ruby_vm_constant_cache_misses = 0;
740rb_serial_t ruby_vm_global_cvar_state = 1;
741
742static const struct rb_callcache vm_empty_cc = {
743 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
744 .klass = Qundef,
745 .cme_ = NULL,
746 .call_ = vm_call_general,
747 .aux_ = {
748 .v = Qfalse,
749 }
750};
751
752static const struct rb_callcache vm_empty_cc_for_super = {
753 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
754 .klass = Qundef,
755 .cme_ = NULL,
756 .call_ = vm_call_super_method,
757 .aux_ = {
758 .v = Qfalse,
759 }
760};
761
762static void thread_free(void *ptr);
763
764void
765rb_vm_inc_const_missing_count(void)
766{
767 ruby_vm_const_missing_count +=1;
768}
769
770int
771rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
772 struct ruby_dtrace_method_hook_args *args)
773{
774 enum ruby_value_type type;
775 if (!klass) {
776 if (!ec) ec = GET_EC();
777 if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
778 return FALSE;
779 }
780 if (RB_TYPE_P(klass, T_ICLASS)) {
781 klass = RBASIC(klass)->klass;
782 }
783 else if (RCLASS_SINGLETON_P(klass)) {
784 klass = RCLASS_ATTACHED_OBJECT(klass);
785 if (NIL_P(klass)) return FALSE;
786 }
787 type = BUILTIN_TYPE(klass);
788 if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
789 VALUE name = rb_class_path(klass);
790 const char *classname, *filename;
791 const char *methodname = rb_id2name(id);
792 if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
793 if (NIL_P(name) || !(classname = StringValuePtr(name)))
794 classname = "<unknown>";
795 args->classname = classname;
796 args->methodname = methodname;
797 args->filename = filename;
798 args->klass = klass;
799 args->name = name;
800 return TRUE;
801 }
802 }
803 return FALSE;
804}
805
806extern unsigned int redblack_buffer_size;
807
808/*
809 * call-seq:
810 * RubyVM.stat -> Hash
811 * RubyVM.stat(hsh) -> hsh
812 * RubyVM.stat(Symbol) -> Numeric
813 *
814 * Returns a Hash containing implementation-dependent counters inside the VM.
815 *
816 * This hash includes information about method/constant caches:
817 *
818 * {
819 * :constant_cache_invalidations=>2,
820 * :constant_cache_misses=>14,
821 * :global_cvar_state=>27
822 * }
823 *
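 *  A single counter can also be read by passing its name as a Symbol:
 *
 *    RubyVM.stat(:global_cvar_state) #=> 27
 *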
824 * If <tt>USE_DEBUG_COUNTER</tt> is enabled, debug counters will be included.
825 *
826 * The contents of the hash are implementation specific and may be changed in
827 * the future.
828 *
829 * This method is only expected to work on C Ruby.
830 */
831static VALUE
832vm_stat(int argc, VALUE *argv, VALUE self)
833{
834 static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id;
835 static VALUE sym_shape_cache_size;
836 VALUE arg = Qnil;
837 VALUE hash = Qnil, key = Qnil;
838
839 if (rb_check_arity(argc, 0, 1) == 1) {
840 arg = argv[0];
841 if (SYMBOL_P(arg))
842 key = arg;
843 else if (RB_TYPE_P(arg, T_HASH))
844 hash = arg;
845 else
846 rb_raise(rb_eTypeError, "non-hash or symbol given");
847 }
848 else {
849 hash = rb_hash_new();
850 }
851
852#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
853 S(constant_cache_invalidations);
854 S(constant_cache_misses);
855 S(global_cvar_state);
856 S(next_shape_id);
857 S(shape_cache_size);
858#undef S
859
860#define SET(name, attr) \
861 if (key == sym_##name) \
862 return SERIALT2NUM(attr); \
863 else if (hash != Qnil) \
864 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
865
866 SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
867 SET(constant_cache_misses, ruby_vm_constant_cache_misses);
868 SET(global_cvar_state, ruby_vm_global_cvar_state);
869 SET(next_shape_id, (rb_serial_t)rb_shapes_count());
870 SET(shape_cache_size, (rb_serial_t)rb_shape_tree.cache_size);
871#undef SET
872
873#if USE_DEBUG_COUNTER
874 ruby_debug_counter_show_at_exit(FALSE);
875 for (size_t i = 0; i < RB_DEBUG_COUNTER_MAX; i++) {
876 const VALUE name = rb_sym_intern_ascii_cstr(rb_debug_counter_names[i]);
877 const VALUE boxed_value = SIZET2NUM(rb_debug_counter[i]);
878
879 if (key == name) {
880 return boxed_value;
881 }
882 else if (hash != Qnil) {
883 rb_hash_aset(hash, name, boxed_value);
884 }
885 }
886#endif
887
888 if (!NIL_P(key)) { /* matched key should return above */
889 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
890 }
891
892 return hash;
893}
894
895/* control stack frame */
896
897static void
898vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_box_t *box)
899{
900 if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
901 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
902 }
903
904 /* for return */
905 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
906 rb_ec_thread_ptr(ec)->top_self,
907 GC_GUARDED_PTR(box),
908 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
909 ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
910 ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
911}
912
913static void
914vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
915{
916 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
917 vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
918 (VALUE)cref, /* cref or me */
919 ISEQ_BODY(iseq)->iseq_encoded,
920 ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size,
921 ISEQ_BODY(iseq)->stack_max);
922}
923
924static void
925vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
926{
927 VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
928 rb_binding_t *bind;
929
930 GetBindingPtr(toplevel_binding, bind);
931 RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
932
933 vm_set_eval_stack(ec, iseq, 0, &bind->block);
934
935 /* save binding */
936 if (ISEQ_BODY(iseq)->local_table_size > 0) {
937 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
938 }
939}
940
941rb_control_frame_t *
942rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
943{
944 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
945 if (CFP_ISEQ(cfp)) {
946 return (rb_control_frame_t *)cfp;
947 }
948 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
949 }
950 return 0;
951}
952
953rb_control_frame_t *
954rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
955{
956 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
957 if (VM_FRAME_RUBYFRAME_P(cfp)) {
958 return (rb_control_frame_t *)cfp;
959 }
960 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
961 }
962 return 0;
963}
964
965static rb_control_frame_t *
966vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
967{
968 if (VM_FRAME_RUBYFRAME_P(cfp)) {
969 return (rb_control_frame_t *)cfp;
970 }
971
972 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
973
974 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
975 if (VM_FRAME_RUBYFRAME_P(cfp)) {
976 return (rb_control_frame_t *)cfp;
977 }
978
979 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
980 break;
981 }
982 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
983 }
984 return 0;
985}
986
987void
988rb_vm_pop_cfunc_frame(void)
989{
990 rb_execution_context_t *ec = GET_EC();
991 rb_control_frame_t *cfp = ec->cfp;
992 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
993
994 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
995 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
996 vm_pop_frame(ec, cfp, cfp->ep);
997}
998
999void
1000rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
1001{
1002 /* check skipped frame */
1003 while (ec->cfp != cfp) {
1004#if VMDEBUG
1005 printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
1006#endif
1007 if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
1008 rb_vm_pop_frame(ec);
1009 }
1010 else { /* unlikely path */
1011 rb_vm_pop_cfunc_frame();
1012 }
1013 }
1014}
1015
1016/* at exit */
1017
1018void
1019ruby_vm_at_exit(void (*func)(rb_vm_t *))
1020{
1021 rb_vm_t *vm = GET_VM();
1022 rb_at_exit_list *nl = ALLOC(rb_at_exit_list);
1023 nl->func = func;
1024 nl->next = vm->at_exit;
1025 vm->at_exit = nl;
1026}
1027
1028static void
1029ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
1030{
1031 rb_at_exit_list *l = vm->at_exit;
1032
1033 while (l) {
1034 rb_at_exit_list* t = l->next;
1035 rb_vm_at_exit_func *func = l->func;
1036 ruby_xfree(l);
1037 l = t;
1038 (*func)(vm);
1039 }
1040}
1041
1042/* Env */
1043
1044static VALUE check_env_value(const rb_env_t *env);
1045
1046static int
1047check_env(const rb_env_t *env)
1048{
1049 fputs("---\n", stderr);
1050 ruby_debug_printf("envptr: %p\n", (void *)&env->ep[0]);
1051 ruby_debug_printf("envval: %10p ", (void *)env->ep[1]);
1052 dp(env->ep[1]);
1053 ruby_debug_printf("ep: %10p\n", (void *)env->ep);
1054 if (rb_vm_env_prev_env(env)) {
1055 fputs(">>\n", stderr);
1056 check_env_value(rb_vm_env_prev_env(env));
1057 fputs("<<\n", stderr);
1058 }
1059 return 1;
1060}
1061
1062static VALUE
1063check_env_value(const rb_env_t *env)
1064{
1065 if (check_env(env)) {
1066 return (VALUE)env;
1067 }
1068 rb_bug("invalid env");
1069 return Qnil; /* unreachable */
1070}
1071
1072static VALUE
1073vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
1074{
1075 switch (vm_block_handler_type(block_handler)) {
1076 case block_handler_type_ifunc:
1077 case block_handler_type_iseq:
1078 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
1079
1080 case block_handler_type_symbol:
1081 case block_handler_type_proc:
1082 return block_handler;
1083 }
1084 VM_UNREACHABLE(vm_block_handler_escape);
1085 return Qnil;
1086}
1087
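/* Escape the environment of cfp from the VM stack into a heap-allocated imemo_env.
 * For non-local (block) frames the outer environment is escaped first; for local
 * frames any remaining block handler is converted to a Proc, so the resulting env
 * no longer references the VM stack. */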
1088static VALUE
1089vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
1090{
1091 const VALUE * const ep = cfp->ep;
1092 VALUE *env_body, *env_ep;
1093 int local_size, env_size;
1094
1095 if (VM_ENV_ESCAPED_P(ep)) {
1096 return VM_ENV_ENVVAL(ep);
1097 }
1098
1099 if (!VM_ENV_LOCAL_P(ep)) {
1100 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1101 if (!VM_ENV_ESCAPED_P(prev_ep)) {
1102 rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1103
1104 while (prev_cfp->ep != prev_ep) {
1105 prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
1106 VM_ASSERT(prev_cfp->ep != NULL);
1107 }
1108
1109 vm_make_env_each(ec, prev_cfp);
1110 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
1111 }
1112 }
1113 else {
1114 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1115 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
1116
1117 if (block_handler != VM_BLOCK_HANDLER_NONE) {
1118 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
1119 VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
1120 }
1121 }
1122
1123 const rb_iseq_t *iseq = CFP_ISEQ(cfp);
1124 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
1125 local_size = VM_ENV_DATA_SIZE;
1126 }
1127 else {
1128 local_size = ISEQ_BODY(iseq)->local_table_size;
1129 if (ISEQ_BODY(iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
1130 int ci_offset = local_size - ISEQ_BODY(iseq)->param.size + VM_ENV_DATA_SIZE;
1131
1132 CALL_INFO ci = (CALL_INFO)VM_CF_LEP(cfp)[-ci_offset];
1133 local_size += vm_ci_argc(ci);
1134 }
1135 local_size += VM_ENV_DATA_SIZE;
1136 }
1137
1138 // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
1139 // This is done before creating the imemo_env because VM_STACK_ENV_WRITE
1140 // below leaves the on-stack ep in a state that is unsafe to GC.
1141 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1142 rb_yjit_invalidate_ep_is_bp(iseq);
1143 rb_zjit_invalidate_no_ep_escape(iseq);
1144 }
1145
1146 /*
1147 * # local variables on a stack frame (N == local_size)
1148 * [lvar1, lvar2, ..., lvarN, SPECVAL]
1149 * ^
1150 * ep[0]
1151 *
1152 * # moved local variables
1153 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
1154 * ^ ^
1155 * env->env[0] ep[0]
1156 */
1157
1158 env_size = local_size +
1159 1 /* envval */;
1160
1161 // Careful with order in the following sequence. Each allocation can move objects.
1162 env_body = ALLOC_N(VALUE, env_size);
1163 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0);
1164
1165 // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill())
1166 MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
1167
1168 env_ep = &env_body[local_size - 1 /* specval */];
1169 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1170
1171 env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? iseq : NULL);
1172 env->ep = env_ep;
1173 env->env = env_body;
1174 env->env_size = env_size;
1175
1176 cfp->ep = env_ep;
1177 VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
1178 VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
1179
1180#if 0
1181 for (i = 0; i < local_size; i++) {
1182 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1183 /* clear value stack for GC */
1184 ep[-local_size + i] = 0;
1185 }
1186 }
1187#endif
1188
1189 return (VALUE)env;
1190}
1191
1192static VALUE
1193vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1194{
1195 VALUE envval = vm_make_env_each(ec, cfp);
1196
1197 if (PROCDEBUG) {
1198 check_env_value((const rb_env_t *)envval);
1199 }
1200
1201 return envval;
1202}
1203
1204void
1205rb_vm_stack_to_heap(rb_execution_context_t *ec)
1206{
1207 rb_control_frame_t *cfp = ec->cfp;
1208 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
1209 vm_make_env_object(ec, cfp);
1210 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1211 }
1212}
1213
1214const rb_env_t *
1215rb_vm_env_prev_env(const rb_env_t *env)
1216{
1217 const VALUE *ep = env->ep;
1218
1219 if (VM_ENV_LOCAL_P(ep)) {
1220 return NULL;
1221 }
1222 else {
1223 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1224 return VM_ENV_ENVVAL_PTR(prev_ep);
1225 }
1226}
1227
1228static int
1229collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
1230{
1231 unsigned int i;
1232 if (!iseq) return 0;
1233 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1234 local_var_list_add(vars, ISEQ_BODY(iseq)->local_table[i]);
1235 }
1236 return 1;
1237}
1238
1239static void
1240collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
1241{
1242 do {
1243 if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
1244 collect_local_variables_in_iseq(env->iseq, vars);
1245 } while ((env = rb_vm_env_prev_env(env)) != NULL);
1246}
1247
1248static int
1249vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
1250{
1251 if (VM_ENV_ESCAPED_P(ep)) {
1252 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
1253 return 1;
1254 }
1255 else {
1256 return 0;
1257 }
1258}
1259
1260VALUE
1261rb_vm_env_local_variables(const rb_env_t *env)
1262{
1263 struct local_var_list vars;
1264 local_var_list_init(&vars);
1265 collect_local_variables_in_env(env, &vars);
1266 return local_var_list_finish(&vars);
1267}
1268
1269VALUE
1270rb_vm_env_numbered_parameters(const rb_env_t *env)
1271{
1272 struct local_var_list vars;
1273 local_var_list_init(&vars);
1274 // if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; // TODO: is this needed?
1275 const rb_iseq_t *iseq = env->iseq;
1276 unsigned int i;
1277 if (!iseq) return 0;
1278 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1279 numparam_list_add(&vars, ISEQ_BODY(iseq)->local_table[i]);
1280 }
1281 return local_var_list_finish(&vars);
1282}
1283
1284VALUE
1285rb_iseq_local_variables(const rb_iseq_t *iseq)
1286{
1287 struct local_var_list vars;
1288 local_var_list_init(&vars);
1289 while (collect_local_variables_in_iseq(iseq, &vars)) {
1290 iseq = ISEQ_BODY(iseq)->parent_iseq;
1291 }
1292 return local_var_list_finish(&vars);
1293}
1294
1295/* Proc */
1296
1297static VALUE
1298vm_proc_create_from_captured(VALUE klass,
1299 const struct rb_captured_block *captured,
1300 enum rb_block_type block_type,
1301 int8_t is_from_method, int8_t is_lambda)
1302{
1303 VALUE procval = rb_proc_alloc(klass);
1304 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1305
1306 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
1307
1308 /* copy block */
1309 RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
1310 RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
1311 rb_vm_block_ep_update(procval, &proc->block, captured->ep);
1312
1313 vm_block_type_set(&proc->block, block_type);
1314 proc->is_from_method = is_from_method;
1315 proc->is_lambda = is_lambda;
1316
1317 return procval;
1318}
1319
1320void
1321rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
1322{
1323 /* copy block */
1324 switch (vm_block_type(src)) {
1325 case block_type_iseq:
1326 case block_type_ifunc:
1327 RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
1328 RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
1329 rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
1330 break;
1331 case block_type_symbol:
1332 RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
1333 break;
1334 case block_type_proc:
1335 RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
1336 break;
1337 }
1338}
1339
1340static VALUE
1341proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
1342{
1343 VALUE procval = rb_proc_alloc(klass);
1344 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1345
1346 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
1347 rb_vm_block_copy(procval, &proc->block, block);
1348 vm_block_type_set(&proc->block, block->type);
1349 proc->is_from_method = is_from_method;
1350 proc->is_lambda = is_lambda;
1351
1352 return procval;
1353}
1354
1355VALUE
1356rb_proc_dup(VALUE self)
1357{
1358 VALUE procval;
1359 rb_proc_t *src;
1360
1361 GetProcPtr(self, src);
1362
1363 switch (vm_block_type(&src->block)) {
1364 case block_type_ifunc:
1365 procval = rb_func_proc_dup(self);
1366 break;
1367 default:
1368 procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
1369 break;
1370 }
1371
1372 if (RB_OBJ_SHAREABLE_P(self)) RB_OBJ_SET_SHAREABLE(procval);
1373 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
1374 return procval;
1375}
1376
1377struct collect_outer_variable_name_data {
1378 VALUE ary;
1379 VALUE read_only;
1380 bool yield;
1381 bool isolate;
1382};
1383
1384static VALUE
1385ID2NUM(ID id)
1386{
1387 if (SIZEOF_VOIDP > SIZEOF_LONG)
1388 return ULL2NUM(id);
1389 else
1390 return ULONG2NUM(id);
1391}
1392
1393static ID
1394NUM2ID(VALUE num)
1395{
1396 if (SIZEOF_VOIDP > SIZEOF_LONG)
1397 return (ID)NUM2ULL(num);
1398 else
1399 return (ID)NUM2ULONG(num);
1400}
1401
1402static enum rb_id_table_iterator_result
1403collect_outer_variable_names(ID id, VALUE val, void *ptr)
1404{
1405 struct collect_outer_variable_name_data *data = ptr;
1406
1407 if (id == rb_intern("yield")) {
1408 data->yield = true;
1409 }
1410 else {
1411 VALUE *store;
1412 if (data->isolate ||
1413 val == Qtrue /* write */) {
1414 store = &data->ary;
1415 }
1416 else {
1417 store = &data->read_only;
1418 }
1419 if (*store == Qfalse) *store = rb_ary_new();
1420 rb_ary_push(*store, ID2NUM(id));
1421 }
1422 return ID_TABLE_CONTINUE;
1423}
1424
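/* Make a heap copy of the environment at src_ep for an isolated or Ractor-shareable
 * Proc. Only locals named in read_only_variables are copied, and each must be
 * shareable and never reassigned, otherwise Ractor::IsolationError is raised;
 * all other slots are reset to Qfalse. Outer environments are copied recursively. */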
1425static const rb_env_t *
1426env_copy(const VALUE *src_ep, VALUE read_only_variables)
1427{
1428 const rb_env_t *src_env = (rb_env_t *)VM_ENV_ENVVAL(src_ep);
1429 VM_ASSERT(src_env->ep == src_ep);
1430
1431 VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
1432 VALUE *ep = &env_body[src_env->env_size - 2];
1433 const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
1434
1435 // Copy after allocations above, since they can move objects in src_ep.
1436 VALUE svar_val = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
1437 if (imemo_type_p(svar_val, imemo_svar)) {
1438 const struct vm_svar *svar = (struct vm_svar *)svar_val;
1439
1440 if (svar->cref_or_me) {
1441 svar_val = svar->cref_or_me;
1442 }
1443 else {
1444 svar_val = Qfalse;
1445 }
1446 }
1447 RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], svar_val);
1448
1449 ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
1450 if (!VM_ENV_LOCAL_P(src_ep)) {
1451 VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
1452 }
1453
1454 if (read_only_variables) {
1455 for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1456 ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
1457
1458 const struct rb_iseq_constant_body *body = ISEQ_BODY(src_env->iseq);
1459 for (unsigned int j=0; j<body->local_table_size; j++) {
1460 if (id == body->local_table[j]) {
1461 // check reassignment
1462 if (body->lvar_states[j] == lvar_reassigned) {
1463 VALUE name = rb_id2str(id);
1464 VALUE msg = rb_sprintf("cannot make a shareable Proc because "
1465 "the outer variable '%" PRIsVALUE "' may be reassigned.", name);
1466 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1467 }
1468
1469 // check shareable
1470 VALUE v = src_env->env[j];
1471 if (!rb_ractor_shareable_p(v)) {
1472 VALUE name = rb_id2str(id);
1473 VALUE msg = rb_sprintf("cannot make a shareable Proc because it can refer"
1474 " unshareable object %+" PRIsVALUE " from ", v);
1475 if (name)
1476 rb_str_catf(msg, "variable '%" PRIsVALUE "'", name);
1477 else
1478 rb_str_cat_cstr(msg, "a hidden variable");
1479 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1480 }
1481 RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v);
1482 rb_ary_delete_at(read_only_variables, i);
1483 break;
1484 }
1485 }
1486 }
1487 }
1488
1489 if (!VM_ENV_LOCAL_P(src_ep)) {
1490 const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
1491 const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1492 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
1493 RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env);
1494 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL);
1495 }
1496 else {
1497 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
1498 }
1499
1500 RB_OBJ_SET_SHAREABLE((VALUE)copied_env);
1501 return copied_env;
1502}
1503
1504static void
1505proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
1506{
1507 const struct rb_captured_block *captured = &proc->block.as.captured;
1508 const rb_env_t *env = env_copy(captured->ep, read_only_variables);
1509 *((const VALUE **)&proc->block.as.captured.ep) = env->ep;
1510 RB_OBJ_WRITTEN(self, Qundef, env);
1511}
1512
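/* Inspect the outer variables referenced by a Proc that is being isolated or made
 * shareable. Raises Ractor::IsolationError if the Proc writes any outer variable
 * (or reads any, when isolate is true) or uses 'yield'; otherwise returns the list
 * of read-only outer variable names (or Qfalse if there are none). */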
1513static VALUE
1514proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, const char *message)
1515{
1516 struct collect_outer_variable_name_data data = {
1517 .isolate = isolate,
1518 .ary = Qfalse,
1519 .read_only = Qfalse,
1520 .yield = false,
1521 };
1522 rb_id_table_foreach(outer_variables, collect_outer_variable_names, (void *)&data);
1523
1524 if (data.ary != Qfalse) {
1525 VALUE str = rb_sprintf("can not %s because it accesses outer variables", message);
1526 VALUE ary = data.ary;
1527 const char *sep = " (";
1528 for (long i = 0; i < RARRAY_LEN(ary); i++) {
1529 VALUE name = rb_id2str(NUM2ID(RARRAY_AREF(ary, i)));
1530 if (!name) continue;
1531 rb_str_cat_cstr(str, sep);
1532 sep = ", ";
1533 rb_str_append(str, name);
1534 }
1535 if (*sep == ',') rb_str_cat_cstr(str, ")");
1536 rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : ".");
1537 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, str));
1538 }
1539 else if (data.yield) {
1540 rb_raise(rb_eRactorIsolationError, "can not %s because it uses 'yield'.", message);
1541 }
1542
1543 return data.read_only;
1544}
1545
1546VALUE
1547rb_proc_isolate_bang(VALUE self, VALUE replace_self)
1548{
1549 const rb_iseq_t *iseq = vm_proc_iseq(self);
1550
1551 if (iseq) {
1552 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1553
1554 if (!UNDEF_P(replace_self)) {
1555 VM_ASSERT(rb_ractor_shareable_p(replace_self));
1556 RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
1557 }
1558
1559 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1560
1561 if (ISEQ_BODY(iseq)->outer_variables) {
1562 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, true, "isolate a Proc");
1563 }
1564
1565 proc_isolate_env(self, proc, Qfalse);
1566 proc->is_isolated = TRUE;
1567 RB_OBJ_WRITE(self, &proc->block.as.captured.self, Qnil);
1568 }
1569
1570 RB_OBJ_SET_SHAREABLE(self);
1571 return self;
1572}
1573
1574VALUE
1575rb_proc_isolate(VALUE self)
1576{
1577 VALUE dst = rb_proc_dup(self);
1578 rb_proc_isolate_bang(dst, Qundef);
1579 return dst;
1580}
1581
1582VALUE
1583rb_proc_ractor_make_shareable(VALUE self, VALUE replace_self)
1584{
1585 const rb_iseq_t *iseq = vm_proc_iseq(self);
1586
1587 if (iseq) {
1588 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1589
1590 if (!UNDEF_P(replace_self)) {
1591 RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
1592 }
1593
1594 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1595
1596 if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
1597 rb_raise(rb_eRactorIsolationError,
1598 "Proc's self is not shareable: %" PRIsVALUE,
1599 self);
1600 }
1601
1602 VALUE read_only_variables = Qfalse;
1603
1604 if (ISEQ_BODY(iseq)->outer_variables) {
1605 read_only_variables =
1606 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, false, "make a Proc shareable");
1607 }
1608
1609 proc_isolate_env(self, proc, read_only_variables);
1610 proc->is_isolated = TRUE;
1611 }
1612 else {
1613 const struct rb_block *block = vm_proc_block(self);
1614 if (block->type != block_type_symbol) rb_raise(rb_eRuntimeError, "not supported yet");
1615
1616 VALUE proc_self = vm_block_self(block);
1617 if (!rb_ractor_shareable_p(proc_self)) {
1618 rb_raise(rb_eRactorIsolationError,
1619 "Proc's self is not shareable: %" PRIsVALUE,
1620 self);
1621 }
1622 }
1623
1624 RB_OBJ_SET_FROZEN_SHAREABLE(self);
1625 return self;
1626}
1627
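/* Create a Proc or lambda from a captured block, escaping the captured environment
 * to the heap first when necessary. For ifunc blocks, svar_lep is adjusted so that
 * it no longer points into a VM stack frame that may be gone. */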
1628VALUE
1629rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
1630{
1631 VALUE procval;
1632 enum imemo_type code_type = imemo_type(captured->code.val);
1633
1634 if (!VM_ENV_ESCAPED_P(captured->ep)) {
1635 rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
1636 vm_make_env_object(ec, cfp);
1637 }
1638
1639 VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
1640 VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc);
1641
1642 procval = vm_proc_create_from_captured(klass, captured,
1643 code_type == imemo_iseq ? block_type_iseq : block_type_ifunc,
1644 FALSE, is_lambda);
1645
1646 if (code_type == imemo_ifunc) {
1647 struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val;
1648 if (ifunc->svar_lep) {
1649 VALUE ep0 = ifunc->svar_lep[0];
1650 if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) {
1651 // `ep0 == imemo_env` means this ep is escaped to heap (in env object).
1652 const rb_env_t *env = (const rb_env_t *)ep0;
1653 ifunc->svar_lep = (VALUE *)env->ep;
1654 }
1655 else {
1656 VM_ASSERT(FIXNUM_P(ep0));
1657 if (ep0 & VM_ENV_FLAG_ESCAPED) {
1658 // ok. do nothing
1659 }
1660 else {
1661 ifunc->svar_lep = NULL;
1662 }
1663 }
1664 }
1665 }
1666
1667 return procval;
1668}
1669
1670/* Binding */
1671
1672VALUE
1673rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
1674{
1675 rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(ec, src_cfp);
1676 rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
1677 VALUE bindval, envval;
1678 rb_binding_t *bind;
1679
1680 if (cfp == 0 || ruby_level_cfp == 0) {
1681 rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
1682 }
1683 if (!VM_FRAME_RUBYFRAME_P(src_cfp) &&
1684 !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) {
1685 rb_raise(rb_eRuntimeError, "Cannot create Binding object for non-Ruby caller");
1686 }
1687
1688 envval = vm_make_env_object(ec, cfp);
1689 bindval = rb_binding_alloc(rb_cBinding);
1690 GetBindingPtr(bindval, bind);
1691 vm_bind_update_env(bindval, bind, envval);
1692 RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
1693 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, CFP_ISEQ(cfp));
1694 RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(CFP_ISEQ(ruby_level_cfp))->location.pathobj);
1695 bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
1696
1697 return bindval;
1698}
1699
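/* Append dyncount new local variables (dynvars) to the binding's environment by
 * compiling a temporary NODE_SCOPE ISEQ on top of the binding's block, pushing it
 * as an eval frame, and re-capturing the resulting env. Returns the new env body. */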
1700const VALUE *
1701rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
1702{
1703 VALUE envval, pathobj = bind->pathobj;
1704 VALUE path = pathobj_path(pathobj);
1705 VALUE realpath = pathobj_realpath(pathobj);
1706 const struct rb_block *base_block;
1707 const rb_env_t *env;
1708 rb_execution_context_t *ec = GET_EC();
1709 const rb_iseq_t *base_iseq, *iseq;
1710 rb_node_scope_t tmp_node;
1711
1712 if (dyncount < 0) return 0;
1713
1714 base_block = &bind->block;
1715 base_iseq = vm_block_iseq(base_block);
1716
1717 VALUE idtmp = 0;
1718 rb_ast_id_table_t *dyns = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + dyncount * sizeof(ID));
1719 dyns->size = dyncount;
1720 MEMCPY(dyns->ids, dynvars, ID, dyncount);
1721
1722 rb_node_init(RNODE(&tmp_node), NODE_SCOPE);
1723 tmp_node.nd_tbl = dyns;
1724 tmp_node.nd_body = 0;
1725 tmp_node.nd_parent = NULL;
1726 tmp_node.nd_args = 0;
1727
1728 VALUE ast_value = rb_ruby_ast_new(RNODE(&tmp_node));
1729
1730 if (base_iseq) {
1731 iseq = rb_iseq_new(ast_value, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1732 }
1733 else {
1734 VALUE tempstr = rb_fstring_lit("<temp>");
1735 iseq = rb_iseq_new_top(ast_value, tempstr, tempstr, tempstr, NULL);
1736 }
1737 tmp_node.nd_tbl = 0; /* reset table */
1738 ALLOCV_END(idtmp);
1739
1740 vm_set_eval_stack(ec, iseq, 0, base_block);
1741 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1742 rb_vm_pop_frame(ec);
1743
1744 env = (const rb_env_t *)envval;
1745 return env->env;
1746}
1747
1748/* C -> Ruby: block */
1749
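/* Push a plain (non-bmethod) block frame for the captured ISEQ. VM_FRAME_FLAG_FINISH
 * makes the vm_exec() that follows return once this frame is popped. */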
1750static inline void
1751invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1752{
1753 int arg_size = ISEQ_BODY(iseq)->param.size;
1754
1755 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1756 VM_GUARDED_PREV_EP(captured->ep),
1757 (VALUE)cref, /* cref or method */
1758 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1759 ec->cfp->sp + arg_size,
1760 ISEQ_BODY(iseq)->local_table_size - arg_size,
1761 ISEQ_BODY(iseq)->stack_max);
1762}
1763
1764static inline void
1765invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1766{
1767 /* bmethod call from outside the VM */
1768 int arg_size = ISEQ_BODY(iseq)->param.size;
1769
1770 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
1771
1772 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1773 VM_GUARDED_PREV_EP(captured->ep),
1774 (VALUE)me,
1775 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1776 ec->cfp->sp + 1 /* self */ + arg_size,
1777 ISEQ_BODY(iseq)->local_table_size - arg_size,
1778 ISEQ_BODY(iseq)->stack_max);
1779
1780 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1781}
1782
1783ALWAYS_INLINE(static VALUE
1784 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1785 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1786 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1787
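/* Common path for invoking an ISEQ block or bmethod from C: spill the arguments onto
 * the VM stack (preceded by self for bmethods), set up block arguments, push the
 * frame, and run it with vm_exec(). */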
1788static inline VALUE
1789invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1790 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1791 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1792{
1793 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1794 int opt_pc;
1795 VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
1796 rb_control_frame_t *cfp = ec->cfp;
1797 VALUE *sp = cfp->sp;
1798 int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
1799 VALUE *use_argv = (VALUE *)argv;
1800 VALUE av[2];
1801
1802 stack_check(ec);
1803
1804 if (UNLIKELY(argc > VM_ARGC_STACK_MAX) &&
1805 (VM_ARGC_STACK_MAX >= 1 ||
1806 /* Skip ruby array for potential autosplat case */
1807 (argc != 1 || is_lambda))) {
1808 use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
1809 }
1810
1811 CHECK_VM_STACK_OVERFLOW(cfp, argc + 1);
1812 vm_check_canary(ec, sp);
1813
1814 VALUE *stack_argv = sp;
1815 if (me) {
1816 *sp = self; // bmethods need `self` on the VM stack
1817 stack_argv++;
1818 }
1819 cfp->sp = stack_argv + argc;
1820 MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space
1821
1822 opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler,
1823 (is_lambda ? arg_setup_method : arg_setup_block));
1824 cfp->sp = sp;
1825
1826 if (me == NULL) {
1827 invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1828 }
1829 else {
1830 invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1831 }
1832
1833 return vm_exec(ec);
1834}
1835
1836static VALUE
1837invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1838 int argc, const VALUE *argv,
1839 int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1840 int is_lambda, int force_blockarg)
1841{
1842 again:
1843 switch (vm_block_handler_type(block_handler)) {
1844 case block_handler_type_iseq:
1845 {
1846 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1847 return invoke_iseq_block_from_c(ec, captured, captured->self,
1848 argc, argv, kw_splat, passed_block_handler,
1849 cref, is_lambda, NULL);
1850 }
1851 case block_handler_type_ifunc:
1852 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1853 VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1854 argc, argv, kw_splat, passed_block_handler, NULL);
1855 case block_handler_type_symbol:
1856 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1857 argc, argv, kw_splat, passed_block_handler);
1858 case block_handler_type_proc:
1859 if (force_blockarg == FALSE) {
1860 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1861 }
1862 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1863 goto again;
1864 }
1865 VM_UNREACHABLE(invoke_block_from_c_splattable);
1866 return Qundef;
1867}
1868
1869static inline VALUE
1870check_block_handler(rb_execution_context_t *ec)
1871{
1872 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1873 vm_block_handler_verify(block_handler);
1874 if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1875 rb_vm_localjump_error("no block given", Qnil, 0);
1876 }
1877
1878 return block_handler;
1879}
1880
1881static VALUE
1882vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1883{
1884 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1885 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1886 cref, is_lambda, FALSE);
1887}
1888
1889static VALUE
1890vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1891{
1892 return vm_yield_with_cref(ec, argc, argv, kw_splat, NULL, FALSE);
1893}
1894
1895static VALUE
1896vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1897{
1898 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1899 argc, argv, kw_splat, block_handler,
1900 NULL, FALSE, FALSE);
1901}
1902
1903static VALUE
1904vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1905{
1906 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1907 RB_NO_KEYWORDS, VM_BLOCK_HANDLER_NONE, NULL, FALSE, TRUE);
1908}
1909
1910ALWAYS_INLINE(static VALUE
1911 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1912 VALUE self, int argc, const VALUE *argv,
1913 int kw_splat, VALUE passed_block_handler, int is_lambda,
1914 const rb_callable_method_entry_t *me));
1915
1916static inline VALUE
1917invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1918 VALUE self, int argc, const VALUE *argv,
1919 int kw_splat, VALUE passed_block_handler, int is_lambda,
1920 const rb_callable_method_entry_t *me)
1921{
1922 const struct rb_block *block = &proc->block;
1923
1924 again:
1925 switch (vm_block_type(block)) {
1926 case block_type_iseq:
1927 return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1928 case block_type_ifunc:
1929 if (kw_splat == 1) {
1930 VALUE keyword_hash = argv[argc-1];
1931 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
1932 keyword_hash = rb_to_hash_type(keyword_hash);
1933 }
1934 if (RHASH_EMPTY_P(keyword_hash)) {
1935 argc--;
1936 }
1937 else {
1938 ((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
1939 }
1940 }
1941 return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1942 case block_type_symbol:
1943 return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1944 case block_type_proc:
1945 is_lambda = block_proc_is_lambda(block->as.proc);
1946 block = vm_proc_block(block->as.proc);
1947 goto again;
1948 }
1949 VM_UNREACHABLE(invoke_block_from_c_proc);
1950 return Qundef;
1951}
1952
1953static VALUE
1954vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1955 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1956{
1957 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1958}
1959
1960static VALUE
1961vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1962 int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1963{
1964 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1965}
1966
1967VALUE
1968rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
1969 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1970{
1971 VALUE self = vm_block_self(&proc->block);
1972 vm_block_handler_verify(passed_block_handler);
1973
1974 if (proc->is_from_method) {
1975 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1976 }
1977 else {
1978 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1979 }
1980}
1981
1982VALUE
1983rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1984 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1985{
1986 vm_block_handler_verify(passed_block_handler);
1987
1988 if (proc->is_from_method) {
1989 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1990 }
1991 else {
1992 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1993 }
1994}
1995
1996/* special variable */
1997
1998VALUE *
1999rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
2000{
2001 while (!CFP_PC(cfp) || !CFP_ISEQ(cfp)) {
2002 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) {
2003 struct vm_ifunc *ifunc = (struct vm_ifunc *)CFP_ISEQ(cfp);
2004 return ifunc->svar_lep;
2005 }
2006 else {
2007 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2008 }
2009
2010 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
2011 return NULL;
2012 }
2013 }
2014
2015 return (VALUE *)VM_CF_LEP(cfp);
2016}
2017
2018static VALUE
2019vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
2020{
2021 return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key);
2022}
2023
2024static void
2025vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
2026{
2027 lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val);
2028}
2029
2030static VALUE
2031vm_svar_get(const rb_execution_context_t *ec, VALUE key)
2032{
2033 return vm_cfp_svar_get(ec, ec->cfp, key);
2034}
2035
2036static void
2037vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
2038{
2039 vm_cfp_svar_set(ec, ec->cfp, key, val);
2040}
2041
2042VALUE
2043rb_backref_get(void)
2044{
2045 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
2046}
2047
2048void
2049rb_backref_set(VALUE val)
2050{
2051 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
2052}
2053
2054VALUE
2055rb_lastline_get(void)
2056{
2057 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
2058}
2059
2060void
2061rb_lastline_set(VALUE val)
2062{
2063 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
2064}
2065
2066void
2067rb_lastline_set_up(VALUE val, unsigned int up)
2068{
2069 rb_control_frame_t * cfp = GET_EC()->cfp;
2070
2071 for(unsigned int i = 0; i < up; i++) {
2072 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2073 }
2074 vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val);
2075}
2076
2077/* misc */
2078
2079const char *
2080rb_sourcefile(void)
2081{
2082 const rb_execution_context_t *ec = GET_EC();
2083 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2084
2085 if (cfp) {
2086 return RSTRING_PTR(rb_iseq_path(CFP_ISEQ(cfp)));
2087 }
2088 else {
2089 return 0;
2090 }
2091}
2092
2093int
2094rb_sourceline(void)
2095{
2096 const rb_execution_context_t *ec = GET_EC();
2097 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2098
2099 if (cfp) {
2100 return rb_vm_get_sourceline(cfp);
2101 }
2102 else {
2103 return 0;
2104 }
2105}
2106
2107VALUE
2108rb_source_location(int *pline)
2109{
2110 const rb_execution_context_t *ec = GET_EC();
2111 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2112
2113 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
2114 if (pline) *pline = rb_vm_get_sourceline(cfp);
2115 return rb_iseq_path(CFP_ISEQ(cfp));
2116 }
2117 else {
2118 if (pline) *pline = 0;
2119 return Qnil;
2120 }
2121}
2122
2123const char *
2124rb_source_location_cstr(int *pline)
2125{
2126 VALUE path = rb_source_location(pline);
2127 if (NIL_P(path)) return NULL;
2128 return RSTRING_PTR(path);
2129}
2130
2131rb_cref_t *
2132rb_vm_cref(void)
2133{
2134 const rb_execution_context_t *ec = GET_EC();
2135 return vm_ec_cref(ec);
2136}
2137
2138rb_cref_t *
2139rb_vm_cref_replace_with_duplicated_cref(void)
2140{
2141 const rb_execution_context_t *ec = GET_EC();
2142 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2143 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
2144 ASSUME(cref);
2145 return cref;
2146}
2147
2148const rb_cref_t *
2149rb_vm_cref_in_context(VALUE self, VALUE cbase)
2150{
2151 const rb_execution_context_t *ec = GET_EC();
2152 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2153 const rb_cref_t *cref;
2154 if (!cfp || cfp->self != self) return NULL;
2155 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
2156 cref = vm_get_cref(cfp->ep);
2157 if (CREF_CLASS(cref) != cbase) return NULL;
2158 return cref;
2159}
2160
2161#if 0
2162void
2163debug_cref(rb_cref_t *cref)
2164{
2165 while (cref) {
2166 dp(CREF_CLASS(cref));
2167 printf("%ld\n", CREF_VISI(cref));
2168 cref = CREF_NEXT(cref);
2169 }
2170}
2171#endif
2172
2173VALUE
2174rb_vm_cbase(void)
2175{
2176 const rb_execution_context_t *ec = GET_EC();
2177 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2178
2179 if (cfp == 0) {
2180 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
2181 }
2182 return vm_get_cbase(cfp->ep);
2183}
2184
2185/* jump */
2186
2187static VALUE
2188make_localjump_error(const char *mesg, VALUE value, int reason)
2189{
2190 VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
2191
2192 ID id;
2193
2194 switch (reason) {
2195 case TAG_BREAK:
2196 CONST_ID(id, "break");
2197 break;
2198 case TAG_REDO:
2199 CONST_ID(id, "redo");
2200 break;
2201 case TAG_RETRY:
2202 CONST_ID(id, "retry");
2203 break;
2204 case TAG_NEXT:
2205 CONST_ID(id, "next");
2206 break;
2207 case TAG_RETURN:
2208 CONST_ID(id, "return");
2209 break;
2210 default:
2211 CONST_ID(id, "noreason");
2212 break;
2213 }
2214 rb_iv_set(exc, "@exit_value", value);
2215 rb_iv_set(exc, "@reason", ID2SYM(id));
2216 return exc;
2217}
2218
2219void
2220rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
2221{
2222 VALUE exc = make_localjump_error(mesg, value, reason);
2223 rb_exc_raise(exc);
2224}
2225
2226VALUE
2227rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val)
2228{
2229 const char *mesg;
2230
2231 switch (state) {
2232 case TAG_RETURN:
2233 mesg = "unexpected return";
2234 break;
2235 case TAG_BREAK:
2236 mesg = "unexpected break";
2237 break;
2238 case TAG_NEXT:
2239 mesg = "unexpected next";
2240 break;
2241 case TAG_REDO:
2242 mesg = "unexpected redo";
2243 val = Qnil;
2244 break;
2245 case TAG_RETRY:
2246 mesg = "retry outside of rescue clause";
2247 val = Qnil;
2248 break;
2249 default:
2250 return Qnil;
2251 }
2252 if (UNDEF_P(val)) {
2253 val = GET_EC()->tag->retval;
2254 }
2255 return make_localjump_error(mesg, val, state);
2256}
2257
2258void
2259rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state)
2260{
2261 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
2262 if (!NIL_P(exc)) rb_exc_raise(exc);
2263 EC_JUMP_TAG(GET_EC(), state);
2264}
2265
2266static rb_control_frame_t *
2267next_not_local_frame(rb_control_frame_t *cfp)
2268{
2269 while (VM_ENV_LOCAL_P(cfp->ep)) {
2270 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2271 }
2272 return cfp;
2273}
2274
2275NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
2276
2277static void
2278vm_iter_break(rb_execution_context_t *ec, VALUE val)
2279{
2280 rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
2281 const VALUE *ep = VM_CF_PREV_EP(cfp);
2282 const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
2283
2284 if (!target_cfp) {
2285 rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
2286 }
2287
2288 ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
2289 EC_JUMP_TAG(ec, TAG_BREAK);
2290}
2291
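// rb_iter_break / rb_iter_break_value: public C API equivalents of `break` /
// `break val` inside the block currently being executed.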
2292void
2293rb_iter_break(void)
2294{
2295 vm_iter_break(GET_EC(), Qnil);
2296}
2297
2298void
2299rb_iter_break_value(VALUE val)
2300{
2301 vm_iter_break(GET_EC(), val);
2302}
2303
2304/* optimization: redefine management */
2305
2306short ruby_vm_redefined_flag[BOP_LAST_];
2307static st_table *vm_opt_method_def_table = 0;
2308static st_table *vm_opt_mid_table = 0;
2309
2310void
2311rb_free_vm_opt_tables(void)
2312{
2313 st_free_table(vm_opt_method_def_table);
2314 st_free_table(vm_opt_mid_table);
2315}
2316
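// Map a core class to the *_REDEFINED_OP_FLAG bit used to track redefinition of
// its basic operations; returns 0 for classes that are not tracked.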
2317static int
2318vm_redefinition_check_flag(VALUE klass)
2319{
2320 if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
2321 if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
2322 if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
2323 if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
2324 if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
2325 if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
2326#if 0
2327 if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
2328#endif
2329 if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
2330 if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
2331 if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
2332 if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
2333 if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
2334 return 0;
2335}
2336
2337int
2338rb_vm_check_optimizable_mid(VALUE mid)
2339{
2340 if (!vm_opt_mid_table) {
2341 return FALSE;
2342 }
2343
2344 return st_lookup(vm_opt_mid_table, mid, NULL);
2345}
2346
2347static int
2348vm_redefinition_check_method_type(const rb_method_entry_t *me)
2349{
2350 if (me->called_id != me->def->original_id) {
2351 return FALSE;
2352 }
2353
2354 if (METHOD_ENTRY_BASIC(me)) return TRUE;
2355
2356 const rb_method_definition_t *def = me->def;
2357 switch (def->type) {
2358 case VM_METHOD_TYPE_CFUNC:
2359 case VM_METHOD_TYPE_OPTIMIZED:
2360 return TRUE;
2361 default:
2362 return FALSE;
2363 }
2364}
2365
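// Called when a tracked method is (re)defined. If the method backs a basic
// operation (BOP), record the redefinition in ruby_vm_redefined_flag and notify
// YJIT/ZJIT so they can invalidate code that assumed the original behavior.
// For example, `class Integer; def +(x); 0; end; end` flips BOP_PLUS for Integer
// and triggers the performance warning below.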
2366static void
2367rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
2368{
2369 st_data_t bop;
2370 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
2371 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
2372 klass = RBASIC_CLASS(klass);
2373 }
2374 if (vm_redefinition_check_method_type(me)) {
2375 if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
2376 int flag = vm_redefinition_check_flag(klass);
2377 if (flag != 0) {
2378 rb_category_warn(
2379 RB_WARN_CATEGORY_PERFORMANCE,
2380 "Redefining '%s#%s' disables interpreter and JIT optimizations",
2381 rb_class2name(me->owner),
2382 rb_id2name(me->called_id)
2383 );
2384 rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2385 rb_zjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2386 ruby_vm_redefined_flag[bop] |= flag;
2387 }
2388 }
2389 }
2390}
2391
2392static enum rb_id_table_iterator_result
2393check_redefined_method(ID mid, VALUE value, void *data)
2394{
2395 VALUE klass = (VALUE)data;
2396 const rb_method_entry_t *me = (rb_method_entry_t *)value;
2397 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
2398
2399 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
2400
2401 return ID_TABLE_CONTINUE;
2402}
2403
2404void
2405rb_vm_check_redefinition_by_prepend(VALUE klass)
2406{
2407 if (!vm_redefinition_check_flag(klass)) return;
2408 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
2409}
2410
2411static void
2412add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop)
2413{
2414 st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
2415 st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
2416}
2417
2418static void
2419add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop)
2420{
2421 const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
2422
2423 if (me && vm_redefinition_check_method_type(me)) {
2424 add_opt_method_entry_bop(me, mid, bop);
2425 }
2426 else {
2427 rb_bug("undefined optimized method: %s", rb_id2name(mid));
2428 }
2429}
2430
2431static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid);
2432
2433static void
2434add_opt_method_entry(const rb_method_entry_t *me)
2435{
2436 if (me && vm_redefinition_check_method_type(me)) {
2437 ID mid = me->called_id;
2438 enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid);
2439 if ((int)bop >= 0) {
2440 add_opt_method_entry_bop(me, mid, bop);
2441 }
2442 }
2443}
2444
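// Register every method that the VM treats as a basic operation. The OP/C macros
// expand so that, e.g., `OP(PLUS, PLUS), (C(Integer), C(Float), ...)` marks
// Integer#+ and Float#+ as optimized methods guarded by BOP_PLUS.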
2445static void
2446vm_init_redefined_flag(void)
2447{
2448 ID mid;
2449 enum ruby_basic_operators bop;
2450
2451#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
2452#define C(k) add_opt_method(rb_c##k, mid, bop)
2453 OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
2454 OP(MINUS, MINUS), (C(Integer), C(Float));
2455 OP(MULT, MULT), (C(Integer), C(Float));
2456 OP(DIV, DIV), (C(Integer), C(Float));
2457 OP(MOD, MOD), (C(Integer), C(Float));
2458 OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
2459 OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
2460 C(NilClass), C(TrueClass), C(FalseClass));
2461 OP(LT, LT), (C(Integer), C(Float));
2462 OP(LE, LE), (C(Integer), C(Float));
2463 OP(GT, GT), (C(Integer), C(Float));
2464 OP(GE, GE), (C(Integer), C(Float));
2465 OP(LTLT, LTLT), (C(String), C(Array));
2466 OP(GTGT, GTGT), (C(Integer));
2467 OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
2468 OP(ASET, ASET), (C(Array), C(Hash));
2469 OP(Length, LENGTH), (C(Array), C(String), C(Hash));
2470 OP(Size, SIZE), (C(Array), C(String), C(Hash));
2471 OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
2472 OP(Succ, SUCC), (C(Integer), C(String));
2473 OP(EqTilde, MATCH), (C(Regexp), C(String));
2474 OP(Freeze, FREEZE), (C(String), C(Array), C(Hash));
2475 OP(UMinus, UMINUS), (C(String));
2476 OP(Max, MAX), (C(Array));
2477 OP(Min, MIN), (C(Array));
2478 OP(Hash, HASH), (C(Array));
2479 OP(Call, CALL), (C(Proc));
2480 OP(And, AND), (C(Integer));
2481 OP(Or, OR), (C(Integer));
2482 OP(NilP, NIL_P), (C(NilClass));
2483 OP(Cmp, CMP), (C(Integer), C(Float), C(String));
2484 OP(Default, DEFAULT), (C(Hash));
2485 OP(IncludeP, INCLUDE_P), (C(Array));
2486#undef C
2487#undef OP
2488}
2489
2490static enum ruby_basic_operators
2491vm_redefinition_bop_for_id(ID mid)
2492{
2493 switch (mid) {
2494#define OP(mid_, bop_) case id##mid_: return BOP_##bop_
2495 OP(PLUS, PLUS);
2496 OP(MINUS, MINUS);
2497 OP(MULT, MULT);
2498 OP(DIV, DIV);
2499 OP(MOD, MOD);
2500 OP(Eq, EQ);
2501 OP(Eqq, EQQ);
2502 OP(LT, LT);
2503 OP(LE, LE);
2504 OP(GT, GT);
2505 OP(GE, GE);
2506 OP(LTLT, LTLT);
2507 OP(AREF, AREF);
2508 OP(ASET, ASET);
2509 OP(Length, LENGTH);
2510 OP(Size, SIZE);
2511 OP(EmptyP, EMPTY_P);
2512 OP(Succ, SUCC);
2513 OP(EqTilde, MATCH);
2514 OP(Freeze, FREEZE);
2515 OP(UMinus, UMINUS);
2516 OP(Max, MAX);
2517 OP(Min, MIN);
2518 OP(Hash, HASH);
2519 OP(Call, CALL);
2520 OP(And, AND);
2521 OP(Or, OR);
2522 OP(NilP, NIL_P);
2523 OP(Cmp, CMP);
2524 OP(Default, DEFAULT);
2525 OP(Pack, PACK);
2526#undef OP
2527 }
2528 return -1;
2529}
2530
2531/* for vm development */
2532
2533#if VMDEBUG
2534static const char *
2535vm_frametype_name(const rb_control_frame_t *cfp)
2536{
2537 switch (VM_FRAME_TYPE(cfp)) {
2538 case VM_FRAME_MAGIC_METHOD: return "method";
2539 case VM_FRAME_MAGIC_BLOCK: return "block";
2540 case VM_FRAME_MAGIC_CLASS: return "class";
2541 case VM_FRAME_MAGIC_TOP: return "top";
2542 case VM_FRAME_MAGIC_CFUNC: return "cfunc";
2543 case VM_FRAME_MAGIC_IFUNC: return "ifunc";
2544 case VM_FRAME_MAGIC_EVAL: return "eval";
2545 case VM_FRAME_MAGIC_RESCUE: return "rescue";
2546 default:
2547 rb_bug("unknown frame");
2548 }
2549}
2550#endif
2551
2552static VALUE
2553frame_return_value(const struct vm_throw_data *err)
2554{
2555 if (THROW_DATA_P(err) &&
2556 THROW_DATA_STATE(err) == TAG_BREAK &&
2557 THROW_DATA_CONSUMED_P(err) == FALSE) {
2558 return THROW_DATA_VAL(err);
2559 }
2560 else {
2561 return Qnil;
2562 }
2563}
2564
2565#if 0
2566/* for debug */
2567static const char *
2568frame_name(const rb_control_frame_t *cfp)
2569{
2570 unsigned long type = VM_FRAME_TYPE(cfp);
2571#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2572 C(METHOD);
2573 C(BLOCK);
2574 C(CLASS);
2575 C(TOP);
2576 C(CFUNC);
2577 C(PROC);
2578 C(IFUNC);
2579 C(EVAL);
2580 C(LAMBDA);
2581 C(RESCUE);
2582 C(DUMMY);
2583#undef C
2584 return "unknown";
2585}
2586#endif
2587
2588// cfp_returning_with_value:
2589// Whether cfp is the last frame in the unwinding process for a non-local return.
2590static void
2591hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err)
2592{
2593 if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
2594 return;
2595 }
2596 else {
2597 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
2598 rb_hook_list_t *local_hooks = NULL;
2599 unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
2600 if (RB_UNLIKELY(local_hooks_cnt > 0)) {
2601 local_hooks = rb_iseq_local_hooks(iseq, rb_ec_ractor_ptr(ec), false);
2602 }
2603
2604 switch (VM_FRAME_TYPE(ec->cfp)) {
2605 case VM_FRAME_MAGIC_METHOD:
2606 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
2607 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2608
2609 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2610 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
2611 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2612 }
2613
2614 THROW_DATA_CONSUMED_SET(err);
2615 break;
2616 case VM_FRAME_MAGIC_BLOCK:
2617 if (VM_FRAME_BMETHOD_P(ec->cfp)) {
2618 VALUE bmethod_return_value = frame_return_value(err);
2619 if (cfp_returning_with_value) {
2620 // Non-local return terminating at a BMETHOD control frame.
2621 bmethod_return_value = THROW_DATA_VAL(err);
2622 }
2623
2624
2625 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
2626 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2627 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2628 ec->cfp->self, 0, 0, 0, bmethod_return_value, TRUE);
2629 }
2630
2631 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
2632
2633 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
2634 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2635 rb_vm_frame_method_entry(ec->cfp)->called_id,
2636 rb_vm_frame_method_entry(ec->cfp)->owner,
2637 bmethod_return_value);
2638
2639 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
2640 unsigned int local_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
2641 if (UNLIKELY(local_hooks_cnt > 0)) {
2642 local_hooks = rb_method_def_local_hooks(me->def, rb_ec_ractor_ptr(ec), false);
2643 if (local_hooks && local_hooks->events & RUBY_EVENT_RETURN) {
2644 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
2645 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2646 rb_vm_frame_method_entry(ec->cfp)->called_id,
2647 rb_vm_frame_method_entry(ec->cfp)->owner,
2648 bmethod_return_value, TRUE);
2649 }
2650 }
2651
2652 THROW_DATA_CONSUMED_SET(err);
2653 }
2654 else {
2655 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2656 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2657 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2658 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2659 }
2660 THROW_DATA_CONSUMED_SET(err);
2661 }
2662 break;
2663 case VM_FRAME_MAGIC_CLASS:
2664 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
2665 break;
2666 }
2667 }
2668}
2669
2670/* evaluator body */
2671
2672/* finish
2673 VMe (h1) finish
2674 VM finish F1 F2
2675 cfunc finish F1 F2 C1
2676 rb_funcall finish F1 F2 C1
2677 VMe finish F1 F2 C1
2678 VM finish F1 F2 C1 F3
2679
2680 F1 - F3 : pushed by VM
2681 C1 : pushed by send insn (CFUNC)
2682
2683 struct CONTROL_FRAME {
2684 VALUE *pc; // cfp[0], program counter
2685 VALUE *sp; // cfp[1], stack pointer
2686 rb_iseq_t *iseq; // cfp[2], iseq
2687 VALUE self; // cfp[3], self
2688 const VALUE *ep; // cfp[4], env pointer
2689 const void *block_code; // cfp[5], block code
2690 };
2691
2692 struct rb_captured_block {
2693 VALUE self;
2694 VALUE *ep;
2695 union code;
2696 };
2697
2698 struct METHOD_ENV {
2699 VALUE param0;
2700 ...
2701 VALUE paramN;
2702 VALUE lvar1;
2703 ...
2704 VALUE lvarM;
2705 VALUE cref; // ep[-2]
2706 VALUE special; // ep[-1]
2707 VALUE flags; // ep[ 0] == lep[0]
2708 };
2709
2710 struct BLOCK_ENV {
2711 VALUE block_param0;
2712 ...
2713 VALUE block_paramN;
2714 VALUE block_lvar1;
2715 ...
2716 VALUE block_lvarM;
2717 VALUE cref; // ep[-2]
2718 VALUE special; // ep[-1]
2719 VALUE flags; // ep[ 0]
2720 };
2721
2722 struct CLASS_ENV {
2723 VALUE class_lvar0;
2724 ...
2725 VALUE class_lvarN;
2726 VALUE cref;
2727 VALUE prev_ep; // for frame jump
2728 VALUE flags;
2729 };
2730
2731 struct C_METHOD_CONTROL_FRAME {
2732 VALUE *pc; // 0
2733 VALUE *sp; // stack pointer
2734 rb_iseq_t *iseq; // cmi
2735 VALUE self; // ?
2736 VALUE *ep; // ep == lep
2737 void *code; //
2738 };
2739
2740 struct C_BLOCK_CONTROL_FRAME {
2741 VALUE *pc; // point only "finish" insn
2742 VALUE *sp; // sp
2743 rb_iseq_t *iseq; // ?
2744 VALUE self; //
2745 VALUE *ep; // ep
2746 void *code; //
2747 };
2748 */
2749
2750static inline VALUE
2751vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo);
2752static inline VALUE
2753vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result);
2754
2755// for non-Emscripten Wasm build, use vm_exec with optimized setjmp for runtime performance
2756#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2757
2758struct rb_vm_exec_context {
2759 rb_execution_context_t *const ec;
2760 struct rb_vm_tag *const tag;
2761
2762 VALUE result;
2763};
2764
2765static void
2766vm_exec_bottom_main(void *context)
2767{
2768 struct rb_vm_exec_context *ctx = context;
2769 rb_execution_context_t *ec = ctx->ec;
2770
2771 ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec));
2772}
2773
2774static void
2775vm_exec_bottom_rescue(void *context)
2776{
2777 struct rb_vm_exec_context *ctx = context;
2778 rb_execution_context_t *ec = ctx->ec;
2779
2780 ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo);
2781}
2782#endif
2783
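// vm_exec (aliased to rb_vm_exec): push a VM tag for non-local jumps, try JIT
// code first via jit_exec(), fall back to the interpreter loop (vm_exec_core),
// and route any raised state through vm_exec_loop/vm_exec_handle_exception.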
2784VALUE
2785vm_exec(rb_execution_context_t *ec)
2786{
2787 VALUE result = Qundef;
2788
2789 EC_PUSH_TAG(ec);
2790
2791 _tag.retval = Qnil;
2792
2793#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2794 struct rb_vm_exec_context ctx = {
2795 .ec = ec,
2796 .tag = &_tag,
2797 };
2798 struct rb_wasm_try_catch try_catch;
2799
2800 EC_REPUSH_TAG();
2801
2802 rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx);
2803
2804 rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf));
2805
2806 result = ctx.result;
2807#else
2808 enum ruby_tag_type state;
2809 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2810 if (UNDEF_P(result = jit_exec(ec))) {
2811 result = vm_exec_core(ec);
2812 }
2813 /* fallback to the VM */
2814 result = vm_exec_loop(ec, TAG_NONE, &_tag, result);
2815 }
2816 else {
2817 result = vm_exec_loop(ec, state, &_tag, ec->errinfo);
2818 }
2819#endif
2820
2821 EC_POP_TAG();
2822 return result;
2823}
2824
2825static inline VALUE
2826vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
2827 struct rb_vm_tag *tag, VALUE result)
2828{
2829 if (state == TAG_NONE) { /* no jumps, result is discarded */
2830 goto vm_loop_start;
2831 }
2832
2833 rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
2834 while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
2835 // caught a jump, exec the handler. JIT code in jit_exec_exception()
2836 // may return Qundef to run remaining frames with vm_exec_core().
2837 if (UNDEF_P(result = jit_exec_exception(ec))) {
2838 result = vm_exec_core(ec);
2839 }
2840 vm_loop_start:
2841 VM_ASSERT(ec->tag == tag);
2842 /* when caught `throw`, `tag.state` is set. */
2843 if ((state = tag->state) == TAG_NONE) break;
2844 tag->state = TAG_NONE;
2845 }
2846
2847 return result;
2848}
2849
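// Walk the control frames down to (and including) the first FINISHED frame and
// materialize the pc/iseq that ZJIT-compiled frames leave lazily filled (also
// clearing jit_return), so that unwinding and catch-table lookup below see
// fully populated frames.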
2850static inline void
2851zjit_materialize_frames(rb_control_frame_t *cfp)
2852{
2853 if (!rb_zjit_enabled_p) return;
2854
2855 while (true) {
2856 if (CFP_ZJIT_FRAME(cfp)) {
2857 const zjit_jit_frame_t *jit_frame = (const zjit_jit_frame_t *)cfp->jit_return;
2858 cfp->pc = jit_frame->pc;
2859 cfp->_iseq = (rb_iseq_t *)jit_frame->iseq;
2860 if (jit_frame->materialize_block_code) {
2861 cfp->block_code = NULL;
2862 }
2863 cfp->jit_return = 0;
2864 }
2865 if (VM_FRAME_FINISHED_P(cfp)) break;
2866 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2867 }
2868}
2869
2870void
2871rb_zjit_materialize_frames(rb_control_frame_t *cfp)
2872{
2873 zjit_materialize_frames(cfp);
2874}
2875
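// Core of the non-local jump / exception machinery: pop frames without a Ruby
// pc/iseq, then search the current iseq's catch table for an entry matching
// `state` (rescue/ensure/break/next/redo/retry). Returns Qundef to resume
// execution (possibly inside a freshly pushed RESCUE frame) or the final value
// of a non-local return ending at this frame; otherwise pops the frame and
// keeps unwinding.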
2876static inline VALUE
2877vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo)
2878{
2879 struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
2880
2881 for (;;) {
2882 unsigned int i;
2883 const struct iseq_catch_table_entry *entry;
2884 const struct iseq_catch_table *ct;
2885 unsigned long epc, cont_pc, cont_sp;
2886 const rb_iseq_t *catch_iseq;
2887 VALUE type;
2888 const rb_control_frame_t *escape_cfp;
2889
2890 cont_pc = cont_sp = 0;
2891 catch_iseq = NULL;
2892
2893 while (CFP_PC(ec->cfp) == 0 || CFP_ISEQ(ec->cfp) == 0) {
2894 if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
2895 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
2896 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2897 rb_vm_frame_method_entry(ec->cfp)->called_id,
2898 rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
2899 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
2900 rb_vm_frame_method_entry(ec->cfp)->owner,
2901 rb_vm_frame_method_entry(ec->cfp)->def->original_id);
2902 }
2903 rb_vm_pop_frame(ec);
2904 }
2905
2906 rb_control_frame_t *const cfp = ec->cfp;
2907 epc = CFP_PC(cfp) - ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded;
2908
2909 escape_cfp = NULL;
2910 if (state == TAG_BREAK || state == TAG_RETURN) {
2911 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2912
2913 if (cfp == escape_cfp) {
2914 if (state == TAG_RETURN) {
2915 if (!VM_FRAME_FINISHED_P(cfp)) {
2916 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2917 THROW_DATA_STATE_SET(err, state = TAG_BREAK);
2918 }
2919 else {
2920 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
2921 if (ct) for (i = 0; i < ct->size; i++) {
2922 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2923 if (entry->start < epc && entry->end >= epc) {
2924 if (entry->type == CATCH_TYPE_ENSURE) {
2925 catch_iseq = entry->iseq;
2926 cont_pc = entry->cont;
2927 cont_sp = entry->sp;
2928 break;
2929 }
2930 }
2931 }
2932 if (catch_iseq == NULL) {
2933 ec->errinfo = Qnil;
2934 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2935 // cfp == escape_cfp here so calling with cfp_returning_with_value = true
2936 hook_before_rewind(ec, true, state, err);
2937 rb_vm_pop_frame(ec);
2938 return THROW_DATA_VAL(err);
2939 }
2940 }
2941 /* through */
2942 }
2943 else {
2944 /* TAG_BREAK */
2945 *cfp->sp++ = THROW_DATA_VAL(err);
2946 ec->errinfo = Qnil;
2947 zjit_materialize_frames(cfp);
2948 return Qundef;
2949 }
2950 }
2951 }
2952
2953 if (state == TAG_RAISE) {
2954 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
2955 if (ct) for (i = 0; i < ct->size; i++) {
2956 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2957 if (entry->start < epc && entry->end >= epc) {
2958
2959 if (entry->type == CATCH_TYPE_RESCUE ||
2960 entry->type == CATCH_TYPE_ENSURE) {
2961 catch_iseq = entry->iseq;
2962 cont_pc = entry->cont;
2963 cont_sp = entry->sp;
2964 break;
2965 }
2966 }
2967 }
2968 }
2969 else if (state == TAG_RETRY) {
2970 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
2971 if (ct) for (i = 0; i < ct->size; i++) {
2972 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2973 if (entry->start < epc && entry->end >= epc) {
2974
2975 if (entry->type == CATCH_TYPE_ENSURE) {
2976 catch_iseq = entry->iseq;
2977 cont_pc = entry->cont;
2978 cont_sp = entry->sp;
2979 break;
2980 }
2981 else if (entry->type == CATCH_TYPE_RETRY) {
2982 const rb_control_frame_t *escape_cfp;
2983 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2984 if (cfp == escape_cfp) {
2985 zjit_materialize_frames(cfp);
2986 cfp->pc = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded + entry->cont;
2987 ec->errinfo = Qnil;
2988 return Qundef;
2989 }
2990 }
2991 }
2992 }
2993 }
2994 else if ((state == TAG_BREAK && !escape_cfp) ||
2995 (state == TAG_REDO) ||
2996 (state == TAG_NEXT)) {
2997 type = (const enum rb_catch_type[TAG_MASK]) {
2998 [TAG_BREAK] = CATCH_TYPE_BREAK,
2999 [TAG_NEXT] = CATCH_TYPE_NEXT,
3000 [TAG_REDO] = CATCH_TYPE_REDO,
3001 /* otherwise = dontcare */
3002 }[state];
3003
3004 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
3005 if (ct) for (i = 0; i < ct->size; i++) {
3006 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
3007
3008 if (entry->start < epc && entry->end >= epc) {
3009 if (entry->type == CATCH_TYPE_ENSURE) {
3010 catch_iseq = entry->iseq;
3011 cont_pc = entry->cont;
3012 cont_sp = entry->sp;
3013 break;
3014 }
3015 else if (entry->type == type) {
3016 zjit_materialize_frames(cfp);
3017 cfp->pc = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded + entry->cont;
3018 cfp->sp = vm_base_ptr(cfp) + entry->sp;
3019
3020 if (state != TAG_REDO) {
3021 *cfp->sp++ = THROW_DATA_VAL(err);
3022 }
3023 ec->errinfo = Qnil;
3024 VM_ASSERT(ec->tag->state == TAG_NONE);
3025 return Qundef;
3026 }
3027 }
3028 }
3029 }
3030 else {
3031 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
3032 if (ct) for (i = 0; i < ct->size; i++) {
3033 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
3034 if (entry->start < epc && entry->end >= epc) {
3035
3036 if (entry->type == CATCH_TYPE_ENSURE) {
3037 catch_iseq = entry->iseq;
3038 cont_pc = entry->cont;
3039 cont_sp = entry->sp;
3040 break;
3041 }
3042 }
3043 }
3044 }
3045
3046 if (catch_iseq != NULL) { /* found catch table */
3047 /* enter catch scope */
3048 const int arg_size = 1;
3049
3050 rb_iseq_check(catch_iseq);
3051 zjit_materialize_frames(cfp); // vm_base_ptr looks at cfp->_iseq
3052 cfp->sp = vm_base_ptr(cfp) + cont_sp;
3053 cfp->pc = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded + cont_pc;
3054
3055 /* push block frame */
3056 cfp->sp[0] = (VALUE)err;
3057 vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
3058 cfp->self,
3059 VM_GUARDED_PREV_EP(cfp->ep),
3060 0, /* cref or me */
3061 ISEQ_BODY(catch_iseq)->iseq_encoded,
3062 cfp->sp + arg_size /* push value */,
3063 ISEQ_BODY(catch_iseq)->local_table_size - arg_size,
3064 ISEQ_BODY(catch_iseq)->stack_max);
3065
3066 state = 0;
3067 ec->tag->state = TAG_NONE;
3068 ec->errinfo = Qnil;
3069
3070 return Qundef;
3071 }
3072 else {
3073 hook_before_rewind(ec, (cfp == escape_cfp), state, err);
3074
3075 if (VM_FRAME_FINISHED_P(ec->cfp)) {
3076 rb_vm_pop_frame(ec);
3077 ec->errinfo = (VALUE)err;
3078 rb_vm_tag_jmpbuf_deinit(&ec->tag->buf);
3079 ec->tag = ec->tag->prev;
3080 EC_JUMP_TAG(ec, state);
3081 }
3082 else {
3083 rb_vm_pop_frame(ec);
3084 }
3085 }
3086 }
3087}
3088
3089/* misc */
3090
3091VALUE
3092rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box)
3093{
3094 rb_execution_context_t *ec = GET_EC();
3095 VALUE val;
3096 vm_set_top_stack(ec, iseq, box);
3097 val = vm_exec(ec);
3098 return val;
3099}
3100
3101VALUE
3102rb_iseq_eval_main(const rb_iseq_t *iseq)
3103{
3104 rb_execution_context_t *ec = GET_EC();
3105 VALUE val;
3106 vm_set_main_stack(ec, iseq);
3107 val = vm_exec(ec);
3108 return val;
3109}
3110
3111int
3112rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
3113{
3114 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
3115
3116 if (me) {
3117 if (idp) *idp = me->def->original_id;
3118 if (called_idp) *called_idp = me->called_id;
3119 if (klassp) *klassp = me->owner;
3120 return TRUE;
3121 }
3122 else {
3123 return FALSE;
3124 }
3125}
3126
3127int
3128rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
3129{
3130 return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
3131}
3132
3133int
3134rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
3135{
3136 return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
3137}
3138
3139VALUE
3140rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
3141 VALUE block_handler, VALUE filename)
3142{
3143 rb_execution_context_t *ec = GET_EC();
3144 const rb_control_frame_t *reg_cfp = ec->cfp;
3145 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3146 const rb_box_t *box = rb_current_box();
3147 VALUE val;
3148
3149 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
3150 recv, GC_GUARDED_PTR(box),
3151 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
3152 0, reg_cfp->sp, 0, 0);
3153
3154 val = (*func)(arg);
3155
3156 rb_vm_pop_frame(ec);
3157 return val;
3158}
3159
3160/* Ruby::Box */
3161
3162VALUE
3163rb_vm_call_cfunc_in_box(VALUE recv, VALUE (*func)(VALUE, VALUE), VALUE arg1, VALUE arg2,
3164 VALUE filename, const rb_box_t *box)
3165{
3166 rb_execution_context_t *ec = GET_EC();
3167 const rb_control_frame_t *reg_cfp = ec->cfp;
3168 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3169 VALUE val;
3170
3171 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
3172 recv, GC_GUARDED_PTR(box),
3173 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
3174 0, reg_cfp->sp, 0, 0);
3175
3176 val = (*func)(arg1, arg2);
3177
3178 rb_vm_pop_frame(ec);
3179 return val;
3180}
3181
3182void
3183rb_vm_frame_flag_set_box_require(const rb_execution_context_t *ec)
3184{
3185 VM_ASSERT(rb_box_available());
3186 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE);
3187}
3188
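// Resolve the box active on the given control frame: method/cfunc frames carry
// it on the method definition (cme->def->box), TOP/CLASS frames on the local
// environment, and the bootstrap DUMMY frame falls back to the main or root box.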
3189static const rb_box_t *
3190current_box_on_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
3191{
3192 const rb_callable_method_entry_t *cme;
3193 const rb_box_t *box;
3194 const VALUE *lep = VM_EP_RUBY_LEP(ec, cfp);
3195 VM_BOX_ASSERT(lep, "lep should be valid");
3196 VM_BOX_ASSERT(rb_box_available(), "box should be available here");
3197
3198 if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_METHOD) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CFUNC)) {
3199 cme = check_method_entry(lep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
3200 VM_BOX_ASSERT(cme, "cme should be valid");
3201 VM_BOX_ASSERT(cme->def, "cme->def should be valid");
3202 return cme->def->box;
3203 }
3204 else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_TOP) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CLASS)) {
3205 VM_BOX_ASSERT(VM_ENV_LOCAL_P(lep), "lep should be local on MAGIC_TOP or MAGIC_CLASS frames");
3206 return VM_ENV_BOX(lep);
3207 }
3208 else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_DUMMY)) {
3209 // No valid local ep found (just after process boot?)
3210 // return the main box if it has already been created, otherwise the root box (the only valid box until main is initialized)
3211 box = rb_main_box();
3212 if (box)
3213 return box;
3214 return rb_root_box();
3215 }
3216 else {
3217 VM_BOX_CRASHED();
3218 rb_bug("BUG: Local ep without cme/box, flags: %08lX", (unsigned long)lep[VM_ENV_DATA_INDEX_FLAGS]);
3219 }
3221}
3222
3223const rb_box_t *
3224rb_vm_current_box(const rb_execution_context_t *ec)
3225{
3226 return current_box_on_cfp(ec, ec->cfp);
3227}
3228
3229static const rb_control_frame_t *
3230find_loader_control_frame(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const rb_control_frame_t *end_cfp)
3231{
3232 while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
3233 if (!VM_ENV_FRAME_TYPE_P(cfp->ep, VM_FRAME_MAGIC_CFUNC))
3234 break;
3235 if (!BOX_ROOT_P(current_box_on_cfp(ec, cfp)))
3236 break;
3237 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3238 }
3239 VM_ASSERT(RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp));
3240 return cfp;
3241}
3242
3243const rb_box_t *
3244rb_vm_loading_box(const rb_execution_context_t *ec)
3245{
3246 const rb_control_frame_t *cfp, *current_cfp, *end_cfp;
3247
3248 if (!rb_box_available() || !ec)
3249 return rb_root_box();
3250
3251 cfp = ec->cfp;
3252 current_cfp = cfp;
3253 end_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
3254
3255 while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
3256 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE)) {
3257 if (RTEST(cfp->self) && BOX_OBJ_P(cfp->self)) {
3258 // Box#require, #require_relative, #load
3259 return rb_get_box_t(cfp->self);
3260 }
3261 // Kernel#require, #require_relative, #load
3262 cfp = find_loader_control_frame(ec, cfp, end_cfp);
3263 return current_box_on_cfp(ec, cfp);
3264 }
3265 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3266 }
3267 // no require/load with explicit boxes.
3268 return current_box_on_cfp(ec, current_cfp);
3269}
3270
3271/* vm */
3272
3273void
3274rb_vm_update_references(void *ptr)
3275{
3276 if (ptr) {
3277 rb_vm_t *vm = ptr;
3278
3279 vm->self = rb_gc_location(vm->self);
3280 vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
3281 vm->orig_progname = rb_gc_location(vm->orig_progname);
3282 vm->cc_refinement_set = rb_gc_location(vm->cc_refinement_set);
3283
3284 if (vm->root_box)
3285 rb_box_gc_update_references(vm->root_box);
3286 if (vm->main_box)
3287 rb_box_gc_update_references(vm->main_box);
3288
3289 rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd);
3290
3291 if (vm->coverages) {
3292 vm->coverages = rb_gc_location(vm->coverages);
3293 vm->me2counter = rb_gc_location(vm->me2counter);
3294 }
3295 }
3296}
3297
3298void
3299rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
3300{
3301 if (ptr) {
3302 rb_vm_t *vm = ptr;
3303 rb_ractor_t *r = 0;
3304 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3305 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3306 rb_ractor_status_p(r, ractor_running));
3307 if (r->threads.cnt > 0) {
3308 rb_thread_t *th = 0;
3309 ccan_list_for_each(&r->threads.set, th, lt_node) {
3310 VM_ASSERT(th != NULL);
3311 rb_execution_context_t * ec = th->ec;
3312 if (ec->vm_stack) {
3313 VALUE *p = ec->vm_stack;
3314 VALUE *sp = ec->cfp->sp;
3315 while (p < sp) {
3316 if (!RB_SPECIAL_CONST_P(*p)) {
3317 cb(*p, ctx);
3318 }
3319 p++;
3320 }
3321 }
3322 }
3323 }
3324 }
3325 }
3326}
3327
3328static enum rb_id_table_iterator_result
3329vm_mark_negative_cme(VALUE val, void *dmy)
3330{
3331 rb_gc_mark(val);
3332 return ID_TABLE_CONTINUE;
3333}
3334
3335void rb_thread_sched_mark_zombies(rb_vm_t *vm);
3336
3337void
3338rb_vm_mark(void *ptr)
3339{
3340 RUBY_MARK_ENTER("vm");
3341 RUBY_GC_INFO("-------------------------------------------------\n");
3342 if (ptr) {
3343 rb_vm_t *vm = ptr;
3344 rb_ractor_t *r = 0;
3345 long i;
3346
3347 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3348 // ractor.set only contains blocking or running ractors
3349 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3350 rb_ractor_status_p(r, ractor_running));
3351 rb_gc_mark(rb_ractor_self(r));
3352 }
3353
3354 for (struct global_object_list *list = vm->global_object_list; list; list = list->next) {
3355 rb_gc_mark_maybe(*list->varptr);
3356 }
3357
3358 rb_gc_mark_movable(vm->self);
3359
3360 if (vm->root_box) {
3361 rb_box_entry_mark(vm->root_box);
3362 }
3363 if (vm->main_box) {
3364 rb_box_entry_mark(vm->main_box);
3365 }
3366
3367 rb_gc_mark_movable(vm->mark_object_ary);
3368 rb_gc_mark_movable(vm->orig_progname);
3369 rb_gc_mark_movable(vm->coverages);
3370 rb_gc_mark_movable(vm->me2counter);
3371 rb_gc_mark_movable(vm->cc_refinement_set);
3372
3373 rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
3374
3375 rb_hook_list_mark(&vm->global_hooks);
3376
3377 rb_id_table_foreach_values(&vm->negative_cme_table, vm_mark_negative_cme, NULL);
3378 rb_mark_tbl_no_pin(&vm->overloaded_cme_table);
3379 for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
3380 const struct rb_callcache *cc = vm->global_cc_cache_table[i];
3381
3382 if (cc != NULL) {
3383 if (!vm_cc_invalidated_p(cc)) {
3384 rb_gc_mark((VALUE)cc);
3385 }
3386 else {
3387 vm->global_cc_cache_table[i] = NULL;
3388 }
3389 }
3390 }
3391
3392 rb_thread_sched_mark_zombies(vm);
3393 }
3394
3395 RUBY_MARK_LEAVE("vm");
3396}
3397
3398#undef rb_vm_register_special_exception
3399void
3400rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
3401{
3402 rb_vm_t *vm = GET_VM();
3403 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
3404 OBJ_FREEZE(exc);
3405 ((VALUE *)vm->special_exceptions)[sp] = exc;
3406 rb_vm_register_global_object(exc);
3407}
3408
3409void rb_objspace_free_objects(void *objspace);
3410
3411int
3412ruby_vm_destruct(rb_vm_t *vm)
3413{
3414 RUBY_FREE_ENTER("vm");
3415 ruby_vm_during_cleanup = true;
3416
3417 if (vm) {
3418 rb_thread_t *th = vm->ractor.main_thread;
3419
3420 if (rb_free_at_exit) {
3421 rb_free_encoded_insn_data();
3422 rb_free_global_enc_table();
3423 rb_free_loaded_builtin_table();
3424 rb_free_global_symbol_table();
3425
3426 rb_free_shared_fiber_pool();
3427 rb_free_transcoder_table();
3428 rb_free_vm_opt_tables();
3429 rb_free_warning();
3430 rb_free_rb_global_tbl();
3431
3432 rb_id_table_free_items(&vm->negative_cme_table);
3433 st_free_embedded_table(&vm->overloaded_cme_table);
3434
3435 // TODO: Is this ignorable for classext->m_tbl ?
3436 // rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl);
3437
3438 st_free_embedded_table(&vm->static_ext_inits);
3439
3440 rb_id_table_free_items(&vm->constant_cache);
3441 set_free_embedded_table(&vm->unused_block_warning_table);
3442
3443 rb_thread_free_native_thread(th);
3444
3445#ifndef HAVE_SETPROCTITLE
3446 ruby_free_proctitle();
3447#endif
3448 }
3449 else {
3450 rb_fiber_reset_root_local_storage(th);
3451 thread_free(th);
3452 }
3453
3454 struct rb_objspace *objspace = vm->gc.objspace;
3455
3456 rb_vm_living_threads_init(vm);
3457 ruby_vm_run_at_exit_hooks(vm);
3458 st_free_embedded_table(&vm->ci_table);
3459 RB_ALTSTACK_FREE(vm->main_altstack);
3460
3461 struct global_object_list *next;
3462 for (struct global_object_list *list = vm->global_object_list; list; list = next) {
3463 next = list->next;
3464 xfree(list);
3465 }
3466
3467 if (objspace) {
3468 if (rb_free_at_exit) {
3469 rb_objspace_free_objects(objspace);
3470 rb_free_generic_fields_tbl_();
3471 rb_free_default_rand_key();
3472 }
3473 rb_objspace_free(objspace);
3474 }
3475 rb_native_mutex_destroy(&vm->workqueue_lock);
3476 /* after freeing objspace, you *can't* use ruby_xfree() */
3477 ruby_current_vm_ptr = NULL;
3478
3479 if (rb_free_at_exit) {
3480 rb_shape_free_all();
3481#if USE_YJIT
3482 rb_yjit_free_at_exit();
3483#endif
3484 }
3485 }
3486 RUBY_FREE_LEAVE("vm");
3487 return 0;
3488}
3489
3490size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
3491
3492 // Used for VM memsize reporting. Iterator callback that adds the memsize of a
3493 // single constant-cache table to the running total.
3494static enum rb_id_table_iterator_result
3495vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
3496{
3497 *((size_t *) size) += rb_set_memsize((set_table *) ics);
3498 return ID_TABLE_CONTINUE;
3499}
3500
3501// Returns a size_t representing the memory footprint of the VM's constant
3502// cache, which is the memsize of the table as well as the memsize of all of the
3503// nested tables.
3504static size_t
3505vm_memsize_constant_cache(void)
3506{
3507 rb_vm_t *vm = GET_VM();
3508 size_t size = rb_id_table_memsize(&vm->constant_cache) - sizeof(struct rb_id_table);
3509
3510 rb_id_table_foreach(&vm->constant_cache, vm_memsize_constant_cache_i, &size);
3511 return size;
3512}
3513
3514static size_t
3515vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
3516{
3517 size_t size = 0;
3518
3519 while (at_exit) {
3520 size += sizeof(rb_at_exit_list);
3521 at_exit = at_exit->next;
3522 }
3523
3524 return size;
3525}
3526
3527// Used for VM memsize reporting. Returns the size of the builtin function
3528// table if it has been defined.
3529static size_t
3530vm_memsize_builtin_function_table(const struct rb_builtin_function *builtin_function_table)
3531{
3532 return builtin_function_table == NULL ? 0 : sizeof(struct rb_builtin_function);
3533}
3534
3535// Reports the memsize of the VM struct object and the structs that are
3536// associated with it.
3537static size_t
3538vm_memsize(const void *ptr)
3539{
3540 rb_vm_t *vm = GET_VM();
3541
3542 return (
3543 sizeof(rb_vm_t) +
3544 rb_vm_memsize_postponed_job_queue() +
3545 rb_vm_memsize_workqueue(&vm->workqueue) +
3546 vm_memsize_at_exit_list(vm->at_exit) +
3547 (rb_st_memsize(&vm->ci_table) - sizeof(struct st_table)) +
3548 vm_memsize_builtin_function_table(vm->builtin_function_table) +
3549 (rb_id_table_memsize(&vm->negative_cme_table) - sizeof(struct rb_id_table)) +
3550 (rb_st_memsize(&vm->overloaded_cme_table) - sizeof(struct st_table)) +
3551 vm_memsize_constant_cache()
3552 );
3553
3554 // TODO
3555 // struct { struct ccan_list_head set; } ractor;
3556 // void *main_altstack; #ifdef USE_SIGALTSTACK
3557 // struct rb_objspace *objspace;
3558}
3559
3560const rb_data_type_t ruby_vm_data_type = {
3561 "VM",
3562 {0, 0, vm_memsize,},
3563 0, 0, RUBY_TYPED_FREE_IMMEDIATELY,
3564};
3565
3566#define vm_data_type ruby_vm_data_type
3567
3568static VALUE
3569vm_default_params(void)
3570{
3571 rb_vm_t *vm = GET_VM();
3572 VALUE result = rb_hash_new_with_size(4);
3573#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
3574 SET(thread_vm_stack_size);
3575 SET(thread_machine_stack_size);
3576 SET(fiber_vm_stack_size);
3577 SET(fiber_machine_stack_size);
3578#undef SET
3579 rb_obj_freeze(result);
3580 return result;
3581}
3582
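// Read a stack-size parameter from the environment variable `name`, clamp it to
// `min_value`, and round it up to the next multiple of RUBY_VM_SIZE_ALIGN
// (e.g. a value one byte above the alignment is rounded up to two units).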
3583static size_t
3584get_param(const char *name, size_t default_value, size_t min_value)
3585{
3586 const char *envval;
3587 size_t result = default_value;
3588 if ((envval = getenv(name)) != 0) {
3589 long val = atol(envval);
3590 if (val < (long)min_value) {
3591 val = (long)min_value;
3592 }
3593 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
3594 }
3595 if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */
3596
3597 return result;
3598}
3599
3600static void
3601check_machine_stack_size(size_t *sizep)
3602{
3603#ifdef PTHREAD_STACK_MIN
3604 size_t size = *sizep;
3605#endif
3606
3607#ifdef PTHREAD_STACK_MIN
3608 if (size < (size_t)PTHREAD_STACK_MIN) {
3609 *sizep = (size_t)PTHREAD_STACK_MIN * 2;
3610 }
3611#endif
3612}
3613
3614static void
3615vm_default_params_setup(rb_vm_t *vm)
3616{
3617 vm->default_params.thread_vm_stack_size =
3618 get_param("RUBY_THREAD_VM_STACK_SIZE",
3619 RUBY_VM_THREAD_VM_STACK_SIZE,
3620 RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
3621
3622 vm->default_params.thread_machine_stack_size =
3623 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
3624 RUBY_VM_THREAD_MACHINE_STACK_SIZE,
3625 RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
3626
3627 vm->default_params.fiber_vm_stack_size =
3628 get_param("RUBY_FIBER_VM_STACK_SIZE",
3629 RUBY_VM_FIBER_VM_STACK_SIZE,
3630 RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
3631
3632 vm->default_params.fiber_machine_stack_size =
3633 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
3634 RUBY_VM_FIBER_MACHINE_STACK_SIZE,
3635 RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
3636
3637 /* environment dependent check */
3638 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
3639 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
3640}
3641
3642static void
3643vm_init2(rb_vm_t *vm)
3644{
3645 rb_vm_living_threads_init(vm);
3646 vm->thread_report_on_exception = 1;
3647 vm->src_encoding_index = -1;
3648
3649 vm_default_params_setup(vm);
3650}
3651
3652void
3653rb_execution_context_update(rb_execution_context_t *ec)
3654{
3655 /* update VM stack */
3656 if (ec->vm_stack) {
3657 long i;
3658 VM_ASSERT(ec->cfp);
3659 VALUE *p = ec->vm_stack;
3660 VALUE *sp = ec->cfp->sp;
3661 rb_control_frame_t *cfp = ec->cfp;
3662 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3663
3664 for (i = 0; i < (long)(sp - p); i++) {
3665 VALUE ref = p[i];
3666 VALUE update = rb_gc_location(ref);
3667 if (ref != update) {
3668 p[i] = update;
3669 }
3670 }
3671
3672 while (cfp != limit_cfp) {
3673 const VALUE *ep = cfp->ep;
3674 cfp->self = rb_gc_location(cfp->self);
3675 if (CFP_ZJIT_FRAME(cfp)) {
3676 rb_zjit_jit_frame_update_references((zjit_jit_frame_t *)cfp->jit_return);
3677 // block_code must always be relocated. For ISEQ frames, the JIT caller
3678 // may have written it (gen_block_handler_specval) for passing blocks.
3679 // For C frames, rb_iterate0 may have written an ifunc to block_code
3680 // after the JIT pushed the frame. NULL is safe to pass to rb_gc_location.
3681 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
3682 }
3683 else {
3684 cfp->_iseq = (rb_iseq_t *)rb_gc_location((VALUE)cfp->_iseq);
3685 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
3686 }
3687
3688 if (!VM_ENV_LOCAL_P(ep)) {
3689 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3690 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3691 VM_FORCE_WRITE(&prev_ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(prev_ep[VM_ENV_DATA_INDEX_ENV]));
3692 }
3693
3694 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3695 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(ep[VM_ENV_DATA_INDEX_ENV]));
3696 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ME_CREF], rb_gc_location(ep[VM_ENV_DATA_INDEX_ME_CREF]));
3697 }
3698 }
3699
3700 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3701 }
3702 }
3703
3704 ec->storage = rb_gc_location(ec->storage);
3705
3706 ec->gen_fields_cache.obj = rb_gc_location(ec->gen_fields_cache.obj);
3707 ec->gen_fields_cache.fields_obj = rb_gc_location(ec->gen_fields_cache.fields_obj);
3708}
3709
3710static enum rb_id_table_iterator_result
3711mark_local_storage_i(VALUE local, void *data)
3712{
3713 rb_gc_mark(local);
3714 return ID_TABLE_CONTINUE;
3715}
3716
3717void
3718rb_execution_context_mark(const rb_execution_context_t *ec)
3719{
3720 /* mark VM stack */
3721 if (ec->vm_stack) {
3722 VM_ASSERT(ec->cfp);
3723 VALUE *p = ec->vm_stack;
3724 VALUE *sp = ec->cfp->sp;
3725 rb_control_frame_t *cfp = ec->cfp;
3726 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3727
3728 for (long i = 0; i < (long)(sp - p); i++) {
3729 rb_gc_mark_movable(p[i]);
3730 }
3731
3732 while (cfp != limit_cfp) {
3733 const VALUE *ep = cfp->ep;
3734 VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
3735
3736 rb_gc_mark_movable(cfp->self);
3737 rb_gc_mark_movable((VALUE)CFP_ISEQ(cfp));
3738 // Mark block_code directly (not through rb_zjit_cfp_block_code)
3739 // because rb_iterate0 may write a valid ifunc after JIT frame push.
3740 rb_gc_mark_movable((VALUE)cfp->block_code);
3741
3742 if (VM_ENV_LOCAL_P(ep) && VM_ENV_BOXED_P(ep)) {
3743 const rb_box_t *box = VM_ENV_BOX(ep);
3744 if (BOX_USER_P(box)) {
3745 rb_gc_mark_movable(box->box_object);
3746 }
3747 }
3748
3749 if (!VM_ENV_LOCAL_P(ep)) {
3750 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3751 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3752 rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
3753 }
3754
3755 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3756 rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
3757 rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
3758 }
3759 }
3760
3761 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3762 }
3763 }
3764
3765 /* mark machine stack */
3766 if (ec->machine.stack_start && ec->machine.stack_end &&
3767 ec != GET_EC() /* marked for current ec at the first stage of marking */
3768 ) {
3769 rb_gc_mark_machine_context(ec);
3770 }
3771
3772 rb_gc_mark(ec->errinfo);
3773 rb_gc_mark(ec->root_svar);
3774 if (ec->local_storage) {
3775 rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
3776 }
3777 rb_gc_mark(ec->local_storage_recursive_hash);
3778 rb_gc_mark(ec->local_storage_recursive_hash_for_trace);
3779 rb_gc_mark(ec->private_const_reference);
3780
3781 rb_gc_mark_movable(ec->storage);
3782}
3783
3784void rb_fiber_mark_self(rb_fiber_t *fib);
3785void rb_fiber_update_self(rb_fiber_t *fib);
3786void rb_threadptr_root_fiber_setup(rb_thread_t *th);
3787void rb_root_fiber_obj_setup(rb_thread_t *th);
3788void rb_threadptr_root_fiber_release(rb_thread_t *th);
3789
3790static void
3791thread_compact(void *ptr)
3792{
3793 rb_thread_t *th = ptr;
3794
3795 th->self = rb_gc_location(th->self);
3796}
3797
3798static void
3799thread_mark(void *ptr)
3800{
3801 rb_thread_t *th = ptr;
3802 RUBY_MARK_ENTER("thread");
3803
3804 // ec is null when setting up the thread in rb_threadptr_root_fiber_setup
3805 if (th->ec) {
3806 rb_fiber_mark_self(th->ec->fiber_ptr);
3807 }
3808
3809 /* mark ruby objects */
3810 switch (th->invoke_type) {
3811 case thread_invoke_type_proc:
3812 case thread_invoke_type_ractor_proc:
3813 rb_gc_mark(th->invoke_arg.proc.proc);
3814 rb_gc_mark(th->invoke_arg.proc.args);
3815 break;
3816 case thread_invoke_type_func:
3817 rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
3818 break;
3819 default:
3820 break;
3821 }
3822
3823 rb_gc_mark(rb_ractor_self(th->ractor));
3824 rb_gc_mark(th->thgroup);
3825 rb_gc_mark(th->value);
3826 rb_gc_mark(th->pending_interrupt_queue);
3827 rb_gc_mark(th->pending_interrupt_mask_stack);
3828 rb_gc_mark(th->top_self);
3829 rb_gc_mark(th->top_wrapper);
3830 if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
3831
3832 RUBY_ASSERT(th->ec == NULL || th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
3833 rb_gc_mark(th->last_status);
3834 rb_gc_mark(th->locking_mutex);
3835 rb_gc_mark(th->name);
3836
3837 rb_gc_mark(th->scheduler);
3838
3839 rb_threadptr_interrupt_exec_task_mark(th);
3840
3841 RUBY_MARK_LEAVE("thread");
3842}
3843
3844void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c
3845
3846static void
3847thread_free(void *ptr)
3848{
3849 rb_thread_t *th = ptr;
3850 RUBY_FREE_ENTER("thread");
3851
3852 rb_threadptr_sched_free(th);
3853
3854 if (th->locking_mutex != Qfalse) {
3855 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
3856 }
3857 if (th->keeping_mutexes != NULL) {
3858 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
3859 }
3860
3861 ruby_xfree(th->specific_storage);
3862
3863 if (th->vm && th->vm->ractor.main_thread == th) {
3864 RUBY_GC_INFO("MRI main thread\n");
3865 }
3866 else {
3867 // ruby_xfree(th->nt);
3868 // TODO: the MN system collects nt, but without the MN system it should be freed here.
3869 if (!th->main_thread) {
3870 ruby_xfree(th);
3871 }
3872 }
3873
3874 RUBY_FREE_LEAVE("thread");
3875}
3876
3877static size_t
3878thread_memsize(const void *ptr)
3879{
3880 const rb_thread_t *th = ptr;
3881 size_t size = sizeof(rb_thread_t);
3882
3883 if (!th->root_fiber) {
3884 size += th->ec->vm_stack_size * sizeof(VALUE);
3885 }
3886 if (th->ec->local_storage) {
3887 size += rb_id_table_memsize(th->ec->local_storage);
3888 }
3889 return size;
3890}
3891
3892#define thread_data_type ruby_threadptr_data_type
3893const rb_data_type_t ruby_threadptr_data_type = {
3894 "VM/thread",
3895 {
3896 thread_mark,
3897 thread_free,
3898 thread_memsize,
3899 thread_compact,
3900 },
3901 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3902};
3903
3904VALUE
3905rb_obj_is_thread(VALUE obj)
3906{
3907 return RBOOL(rb_typeddata_is_kind_of(obj, &thread_data_type));
3908}
3909
3910static VALUE
3911thread_alloc(VALUE klass)
3912{
3913 rb_thread_t *th;
3914 return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
3915}
3916
3917void
3918rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3919{
3920 ec->vm_stack = stack;
3921 ec->vm_stack_size = size;
3922}
3923
3924void
3925rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3926{
3927 rb_ec_set_vm_stack(ec, stack, size);
3928
3929#if VM_CHECK_MODE > 0
3930 MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it
3931#endif
3932
3933 ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3934
3935 vm_push_frame(ec,
3936 NULL /* dummy iseq */,
3937 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
3938 Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
3939 0 /* dummy cref/me */,
3940 0 /* dummy pc */, ec->vm_stack, 0, 0
3941 );
3942}
3943
3944void
3945rb_ec_clear_vm_stack(rb_execution_context_t *ec)
3946{
3947 // set cfp to NULL before clearing the stack in case `thread_profile_frames`
3948 // gets called in the middle of `rb_ec_set_vm_stack` via a signal handler.
3949 ec->cfp = NULL;
3950 rb_ec_set_vm_stack(ec, NULL, 0);
3951}
3952
3953void
3954rb_ec_close(rb_execution_context_t *ec)
3955{
3956 // Fiber storage is not accessible from outside the running fiber, so it is safe to clear it here.
3957 ec->storage = Qnil;
3958}
3959
3960static void
3961th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
3962{
3963 const rb_box_t *box = rb_current_box();
3964
3965 th->self = self;
3966
3967 ccan_list_head_init(&th->interrupt_exec_tasks);
3968
3969 rb_threadptr_root_fiber_setup(th);
3970
3971 /* All threads are blocking until a non-blocking fiber is scheduled */
3972 th->blocking = 1;
3973 th->scheduler = Qnil;
3974
3975 if (self == 0) {
3976 size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
3977 VALUE *stack = ALLOC_N(VALUE, size);
3978 rb_ec_initialize_vm_stack(th->ec, stack, size);
3979 rb_thread_malloc_stack_set(th, stack, size);
3980 }
3981 else {
3982 VM_ASSERT(th->ec->cfp == NULL);
3983 VM_ASSERT(th->ec->vm_stack == NULL);
3984 VM_ASSERT(th->ec->vm_stack_size == 0);
3985 }
3986
3987 th->status = THREAD_RUNNABLE;
3988 th->last_status = Qnil;
3989 th->top_wrapper = 0;
3990 if (box->top_self) {
3991 th->top_self = box->top_self;
3992 }
3993 else {
3994 th->top_self = 0;
3995 }
3996 th->value = Qundef;
3997
3998 th->ec->errinfo = Qnil;
3999 th->ec->root_svar = Qfalse;
4000 th->ec->local_storage_recursive_hash = Qnil;
4001 th->ec->local_storage_recursive_hash_for_trace = Qnil;
4002
4003 th->ec->storage = Qnil;
4004 th->ec->ractor_id = rb_ractor_id(th->ractor);
4005
4006#if OPT_CALL_THREADED_CODE
4007 th->retval = Qundef;
4008#endif
4009 th->name = Qnil;
4010 th->report_on_exception = vm->thread_report_on_exception;
4011 th->ext_config.ractor_safe = true;
4012
4013#if USE_RUBY_DEBUG_LOG
4014 static rb_atomic_t thread_serial = 1;
4015 th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1);
4016
4017 RUBY_DEBUG_LOG("th:%u", th->serial);
4018#endif
4019}
4020
4021VALUE
4022rb_thread_alloc(VALUE klass)
4023{
4024 VALUE self = thread_alloc(klass);
4025 rb_thread_t *target_th = rb_thread_ptr(self);
4026 target_th->ractor = GET_RACTOR();
4027 th_init(target_th, self, target_th->vm = GET_VM());
4028 rb_root_fiber_obj_setup(target_th);
4029 return self;
4030}
4031
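/* Evaluates `expr` with the current control frame temporarily popped: cfp is
 * advanced to the caller's frame, the caller's sp is set to the callee's sp
 * for the duration of `expr`, and both cfp and the saved sp are restored
 * afterwards. */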
4032#define REWIND_CFP(expr) do { \
4033 rb_execution_context_t *ec__ = GET_EC(); \
4034 VALUE *const curr_sp = (ec__->cfp++)->sp; \
4035 VALUE *const saved_sp = ec__->cfp->sp; \
4036 ec__->cfp->sp = curr_sp; \
4037 expr; \
4038 (ec__->cfp--)->sp = saved_sp; \
4039} while (0)
4040
4041static VALUE
4042m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
4043{
4044 REWIND_CFP({
4045 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
4046 });
4047 return Qnil;
4048}
4049
4050static VALUE
4051m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
4052{
4053 REWIND_CFP({
4054 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
4055 });
4056 return Qnil;
4057}
4058
4059static VALUE
4060m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
4061{
4062 REWIND_CFP({
4063 ID mid = SYM2ID(sym);
4064 rb_undef(cbase, mid);
4065 rb_clear_method_cache(self, mid);
4066 });
4067 return Qnil;
4068}
4069
4070static VALUE
4071m_core_set_postexe(VALUE self)
4072{
4073 rb_set_end_proc(rb_call_end_proc, rb_block_proc());
4074 return Qnil;
4075}
4076
4077static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
4078
4079static VALUE
4080core_hash_merge(VALUE hash, long argc, const VALUE *argv)
4081{
4082 Check_Type(hash, T_HASH);
4083 VM_ASSERT(argc % 2 == 0);
4084 rb_hash_bulk_insert(argc, argv, hash);
4085 return hash;
4086}
4087
4088static VALUE
4089m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
4090{
4091 VALUE hash = argv[0];
4092
4093 REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
4094
4095 return hash;
4096}
4097
4098static int
4099kwmerge_i(VALUE key, VALUE value, VALUE hash)
4100{
4101 rb_hash_aset(hash, key, value);
4102 return ST_CONTINUE;
4103}
4104
4105static VALUE
4106m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
4107{
4108 if (!NIL_P(kw)) {
4109 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
4110 }
4111 return hash;
4112}
4113
4114static VALUE
4115m_core_make_shareable(VALUE recv, VALUE obj)
4116{
4117 return rb_ractor_make_shareable(obj);
4118}
4119
4120static VALUE
4121m_core_make_shareable_copy(VALUE recv, VALUE obj)
4122{
4123 return rb_ractor_make_shareable_copy(obj);
4124}
4125
4126static VALUE
4127m_core_ensure_shareable(VALUE recv, VALUE obj, VALUE name)
4128{
4129 return rb_ractor_ensure_shareable(obj, name);
4130}
4131
4132static VALUE
4133core_hash_merge_kwd(VALUE hash, VALUE kw)
4134{
4135 rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
4136 return hash;
4137}
4138
4139extern VALUE *rb_gc_stack_start;
4140extern size_t rb_gc_stack_maxsize;
4141
4142/* debug functions */
4143
4144/* :nodoc: */
4145static VALUE
4146sdr(VALUE self)
4147{
4148 rb_vm_bugreport(NULL, stderr);
4149 return Qnil;
4150}
4151
4152/* :nodoc: */
4153static VALUE
4154nsdr(VALUE self)
4155{
4156 VALUE ary = rb_ary_new();
4157#ifdef HAVE_BACKTRACE
4158#include <execinfo.h>
4159#define MAX_NATIVE_TRACE 1024
4160 static void *trace[MAX_NATIVE_TRACE];
4161 int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
4162 char **syms = backtrace_symbols(trace, n);
4163 int i;
4164
4165 if (syms == 0) {
4166 rb_memerror();
4167 }
4168
4169 for (i=0; i<n; i++) {
4170 rb_ary_push(ary, rb_str_new2(syms[i]));
4171 }
4172 free(syms); /* OK */
4173#endif
4174 return ary;
4175}
4176
4177#if VM_COLLECT_USAGE_DETAILS
4178static VALUE usage_analysis_insn_start(VALUE self);
4179static VALUE usage_analysis_operand_start(VALUE self);
4180static VALUE usage_analysis_register_start(VALUE self);
4181static VALUE usage_analysis_insn_stop(VALUE self);
4182static VALUE usage_analysis_operand_stop(VALUE self);
4183static VALUE usage_analysis_register_stop(VALUE self);
4184static VALUE usage_analysis_insn_running(VALUE self);
4185static VALUE usage_analysis_operand_running(VALUE self);
4186static VALUE usage_analysis_register_running(VALUE self);
4187static VALUE usage_analysis_insn_clear(VALUE self);
4188static VALUE usage_analysis_operand_clear(VALUE self);
4189static VALUE usage_analysis_register_clear(VALUE self);
4190#endif
4191
4192static VALUE
4193f_raise(int c, VALUE *v, VALUE _)
4194{
4195 return rb_f_raise(c, v);
4196}
4197
4198static VALUE
4199f_proc(VALUE _)
4200{
4201 return rb_block_proc();
4202}
4203
4204static VALUE
4205f_lambda(VALUE _)
4206{
4207 return rb_block_lambda();
4208}
4209
4210static VALUE
4211f_sprintf(int c, const VALUE *v, VALUE _)
4212{
4213 return rb_f_sprintf(c, v);
4214}
4215
4216/* :nodoc: */
4217static VALUE
4218vm_mtbl(VALUE self, VALUE obj, VALUE sym)
4219{
4220 vm_mtbl_dump(CLASS_OF(obj), RTEST(sym) ? SYM2ID(sym) : 0);
4221 return Qnil;
4222}
4223
4224/* :nodoc: */
4225static VALUE
4226vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
4227{
4228 vm_mtbl_dump(obj, RTEST(sym) ? SYM2ID(sym) : 0);
4229 return Qnil;
4230}
4231
4232/*
4233 * call-seq:
4234 * RubyVM.keep_script_lines -> true or false
4235 *
4236 * Returns the current +keep_script_lines+ status. Currently it only returns
4237 * +true+ or +false+, but it may return other objects in the future.
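 *
 * For example, in a process where the flag has not been enabled:
 *
 *    RubyVM.keep_script_lines #=> false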
4238 *
4239 * Note that this is an API for ruby internal use, debugging,
4240 * and research. Do not use this for any other purpose.
4241 * Compatibility is not guaranteed.
4242 */
4243static VALUE
4244vm_keep_script_lines(VALUE self)
4245{
4246 return RBOOL(ruby_vm_keep_script_lines);
4247}
4248
4249/*
4250 * call-seq:
4251 * RubyVM.keep_script_lines = true / false
4252 *
4253 * Sets the +keep_script_lines+ flag. If the flag is set, all
4254 * loaded scripts are recorded in the interpreter process.
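 *
 * A minimal sketch of enabling it early in a process so that debugging
 * tools can inspect the recorded lines later (+some_library+ is a
 * placeholder):
 *
 *    RubyVM.keep_script_lines = true
 *    require "some_library"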
4255 *
4256 * Note that this is an API for ruby internal use, debugging,
4257 * and research. Do not use this for any other purpose.
4258 * Compatibility is not guaranteed.
4259 */
4260static VALUE
4261vm_keep_script_lines_set(VALUE self, VALUE flags)
4262{
4263 ruby_vm_keep_script_lines = RTEST(flags);
4264 return flags;
4265}
4266
4267void
4268Init_VM(void)
4269{
4270 VALUE opts;
4271 VALUE klass;
4272 VALUE fcore;
4273
4274 /*
4275 * Document-class: RubyVM
4276 *
4277 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
4278 * other Ruby implementations such as JRuby and TruffleRuby.
4279 *
4280 * The RubyVM module provides some access to MRI internals.
4281 * This module is for very limited purposes, such as debugging,
4282 * prototyping, and research. Normal users must not use it.
4283 * This module is not portable between Ruby implementations.
4284 */
4285 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
4286 rb_undef_alloc_func(rb_cRubyVM);
4287 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
4288 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
4289 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines", vm_keep_script_lines, 0);
4290 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines=", vm_keep_script_lines_set, 1);
4291
4292#if USE_DEBUG_COUNTER
4293 rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
4294 rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
4295#endif
4296
4297 /* FrozenCore (hidden) */
4298 fcore = rb_class_new(rb_cBasicObject);
4299 rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
4300 rb_vm_register_global_object(rb_class_path_cached(fcore));
4301 klass = rb_singleton_class(fcore);
4302 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
4303 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
4304 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
4305 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
4306 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
4307 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
4308 rb_define_method_id(klass, id_core_raise, f_raise, -1);
4309 rb_define_method_id(klass, id_core_sprintf, f_sprintf, -1);
4310 rb_define_method_id(klass, idProc, f_proc, 0);
4311 rb_define_method_id(klass, idLambda, f_lambda, 0);
4312 rb_define_method(klass, "make_shareable", m_core_make_shareable, 1);
4313 rb_define_method(klass, "make_shareable_copy", m_core_make_shareable_copy, 1);
4314 rb_define_method(klass, "ensure_shareable", m_core_ensure_shareable, 2);
4315 rb_obj_freeze(fcore);
4316 RBASIC_CLEAR_CLASS(klass);
4317 rb_obj_freeze(klass);
4318 rb_vm_register_global_object(fcore);
4319 rb_mRubyVMFrozenCore = fcore;
4320
4321 /*
4322 * Document-class: Thread
4323 *
4324 * Threads are the Ruby implementation for a concurrent programming model.
4325 *
4326 * Programs that require multiple threads of execution are perfect
4327 * candidates for Ruby's Thread class.
4328 *
4329 * For example, we can create a new thread separate from the main thread's
4330 * execution using ::new.
4331 *
4332 * thr = Thread.new { puts "What's the big deal" }
4333 *
4334 * Then we are able to pause the execution of the main thread and allow
4335 * our new thread to finish, using #join:
4336 *
4337 * thr.join #=> "What's the big deal"
4338 *
4339 * If we don't call +thr.join+ before the main thread terminates, then all
4340 * other threads including +thr+ will be killed.
4341 *
4342 * Alternatively, you can use an array for handling multiple threads at
4343 * once, like in the following example:
4344 *
4345 * threads = []
4346 * threads << Thread.new { puts "What's the big deal" }
4347 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
4348 *
4349 * After creating a few threads we wait for them all to finish
4350 * consecutively.
4351 *
4352 * threads.each { |thr| thr.join }
4353 *
4354 * To retrieve the last value of a thread, use #value
4355 *
4356 * thr = Thread.new { sleep 1; "Useful value" }
4357 * thr.value #=> "Useful value"
4358 *
4359 * === Thread initialization
4360 *
4361 * In order to create new threads, Ruby provides ::new, ::start, and
4362 * ::fork. A block must be provided with each of these methods, otherwise
4363 * a ThreadError will be raised.
4364 *
4365 * When subclassing the Thread class, the +initialize+ method of your
4366 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
4367 * call super in your +initialize+ method.
4368 *
4369 * === Thread termination
4370 *
4371 * For terminating threads, Ruby provides a variety of ways to do this.
4372 *
4373 * The class method ::kill is meant to exit a given thread:
4374 *
4375 * thr = Thread.new { sleep }
4376 * Thread.kill(thr) # sends exit() to thr
4377 *
4378 * Alternatively, you can use the instance method #exit, or any of its
4379 * aliases #kill or #terminate.
4380 *
4381 * thr.exit
4382 *
4383 * === Thread status
4384 *
4385 * Ruby provides a few instance methods for querying the state of a given
4386 * thread. To get a string with the current thread's state use #status
4387 *
4388 * thr = Thread.new { sleep }
4389 * thr.status # => "sleep"
4390 * thr.exit
4391 * thr.status # => false
4392 *
4393 * You can also use #alive? to tell if the thread is running or sleeping,
4394 * and #stop? if the thread is dead or sleeping.
4395 *
4396 * === Thread variables and scope
4397 *
4398 * Since threads are created with blocks, the same rules apply to other
4399 * Ruby blocks for variable scope. Any local variables created within this
4400 * block are accessible to only this thread.
4401 *
4402 * ==== Fiber-local vs. Thread-local
4403 *
4404 * Each fiber has its own bucket for Thread#[] storage. When you set a
4405 * new fiber-local it is only accessible within this Fiber. To illustrate:
4406 *
4407 * Thread.new {
4408 * Thread.current[:foo] = "bar"
4409 * Fiber.new {
4410 * p Thread.current[:foo] # => nil
4411 * }.resume
4412 * }.join
4413 *
4414 * This example uses #[] for getting and #[]= for setting fiber-locals;
4415 * you can also use #keys to list the fiber-locals for a given
4416 * thread and #key? to check if a fiber-local exists.
4417 *
4418 * When it comes to thread-locals, they are accessible within the entire
4419 * scope of the thread. Given the following example:
4420 *
4421 * Thread.new{
4422 * Thread.current.thread_variable_set(:foo, 1)
4423 * p Thread.current.thread_variable_get(:foo) # => 1
4424 * Fiber.new{
4425 * Thread.current.thread_variable_set(:foo, 2)
4426 * p Thread.current.thread_variable_get(:foo) # => 2
4427 * }.resume
4428 * p Thread.current.thread_variable_get(:foo) # => 2
4429 * }.join
4430 *
4431 * You can see that the thread-local +:foo+ carried over into the fiber
4432 * and was changed to +2+ by the end of the thread.
4433 *
4434 * This example makes use of #thread_variable_set to create new
4435 * thread-locals, and #thread_variable_get to reference them.
4436 *
4437 * There is also #thread_variables to list all thread-locals, and
4438 * #thread_variable? to check if a given thread-local exists.
4439 *
4440 * === Exception handling
4441 *
4442 * When an unhandled exception is raised inside a thread, it will
4443 * terminate. By default, this exception will not propagate to other
4444 * threads. The exception is stored and when another thread calls #value
4445 * or #join, the exception will be re-raised in that thread.
4446 *
4447 * t = Thread.new{ raise 'something went wrong' }
4448 * t.value #=> RuntimeError: something went wrong
4449 *
4450 * An exception can be raised from outside the thread using the
4451 * Thread#raise instance method, which takes the same parameters as
4452 * Kernel#raise.
4453 *
4454 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
4455 * true, or $DEBUG = true will cause a subsequent unhandled exception
4456 * raised in a thread to be automatically re-raised in the main thread.
4457 *
4458 * With the addition of the class method ::handle_interrupt, you can now
4459 * handle exceptions asynchronously with threads.
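 *
 * For example, a sketch that defers Timeout::Error interrupts until a
 * critical section has finished:
 *
 *    Thread.handle_interrupt(Timeout::Error => :never) do
 *      # work that must not be interrupted
 *    end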
4460 *
4461 * === Scheduling
4462 *
4463 * Ruby provides a few ways to support scheduling threads in your program.
4464 *
4465 * The first way is by using the class method ::stop, to put the current
4466 * running thread to sleep and schedule the execution of another thread.
4467 *
4468 * Once a thread is asleep, you can use the instance method #wakeup to
4469 * mark your thread as eligible for scheduling.
4470 *
4471 * You can also try ::pass, which attempts to pass execution to another
4472 * thread, but it is up to the OS whether the running thread will actually
4473 * switch. The same goes for #priority, which lets you hint to the thread
4474 * scheduler which threads you want to take precedence when passing
4475 * execution. This method is also dependent on the OS and may be ignored
4476 * on some platforms.
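 *
 * For example, a sketch that stops a thread and then wakes it up:
 *
 *    thr = Thread.new { Thread.stop; puts "woken up" }
 *    sleep 0.1 until thr.stop?
 *    thr.wakeup
 *    thr.join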
4477 *
4478 */
4479 rb_cThread = rb_define_class("Thread", rb_cObject);
4480 rb_undef_alloc_func(rb_cThread);
4481
4482#if VM_COLLECT_USAGE_DETAILS
4483 /* ::RubyVM::USAGE_ANALYSIS_* */
4484#define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
4485 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
4486 define_usage_analysis_hash(INSN);
4487 define_usage_analysis_hash(REGS);
4488 define_usage_analysis_hash(INSN_BIGRAM);
4489
4490 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
4491 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
4492 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
4493 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
4494 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
4495 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
4496 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
4497 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
4498 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
4499 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
4500 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
4501 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
4502#endif
4503
4504 /* ::RubyVM::OPTS
4505 * An Array of VM build options.
4506 * This constant is MRI specific.
4507 */
4508 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
4509
4510#if OPT_DIRECT_THREADED_CODE
4511 rb_ary_push(opts, rb_str_new2("direct threaded code"));
4512#elif OPT_TOKEN_THREADED_CODE
4513 rb_ary_push(opts, rb_str_new2("token threaded code"));
4514#elif OPT_CALL_THREADED_CODE
4515 rb_ary_push(opts, rb_str_new2("call threaded code"));
4516#endif
4517
4518#if OPT_OPERANDS_UNIFICATION
4519 rb_ary_push(opts, rb_str_new2("operands unification"));
4520#endif
4521#if OPT_INSTRUCTIONS_UNIFICATION
4522 rb_ary_push(opts, rb_str_new2("instructions unification"));
4523#endif
4524#if OPT_INLINE_METHOD_CACHE
4525 rb_ary_push(opts, rb_str_new2("inline method cache"));
4526#endif
4527
4528 /* ::RubyVM::INSTRUCTION_NAMES
4529 * A list of bytecode instruction names in MRI.
4530 * This constant is MRI specific.
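 *
 * For example, "leave" is expected to be among them:
 *
 *    RubyVM::INSTRUCTION_NAMES.include?("leave") #=> true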
4531 */
4532 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
4533
4534 /* ::RubyVM::DEFAULT_PARAMS
4535 * This constant exposes the VM's default parameters.
4536 * Note that changing these values does not affect VM execution.
4537 * Specification is not stable and you should not depend on this value.
4538 * Of course, this constant is MRI specific.
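 *
 * For example (keys and values vary by build and version):
 *
 *    RubyVM::DEFAULT_PARAMS[:thread_vm_stack_size] #=> 1048576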
4539 */
4540 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
4541
4542 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
4543#if VMDEBUG
4544 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
4545 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
4546 rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
4547 rb_define_singleton_method(rb_cRubyVM, "mtbl2", vm_mtbl2, 2);
4548#else
4549 (void)sdr;
4550 (void)nsdr;
4551 (void)vm_mtbl;
4552 (void)vm_mtbl2;
4553#endif
4554
4555 /* VM bootstrap: phase 2 */
4556 {
4557 rb_vm_t *vm = ruby_current_vm_ptr;
4558 rb_thread_t *th = GET_THREAD();
4559 VALUE filename = rb_fstring_lit("<main>");
4560 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
4561
4562 // Ractor setup
4563 rb_ractor_main_setup(vm, th->ractor, th);
4564
4565 /* create vm object */
4566 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
4567
4568 /* create main thread */
4569 th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
4570 vm->ractor.main_thread = th;
4571 vm->ractor.main_ractor = th->ractor;
4572 th->vm = vm;
4573 th->top_wrapper = 0;
4574 th->top_self = rb_vm_top_self();
4575
4576 rb_root_fiber_obj_setup(th);
4577
4578 rb_vm_register_global_object((VALUE)iseq);
4579 th->ec->cfp->_iseq = iseq;
4580 th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
4581 th->ec->cfp->self = th->top_self;
4582
4583 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
4584 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE));
4585
4586 /*
4587 * The Binding of the top level scope
4588 */
4589 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
4590
4591#ifdef _WIN32
4592 rb_objspace_gc_enable(vm->gc.objspace);
4593#endif
4594 }
4595 vm_init_redefined_flag();
4596
4597 rb_block_param_proxy = rb_obj_alloc(rb_cObject);
4598 rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
4599 OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
4600 rb_obj_freeze(rb_block_param_proxy);
4601 rb_vm_register_global_object(rb_block_param_proxy);
4602
4603 /* vm_backtrace.c */
4604 Init_vm_backtrace();
4605}
4606
4607void
4608rb_vm_set_progname(VALUE filename)
4609{
4610 rb_thread_t *th = GET_VM()->ractor.main_thread;
4611 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
4612 --cfp;
4613
4614 filename = rb_str_new_frozen(filename);
4615 rb_iseq_pathobj_set(CFP_ISEQ(cfp), filename, rb_iseq_realpath(CFP_ISEQ(cfp)));
4616}
4617
4618extern const struct st_hash_type rb_fstring_hash_type;
4619
4620static rb_vm_t _vm;
4621static rb_thread_t _main_thread = {
4622 .vm = &_vm,
4623 .main_thread = 1,
4624};
4625
4626void
4627Init_BareVM(void)
4628{
4629 /* VM bootstrap: phase 1 */
4630 rb_vm_t *vm = &_vm;
4631 rb_thread_t *th = &_main_thread;
4632
4633 // setup the VM
4634 vm_init2(vm);
4635
4636 ruby_current_vm_ptr = vm;
4637 rb_objspace_alloc();
4638 rb_id_table_init(&vm->negative_cme_table, 16);
4639 st_init_existing_numtable_with_size(&vm->overloaded_cme_table, 0);
4640 st_init_existing_strtable_with_size(&vm->static_ext_inits, 0);
4641 set_init_embedded_numtable_with_size(&vm->unused_block_warning_table, 0);
4642 vm->global_hooks.type = hook_list_type_global;
4643
4644 // setup main thread
4645 th->nt = ZALLOC(struct rb_native_thread);
4646 th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
4647 Init_native_thread(th);
4648 rb_jit_cont_init();
4649 th_init(th, 0, vm);
4650
4651 rb_ractor_set_current_ec(th->ractor, th->ec);
4652
4653 /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */
4654 ruby_thread_init_stack(th, native_main_thread_stack_top);
4655
4656 // setup ractor system
4657 rb_native_mutex_initialize(&vm->ractor.sync.lock);
4658 rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
4659
4660 vm_opt_method_def_table = st_init_numtable();
4661 vm_opt_mid_table = st_init_numtable();
4662
4663#ifdef RUBY_THREAD_WIN32_H
4664 rb_native_cond_initialize(&vm->ractor.sync.barrier_complete_cond);
4665 rb_native_cond_initialize(&vm->ractor.sync.barrier_release_cond);
4666#endif
4667}
4668
4669void
4670ruby_init_stack(void *addr)
4671{
4672 native_main_thread_stack_top = addr;
4673}
4674
4675#ifndef _WIN32
4676#include <unistd.h>
4677#include <sys/mman.h>
4678#endif
4679
4680
4681#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
4682#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
4683#endif
4684
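/* A bucket in a singly linked list of pinned VM roots. Each bucket holds up
 * to MARK_OBJECT_ARY_BUCKET_SIZE values registered via
 * rb_vm_register_global_object(); when a bucket fills up, a new one is
 * prepended and becomes the list head (see pin_array_list_append). */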
4685struct pin_array_list {
4686 VALUE next;
4687 long len;
4688 VALUE *array;
4689};
4690
4691static void
4692pin_array_list_mark(void *data)
4693{
4694 struct pin_array_list *array = (struct pin_array_list *)data;
4695 rb_gc_mark_movable(array->next);
4696
4697 rb_gc_mark_vm_stack_values(array->len, array->array);
4698}
4699
4700static void
4701pin_array_list_free(void *data)
4702{
4703 struct pin_array_list *array = (struct pin_array_list *)data;
4704 xfree(array->array);
4705}
4706
4707static size_t
4708pin_array_list_memsize(const void *data)
4709{
4710 return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE));
4711}
4712
4713static void
4714pin_array_list_update_references(void *data)
4715{
4716 struct pin_array_list *array = (struct pin_array_list *)data;
4717 array->next = rb_gc_location(array->next);
4718}
4719
4720static const rb_data_type_t pin_array_list_type = {
4721 .wrap_struct_name = "VM/pin_array_list",
4722 .function = {
4723 .dmark = pin_array_list_mark,
4724 .dfree = pin_array_list_free,
4725 .dsize = pin_array_list_memsize,
4726 .dcompact = pin_array_list_update_references,
4727 },
4728 .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
4729};
4730
4731static VALUE
4732pin_array_list_new(VALUE next)
4733{
4734 struct pin_array_list *array_list;
4735 VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list);
4736 RB_OBJ_WRITE(obj, &array_list->next, next);
4737 array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE);
4738 return obj;
4739}
4740
4741static VALUE
4742pin_array_list_append(VALUE obj, VALUE item)
4743{
4744 struct pin_array_list *array_list;
4745 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4746
4747 if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) {
4748 obj = pin_array_list_new(obj);
4749 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4750 }
4751
4752 RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item);
4753 array_list->len++;
4754 return obj;
4755}
4756
4757void
4758rb_vm_register_global_object(VALUE obj)
4759{
4760 RUBY_ASSERT(!RB_SPECIAL_CONST_P(obj));
4759{
4761 if (RB_SPECIAL_CONST_P(obj)) {
4762 return;
4763 }
4764
4765 switch (RB_BUILTIN_TYPE(obj)) {
4766 case T_CLASS:
4767 case T_MODULE:
4768 if (FL_TEST(obj, RCLASS_IS_ROOT)) {
4769 return;
4770 }
4771 FL_SET(obj, RCLASS_IS_ROOT);
4772 break;
4773 default:
4774 break;
4775 }
4776 RB_VM_LOCKING() {
4777 VALUE list = GET_VM()->mark_object_ary;
4778 VALUE head = pin_array_list_append(list, obj);
4779 if (head != list) {
4780 GET_VM()->mark_object_ary = head;
4781 }
4782 RB_GC_GUARD(obj);
4783 }
4784}
4785
4786VALUE rb_cc_refinement_set_create(void);
4787
4788void
4789Init_vm_objects(void)
4790{
4791 rb_vm_t *vm = GET_VM();
4792
4793 /* initialize mark object array, hash */
4794 vm->mark_object_ary = pin_array_list_new(Qnil);
4795 st_init_existing_table_with_size(&vm->ci_table, &vm_ci_hashtype, 0);
4796 vm->cc_refinement_set = rb_cc_refinement_set_create();
4797}
4798
4799// Whether JIT is enabled or not, we need to load/undef `#with_jit` for other builtins.
4800#include "jit_hook.rbinc"
4801#include "jit_undef.rbinc"
4802
4803// Stub for builtin function when not building YJIT units
4804#if !USE_YJIT
4805void Init_builtin_yjit(void) {}
4806#endif
4807
4808// Stub for builtin function when not building ZJIT units
4809#if !USE_ZJIT
4810void Init_builtin_zjit(void) {}
4811#endif
4812
4813/* top self */
4814
4815static VALUE
4816main_to_s(VALUE obj)
4817{
4818 return rb_str_new2("main");
4819}
4820
4821VALUE
4822rb_vm_top_self(void)
4823{
4824 const rb_box_t *box = rb_current_box();
4825 VM_ASSERT(box);
4826 VM_ASSERT(box->top_self);
4827 return box->top_self;
4828}
4829
4830void
4831Init_top_self(void)
4832{
4833 rb_vm_t *vm = GET_VM();
4834 vm->root_box = (rb_box_t *)rb_root_box();
4835 vm->root_box->top_self = rb_obj_alloc(rb_cObject);
4836 rb_define_singleton_method(vm->root_box->top_self, "to_s", main_to_s, 0);
4837 rb_define_alias(rb_singleton_class(vm->root_box->top_self), "inspect", "to_s");
4838}
4839
4840VALUE *
4841rb_ruby_verbose_ptr(void)
4842{
4843 rb_ractor_t *cr = GET_RACTOR();
4844 return &cr->verbose;
4845}
4846
4847VALUE *
4848rb_ruby_debug_ptr(void)
4849{
4850 rb_ractor_t *cr = GET_RACTOR();
4851 return &cr->debug;
4852}
4853
4854bool rb_free_at_exit = false;
4855
4856bool
4857ruby_free_at_exit_p(void)
4858{
4859 return rb_free_at_exit;
4860}
4861
4862/* iseq.c */
4863VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
4864 VALUE insn, int op_no, VALUE op,
4865 int len, size_t pos, VALUE *pnop, VALUE child);
4866
4867#if VM_COLLECT_USAGE_DETAILS
4868
4869#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
4870
4871/* uh = {
4872 * insn(Fixnum) => ihash(Hash)
4873 * }
4874 * ihash = {
4875 * -1(Fixnum) => count, # insn usage
4876 * 0(Fixnum) => ophash, # operand usage
4877 * }
4878 * ophash = {
4879 * val(interned string) => count(Fixnum)
4880 * }
4881 */
4882static void
4883vm_analysis_insn(int insn)
4884{
4885 ID usage_hash;
4886 ID bigram_hash;
4887 static int prev_insn = -1;
4888
4889 VALUE uh;
4890 VALUE ihash;
4891 VALUE cv;
4892
4893 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4894 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4895 uh = rb_const_get(rb_cRubyVM, usage_hash);
4896 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4897 ihash = rb_hash_new();
4898 HASH_ASET(uh, INT2FIX(insn), ihash);
4899 }
4900 if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) {
4901 cv = INT2FIX(0);
4902 }
4903 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
4904
4905 /* calc bigram */
4906 if (prev_insn != -1) {
4907 VALUE bi;
4908 VALUE ary[2];
4909 VALUE cv;
4910
4911 ary[0] = INT2FIX(prev_insn);
4912 ary[1] = INT2FIX(insn);
4913 bi = rb_ary_new4(2, &ary[0]);
4914
4915 uh = rb_const_get(rb_cRubyVM, bigram_hash);
4916 if (NIL_P(cv = rb_hash_aref(uh, bi))) {
4917 cv = INT2FIX(0);
4918 }
4919 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
4920 }
4921 prev_insn = insn;
4922}
4923
4924static void
4925vm_analysis_operand(int insn, int n, VALUE op)
4926{
4927 ID usage_hash;
4928
4929 VALUE uh;
4930 VALUE ihash;
4931 VALUE ophash;
4932 VALUE valstr;
4933 VALUE cv;
4934
4935 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4936
4937 uh = rb_const_get(rb_cRubyVM, usage_hash);
4938 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4939 ihash = rb_hash_new();
4940 HASH_ASET(uh, INT2FIX(insn), ihash);
4941 }
4942 if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) {
4943 ophash = rb_hash_new();
4944 HASH_ASET(ihash, INT2FIX(n), ophash);
4945 }
4946 /* intern */
4947 valstr = rb_insn_operand_intern(CFP_ISEQ(GET_EC()->cfp), insn, n, op, 0, 0, 0, 0);
4948
4949 /* set count */
4950 if (NIL_P(cv = rb_hash_aref(ophash, valstr))) {
4951 cv = INT2FIX(0);
4952 }
4953 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
4954}
4955
4956static void
4957vm_analysis_register(int reg, int isset)
4958{
4959 ID usage_hash;
4960 VALUE uh;
4961 VALUE valstr;
4962 static const char regstrs[][5] = {
4963 "pc", /* 0 */
4964 "sp", /* 1 */
4965 "ep", /* 2 */
4966 "cfp", /* 3 */
4967 "self", /* 4 */
4968 "iseq", /* 5 */
4969 };
4970 static const char getsetstr[][4] = {
4971 "get",
4972 "set",
4973 };
4974 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
4975
4976 VALUE cv;
4977
4978 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4979 if (syms[0] == 0) {
4980 char buff[0x10];
4981 int i;
4982
4983 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
4984 int j;
4985 for (j = 0; j < 2; j++) {
4986 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
4987 syms[i][j] = ID2SYM(rb_intern(buff));
4988 }
4989 }
4990 }
4991 valstr = syms[reg][isset];
4992
4993 uh = rb_const_get(rb_cRubyVM, usage_hash);
4994 if (NIL_P(cv = rb_hash_aref(uh, valstr))) {
4995 cv = INT2FIX(0);
4996 }
4997 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
4998}
4999
5000#undef HASH_ASET
5001
5002static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
5003static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
5004static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
5005
5006/* :nodoc: */
5007static VALUE
5008usage_analysis_insn_start(VALUE self)
5009{
5010 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
5011 return Qnil;
5012}
5013
5014/* :nodoc: */
5015static VALUE
5016usage_analysis_operand_start(VALUE self)
5017{
5018 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
5019 return Qnil;
5020}
5021
5022/* :nodoc: */
5023static VALUE
5024usage_analysis_register_start(VALUE self)
5025{
5026 ruby_vm_collect_usage_func_register = vm_analysis_register;
5027 return Qnil;
5028}
5029
5030/* :nodoc: */
5031static VALUE
5032usage_analysis_insn_stop(VALUE self)
5033{
5034 ruby_vm_collect_usage_func_insn = 0;
5035 return Qnil;
5036}
5037
5038/* :nodoc: */
5039static VALUE
5040usage_analysis_operand_stop(VALUE self)
5041{
5042 ruby_vm_collect_usage_func_operand = 0;
5043 return Qnil;
5044}
5045
5046/* :nodoc: */
5047static VALUE
5048usage_analysis_register_stop(VALUE self)
5049{
5050 ruby_vm_collect_usage_func_register = 0;
5051 return Qnil;
5052}
5053
5054/* :nodoc: */
5055static VALUE
5056usage_analysis_insn_running(VALUE self)
5057{
5058 return RBOOL(ruby_vm_collect_usage_func_insn != 0);
5059}
5060
5061/* :nodoc: */
5062static VALUE
5063usage_analysis_operand_running(VALUE self)
5064{
5065 return RBOOL(ruby_vm_collect_usage_func_operand != 0);
5066}
5067
5068/* :nodoc: */
5069static VALUE
5070usage_analysis_register_running(VALUE self)
5071{
5072 return RBOOL(ruby_vm_collect_usage_func_register != 0);
5073}
5074
5075static VALUE
5076usage_analysis_clear(VALUE self, ID usage_hash)
5077{
5078 VALUE uh;
5079 uh = rb_const_get(self, usage_hash);
5080 rb_hash_clear(uh);
5081
5082 return Qtrue;
5083}
5084
5085
5086/* :nodoc: */
5087static VALUE
5088usage_analysis_insn_clear(VALUE self)
5089{
5090 ID usage_hash;
5091 ID bigram_hash;
5092
5093 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
5094 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
5095 usage_analysis_clear(rb_cRubyVM, usage_hash);
5096 return usage_analysis_clear(rb_cRubyVM, bigram_hash);
5097}
5098
5099/* :nodoc: */
5100static VALUE
5101usage_analysis_operand_clear(VALUE self)
5102{
5103 ID usage_hash;
5104
5105 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
5106 return usage_analysis_clear(self, usage_hash);
5107}
5108
5109/* :nodoc: */
5110static VALUE
5111usage_analysis_register_clear(VALUE self)
5112{
5113 ID usage_hash;
5114
5115 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
5116 return usage_analysis_clear(self, usage_hash);
5117}
5118
5119#else
5120
5121MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = 0;
5122MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = 0;
5123MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = 0;
5124
5125#endif
5126
5127#if VM_COLLECT_USAGE_DETAILS
5128/* @param insn instruction number */
5129static void
5130vm_collect_usage_insn(int insn)
5131{
5132 if (RUBY_DTRACE_INSN_ENABLED()) {
5133 RUBY_DTRACE_INSN(rb_insns_name(insn));
5134 }
5135 if (ruby_vm_collect_usage_func_insn)
5136 (*ruby_vm_collect_usage_func_insn)(insn);
5137}
5138
5139/* @param insn instruction number
5140 * @param n n-th operand
5141 * @param op operand value
5142 */
5143static void
5144vm_collect_usage_operand(int insn, int n, VALUE op)
5145{
5146 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
5147 VALUE valstr;
5148
5149 valstr = rb_insn_operand_intern(CFP_ISEQ(GET_EC()->cfp), insn, n, op, 0, 0, 0, 0);
5150
5151 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
5152 RB_GC_GUARD(valstr);
5153 }
5154 if (ruby_vm_collect_usage_func_operand)
5155 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
5156}
5157
5158/* @param reg register id. see code of vm_analysis_register() */
5159/* @param isset 0: read, 1: write */
5160static void
5161vm_collect_usage_register(int reg, int isset)
5162{
5163 if (ruby_vm_collect_usage_func_register)
5164 (*ruby_vm_collect_usage_func_register)(reg, isset);
5165}
5166#endif
5167
5168const struct rb_callcache *
5169rb_vm_empty_cc(void)
5170{
5171 return &vm_empty_cc;
5172}
5173
5174const struct rb_callcache *
5175rb_vm_empty_cc_for_super(void)
5176{
5177 return &vm_empty_cc_for_super;
5178}
5179
5180#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */