Ruby 4.1.0dev (2026-04-04 revision 892991bdc1a5068d74a8597cd0ccf3092afffabf)
vm.c (892991bdc1a5068d74a8597cd0ccf3092afffabf)
1/**********************************************************************
2
3  vm.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11#define vm_exec rb_vm_exec
12
13#include "eval_intern.h"
14#include "internal.h"
15#include "internal/box.h"
16#include "internal/class.h"
17#include "internal/compile.h"
18#include "internal/cont.h"
19#include "internal/error.h"
20#include "internal/encoding.h"
21#include "internal/eval.h"
22#include "internal/gc.h"
23#include "internal/inits.h"
24#include "internal/missing.h"
25#include "internal/object.h"
26#include "internal/proc.h"
27#include "internal/re.h"
28#include "internal/ruby_parser.h"
29#include "internal/st.h"
30#include "internal/symbol.h"
31#include "internal/thread.h"
32#include "internal/transcode.h"
33#include "internal/vm.h"
34#include "internal/sanitizers.h"
35#include "internal/variable.h"
36#include "iseq.h"
37#include "symbol.h" // This includes a macro for a more performant rb_id2sym.
38#include "yjit.h"
39#include "insns.inc"
40#include "zjit.h"
41#include "ruby/st.h"
42#include "ruby/vm.h"
43#include "vm_core.h"
44#include "vm_callinfo.h"
45#include "vm_debug.h"
46#include "vm_exec.h"
47#include "vm_insnhelper.h"
48#include "ractor_core.h"
49#include "vm_sync.h"
50#include "shape.h"
51
52#include "builtin.h"
53
54#include "probes.h"
55#include "probes_helper.h"
56
57#ifdef RUBY_ASSERT_CRITICAL_SECTION
58int ruby_assert_critical_section_entered = 0;
59#endif
60
61static void *native_main_thread_stack_top;
62
63bool ruby_vm_during_cleanup = false;
64
65VALUE rb_str_concat_literals(size_t, const VALUE*);
66
67 VALUE vm_exec(rb_execution_context_t *);
68
69extern const char *const rb_debug_counter_names[];
70
71PUREFUNC(static inline const VALUE *VM_EP_LEP(const VALUE *));
72static inline const VALUE *
73VM_EP_LEP(const VALUE *ep)
74{
75 while (!VM_ENV_LOCAL_P(ep)) {
76 ep = VM_ENV_PREV_EP(ep);
77 }
78 return ep;
79}
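/* Editor's note: a minimal sketch (not part of vm.c) of what VM_EP_LEP()
 * computes. A non-local env keeps a tagged pointer to its outer env in
 * ep[VM_ENV_DATA_INDEX_SPECVAL]; following that chain always terminates at
 * the local (method-level) env, which holds the block handler and special
 * variables. */
#if 0
static void
example_local_ep(const rb_control_frame_t *cfp)
{
    const VALUE *lep = VM_EP_LEP(cfp->ep);
    VM_ASSERT(VM_ENV_LOCAL_P(lep)); /* the walk stops only at a local env */
}
#endif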
80
81static inline const rb_control_frame_t *
82rb_vm_search_cf_from_ep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const VALUE * const ep)
83{
84 if (!ep) {
85 return NULL;
86 }
87 else {
88 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
89
90 while (cfp < eocfp) {
91 if (cfp->ep == ep) {
92 return cfp;
93 }
94 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
95 }
96
97 return NULL;
98 }
99}
100
101#if VM_CHECK_MODE > 0
102// ruby_box_crashed defined in internal/box.h
103#define VM_BOX_CRASHED() {ruby_box_crashed = true;}
104#define VM_BOX_ASSERT(expr, msg) \
105 do { if (!(expr)) { ruby_box_crashed = true; rb_bug(msg); } } while (0)
106#else
107#define VM_BOX_CRASHED() {}
108#define VM_BOX_ASSERT(expr, msg) ((void)0)
109#endif
110
111static const VALUE *
112VM_EP_RUBY_LEP(const rb_execution_context_t *ec, const rb_control_frame_t *current_cfp)
113{
114 // rb_vmdebug_box_env_dump_raw() simulates this function
115 const VALUE *ep = current_cfp->ep;
116 const rb_control_frame_t * const eocfp = RUBY_VM_END_CONTROL_FRAME(ec); /* end of control frame pointer */
117 const rb_control_frame_t *cfp = current_cfp;
118
119 if (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_IFUNC)) {
120 ep = VM_EP_LEP(current_cfp->ep);
149 VM_ASSERT(VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC));
150 return ep;
151 }
152
153 while (VM_ENV_FRAME_TYPE_P(ep, VM_FRAME_MAGIC_CFUNC)) {
154 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
155
156 VM_BOX_ASSERT(cfp, "CFUNC should have a valid previous control frame");
157 VM_BOX_ASSERT(cfp < eocfp, "CFUNC should have a valid caller frame");
158 if (!cfp || cfp >= eocfp) {
159 return NULL;
160 }
161
162 VM_BOX_ASSERT(cfp->ep, "CFUNC should have a valid caller frame with env");
163 ep = cfp->ep;
164 if (!ep) {
165 return NULL;
166 }
167 }
168
169 while (!VM_ENV_LOCAL_P(ep)) {
170 ep = VM_ENV_PREV_EP(ep);
171 }
172
173 return ep;
174}
175
176const VALUE *
177rb_vm_ep_local_ep(const VALUE *ep)
178{
179 return VM_EP_LEP(ep);
180}
181
182PUREFUNC(static inline const VALUE *VM_CF_LEP(const rb_control_frame_t * const cfp));
183static inline const VALUE *
184VM_CF_LEP(const rb_control_frame_t * const cfp)
185{
186 return VM_EP_LEP(cfp->ep);
187}
188
189static inline const VALUE *
190VM_CF_PREV_EP(const rb_control_frame_t * const cfp)
191{
192 return VM_ENV_PREV_EP(cfp->ep);
193}
194
195PUREFUNC(static inline VALUE VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp));
196static inline VALUE
197VM_CF_BLOCK_HANDLER(const rb_control_frame_t * const cfp)
198{
199 const VALUE *ep;
200 if (VM_ENV_BOXED_P(cfp->ep)) {
201 VM_ASSERT(VM_ENV_LOCAL_P(cfp->ep));
202 /* Never set block_handler for VM_FRAME_MAGIC_TOP or VM_FRAME_MAGIC_CLASS;
203 * the specval is used for boxes (rb_box_t) in these cases
204 */
205 return VM_BLOCK_HANDLER_NONE;
206 }
207 ep = VM_CF_LEP(cfp);
208 return VM_ENV_BLOCK_HANDLER(ep);
209}
210
211int
212rb_vm_cframe_keyword_p(const rb_control_frame_t *cfp)
213{
214 return VM_FRAME_CFRAME_KW_P(cfp);
215}
216
217VALUE
218rb_vm_frame_block_handler(const rb_control_frame_t *cfp)
219{
220 return VM_CF_BLOCK_HANDLER(cfp);
221}
222
223#if VM_CHECK_MODE > 0
224static int
225VM_CFP_IN_HEAP_P(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
226{
227 const VALUE *start = ec->vm_stack;
228 const VALUE *end = (VALUE *)ec->vm_stack + ec->vm_stack_size;
229 VM_ASSERT(start != NULL);
230
231 if (start <= (VALUE *)cfp && (VALUE *)cfp < end) {
232 return FALSE;
233 }
234 else {
235 return TRUE;
236 }
237}
238
239static int
240VM_EP_IN_HEAP_P(const rb_execution_context_t *ec, const VALUE *ep)
241{
242 const VALUE *start = ec->vm_stack;
243 const VALUE *end = (VALUE *)ec->cfp;
244 VM_ASSERT(start != NULL);
245
246 if (start <= ep && ep < end) {
247 return FALSE;
248 }
249 else {
250 return TRUE;
251 }
252}
253
254static int
255vm_ep_in_heap_p_(const rb_execution_context_t *ec, const VALUE *ep)
256{
257 if (VM_EP_IN_HEAP_P(ec, ep)) {
258 VALUE envval = ep[VM_ENV_DATA_INDEX_ENV]; /* VM_ENV_ENVVAL(ep); */
259
260 if (!UNDEF_P(envval)) {
261 const rb_env_t *env = (const rb_env_t *)envval;
262
263 VM_ASSERT(imemo_type_p(envval, imemo_env));
264 VM_ASSERT(VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED));
265 VM_ASSERT(env->ep == ep);
266 }
267 return TRUE;
268 }
269 else {
270 return FALSE;
271 }
272}
273
274int
275rb_vm_ep_in_heap_p(const VALUE *ep)
276{
277 const rb_execution_context_t *ec = GET_EC();
278 if (ec->vm_stack == NULL) return TRUE;
279 return vm_ep_in_heap_p_(ec, ep);
280}
281#endif
282
283static struct rb_captured_block *
284VM_CFP_TO_CAPTURED_BLOCK(const rb_control_frame_t *cfp)
285{
286 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
287 return (struct rb_captured_block *)&cfp->self;
288}
289
290static rb_control_frame_t *
291VM_CAPTURED_BLOCK_TO_CFP(const struct rb_captured_block *captured)
292{
293 rb_control_frame_t *cfp = ((rb_control_frame_t *)((VALUE *)(captured) - 3));
294 VM_ASSERT(!VM_CFP_IN_HEAP_P(GET_EC(), cfp));
295 VM_ASSERT(sizeof(rb_control_frame_t)/sizeof(VALUE) == 7 + (VM_DEBUG_BP_CHECK ? 1 : 0));
296 return cfp;
297}
298
299static int
300VM_BH_FROM_CFP_P(VALUE block_handler, const rb_control_frame_t *cfp)
301{
302 const struct rb_captured_block *captured = VM_CFP_TO_CAPTURED_BLOCK(cfp);
303 return VM_TAGGED_PTR_REF(block_handler, 0x03) == captured;
304}
305
306static VALUE
307vm_passed_block_handler(rb_execution_context_t *ec)
308{
309 VALUE block_handler = ec->passed_block_handler;
310 ec->passed_block_handler = VM_BLOCK_HANDLER_NONE;
311 vm_block_handler_verify(block_handler);
312 return block_handler;
313}
314
315static rb_cref_t *
316vm_cref_new0(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int use_prev_prev, int singleton)
317{
318 VALUE refinements = Qnil;
319 int omod_shared = FALSE;
320
321 /* scope */
322 rb_scope_visibility_t scope_visi;
323 scope_visi.method_visi = visi;
324 scope_visi.module_func = module_func;
325
326 /* refinements */
327 if (prev_cref != NULL && prev_cref != (void *)1 /* TODO: why CREF_NEXT(cref) is 1? */) {
328 refinements = CREF_REFINEMENTS(prev_cref);
329
330 if (!NIL_P(refinements)) {
331 omod_shared = TRUE;
332 CREF_OMOD_SHARED_SET(prev_cref);
333 }
334 }
335
336 VM_ASSERT(singleton || klass);
337
338 rb_cref_t *cref = SHAREABLE_IMEMO_NEW(rb_cref_t, imemo_cref, refinements);
339 cref->klass_or_self = klass;
340 cref->next = use_prev_prev ? CREF_NEXT(prev_cref) : prev_cref;
341 *((rb_scope_visibility_t *)&cref->scope_visi) = scope_visi;
342
343 if (pushed_by_eval) CREF_PUSHED_BY_EVAL_SET(cref);
344 if (omod_shared) CREF_OMOD_SHARED_SET(cref);
345 if (singleton) CREF_SINGLETON_SET(cref);
346
347 return cref;
348}
349
350static rb_cref_t *
351vm_cref_new(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval, int singleton)
352{
353 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, FALSE, singleton);
354}
355
356static rb_cref_t *
357vm_cref_new_use_prev(VALUE klass, rb_method_visibility_t visi, int module_func, rb_cref_t *prev_cref, int pushed_by_eval)
358{
359 return vm_cref_new0(klass, visi, module_func, prev_cref, pushed_by_eval, TRUE, FALSE);
360}
361
362static int
363ref_delete_symkey(VALUE key, VALUE value, VALUE unused)
364{
365 return SYMBOL_P(key) ? ST_DELETE : ST_CONTINUE;
366}
367
368static rb_cref_t *
369vm_cref_dup(const rb_cref_t *cref)
370{
371 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
372 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
373 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
374 int singleton = CREF_SINGLETON(cref);
375
376 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
377
378 if (!NIL_P(CREF_REFINEMENTS(cref))) {
379 VALUE ref = rb_hash_dup(CREF_REFINEMENTS(cref));
380 rb_hash_foreach(ref, ref_delete_symkey, Qnil);
381 CREF_REFINEMENTS_SET(new_cref, ref);
382 CREF_OMOD_SHARED_UNSET(new_cref);
383 }
384
385 return new_cref;
386}
387
388
389rb_cref_t *
390rb_vm_cref_dup_without_refinements(const rb_cref_t *cref)
391{
392 const rb_scope_visibility_t *visi = CREF_SCOPE_VISI(cref);
393 rb_cref_t *next_cref = CREF_NEXT(cref), *new_cref;
394 int pushed_by_eval = CREF_PUSHED_BY_EVAL(cref);
395 int singleton = CREF_SINGLETON(cref);
396
397 new_cref = vm_cref_new(cref->klass_or_self, visi->method_visi, visi->module_func, next_cref, pushed_by_eval, singleton);
398
399 if (!NIL_P(CREF_REFINEMENTS(cref))) {
400 CREF_REFINEMENTS_SET(new_cref, Qnil);
401 CREF_OMOD_SHARED_UNSET(new_cref);
402 }
403
404 return new_cref;
405}
406
407static rb_cref_t *
408vm_cref_new_toplevel(rb_execution_context_t *ec)
409{
410 rb_cref_t *cref = vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE /* toplevel visibility is private */, FALSE, NULL, FALSE, FALSE);
411 VALUE top_wrapper = rb_ec_thread_ptr(ec)->top_wrapper;
412
413 if (top_wrapper) {
414 cref = vm_cref_new(top_wrapper, METHOD_VISI_PRIVATE, FALSE, cref, FALSE, FALSE);
415 }
416
417 return cref;
418}
419
420rb_cref_t *
421rb_vm_cref_new_toplevel(void)
422{
423 return vm_cref_new_toplevel(GET_EC());
424}
425
426static void
427vm_cref_dump(const char *mesg, const rb_cref_t *cref)
428{
429 ruby_debug_printf("vm_cref_dump: %s (%p)\n", mesg, (void *)cref);
430
431 while (cref) {
432 ruby_debug_printf("= cref| klass: %s\n", RSTRING_PTR(rb_class_path(CREF_CLASS(cref))));
433 cref = CREF_NEXT(cref);
434 }
435}
436
437void
438rb_vm_block_ep_update(VALUE obj, const struct rb_block *dst, const VALUE *ep)
439{
440 *((const VALUE **)&dst->as.captured.ep) = ep;
441 RB_OBJ_WRITTEN(obj, Qundef, VM_ENV_ENVVAL(ep));
442}
443
444static void
445vm_bind_update_env(VALUE bindval, rb_binding_t *bind, VALUE envval)
446{
447 const rb_env_t *env = (rb_env_t *)envval;
448 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, env->iseq);
449 rb_vm_block_ep_update(bindval, &bind->block, env->ep);
450}
451
452#if VM_COLLECT_USAGE_DETAILS
453static void vm_collect_usage_operand(int insn, int n, VALUE op);
454static void vm_collect_usage_insn(int insn);
455static void vm_collect_usage_register(int reg, int isset);
456#endif
457
458static VALUE vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp);
459static VALUE vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
460 int argc, const VALUE *argv, int kw_splat, VALUE block_handler,
461 const rb_callable_method_entry_t *me);
462static VALUE vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE block_handler);
463
464#if USE_YJIT
465 // Counter that serves as a proxy for execution time: the total number of entry calls
466static uint64_t yjit_total_entry_hits = 0;
467
468// Number of calls used to estimate how hot an ISEQ is
469#define YJIT_CALL_COUNT_INTERV 20u
470
471 /// Test whether we are ready to compile an ISEQ or not
472static inline bool
473rb_yjit_threshold_hit(const rb_iseq_t *iseq, uint64_t entry_calls)
474{
475 yjit_total_entry_hits += 1;
476
477 // Record the number of calls at the beginning of the interval
478 if (entry_calls + YJIT_CALL_COUNT_INTERV == rb_yjit_call_threshold) {
479 iseq->body->yjit_calls_at_interv = yjit_total_entry_hits;
480 }
481
482 // Try to estimate the total time taken (total number of calls) to reach 20 calls to this ISEQ
483 // This gives us a ratio of how hot/cold this ISEQ is
484 if (entry_calls == rb_yjit_call_threshold) {
485 // We expect threshold 1 to compile everything immediately
486 if (rb_yjit_call_threshold < YJIT_CALL_COUNT_INTERV) {
487 return true;
488 }
489
490 uint64_t num_calls = yjit_total_entry_hits - iseq->body->yjit_calls_at_interv;
491
492 // Reject ISEQs that don't get called often enough
493 if (num_calls > rb_yjit_cold_threshold) {
494 rb_yjit_incr_counter("cold_iseq_entry");
495 return false;
496 }
497
498 return true;
499 }
500
501 return false;
502}
503#else
504#define rb_yjit_threshold_hit(iseq, entry_calls) false
505#endif
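/* Editor's note: a worked example of the hot/cold estimate above, using
 * illustrative numbers (both thresholds are configurable; these values are
 * assumptions). Say rb_yjit_call_threshold == 30 and
 * rb_yjit_cold_threshold == 200000. At entry call 10
 * (30 - YJIT_CALL_COUNT_INTERV) the global entry counter is snapshotted into
 * yjit_calls_at_interv; at entry call 30 the delta is checked. */
#if 0
static bool
example_cold_filter(uint64_t hits_at_call_10, uint64_t hits_at_call_30)
{
    /* If more than 200000 total entry calls elapsed while this ISEQ
     * collected its last 20 calls, it is relatively cold: skip compiling. */
    return (hits_at_call_30 - hits_at_call_10) > 200000;
}
#endif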
506
507#if USE_YJIT
508// Generate JIT code that supports the following kinds of ISEQ entries:
509// * The first ISEQ on vm_exec (e.g. <main>, or Ruby methods/blocks
510// called by a C method). The current frame has VM_FRAME_FLAG_FINISH.
511// The current vm_exec stops if JIT code returns a non-Qundef value.
512// * ISEQs called by the interpreter on vm_sendish (e.g. Ruby methods or
513// blocks called by a Ruby frame that isn't compiled or side-exited).
514// The current frame doesn't have VM_FRAME_FLAG_FINISH. The current
515 // vm_exec does NOT stop, regardless of whether the JIT code returns Qundef.
516static inline rb_jit_func_t
517yjit_compile(rb_execution_context_t *ec)
518{
519 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
520 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
521
522 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
523 if (body->jit_entry == NULL) {
524 body->jit_entry_calls++;
525 if (rb_yjit_threshold_hit(iseq, body->jit_entry_calls)) {
526 rb_yjit_compile_iseq(iseq, ec, false);
527 }
528 }
529 return body->jit_entry;
530}
531#else
532# define yjit_compile(ec) ((rb_jit_func_t)0)
533#endif
534
535#if USE_ZJIT
536static inline rb_jit_func_t
537zjit_compile(rb_execution_context_t *ec)
538{
539 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
540 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
541
542 if (body->jit_entry == NULL) {
543 body->jit_entry_calls++;
544
545 // At profile-threshold, rewrite some of the YARV instructions
546 // to zjit_* instructions to profile these instructions.
547 if (body->jit_entry_calls == rb_zjit_profile_threshold) {
548 rb_zjit_profile_enable(iseq);
549 }
550
551 // At call-threshold, compile the ISEQ with ZJIT.
552 if (body->jit_entry_calls == rb_zjit_call_threshold) {
553 rb_zjit_compile_iseq(iseq, ec, false);
554 }
555 }
556 return body->jit_entry;
557}
558#else
559# define zjit_compile(ec) ((rb_jit_func_t)0)
560#endif
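/* Editor's note: ZJIT's two-phase warm-up above, restated (illustrative
 * sketch; P and C come from the rb_zjit_*_threshold globals):
 *
 *   entry calls 1 .. P-1 : plain interpretation
 *   entry call  P        : rb_zjit_profile_enable(iseq) rewrites YARV insns
 *                          to their zjit_* profiling variants
 *   entry call  C (>= P) : rb_zjit_compile_iseq(iseq) compiles the ISEQ
 */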
561
562static inline void zjit_materialize_frames(rb_control_frame_t *cfp);
563
564// Execute JIT code compiled by yjit_compile() or zjit_compile()
565static inline VALUE
566jit_exec(rb_execution_context_t *ec)
567{
568#if USE_YJIT
569 if (rb_yjit_enabled_p) {
570 rb_jit_func_t func = yjit_compile(ec);
571 if (func) {
572 return func(ec, ec->cfp);
573 }
574 return Qundef;
575 }
576#endif
577
578#if USE_ZJIT
579 void *zjit_entry = rb_zjit_entry;
580 if (zjit_entry) {
581 rb_jit_func_t func = zjit_compile(ec);
582 if (func) {
583 VALUE result = ((rb_zjit_func_t)zjit_entry)(ec, ec->cfp, func);
584 // Materialize any remaining lightweight ZJIT frames on side exit.
585 // This is done here (once per JIT entry) instead of in each side exit
586 // to reduce generated code size.
587 if (UNDEF_P(result)) {
588 ec->cfp->jit_return = 0;
589 zjit_materialize_frames(ec->cfp);
590 }
591 return result;
592 }
593 }
594#endif
595 return Qundef;
596}
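/* Editor's note: a simplified sketch (an assumption about the caller, not a
 * copy of vm_exec) of how jit_exec()'s return value is consumed, per the
 * entry-kind comments above yjit_compile(). */
#if 0
static VALUE
example_dispatch(rb_execution_context_t *ec)
{
    VALUE result = jit_exec(ec);
    if (!UNDEF_P(result)) {
        return result; /* JIT code ran a FINISH frame to completion */
    }
    /* Qundef: nothing compiled yet (or the JIT code side-exited), so the
     * interpreter must execute the current frame. */
    return Qundef;
}
#endif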
597
598#if USE_YJIT || USE_ZJIT
599// Generate JIT code that supports the following kind of ISEQ entry:
600// * The first ISEQ pushed by vm_exec_handle_exception. The frame would
601// point to a location specified by a catch table, and it doesn't have
602// VM_FRAME_FLAG_FINISH. The current vm_exec stops if JIT code returns
603// a non-Qundef value. So you should not return a non-Qundef value
604// until ec->cfp is changed to a frame with VM_FRAME_FLAG_FINISH.
605static inline rb_jit_func_t
606jit_compile_exception(rb_execution_context_t *ec)
607{
608 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
609 struct rb_iseq_constant_body *body = ISEQ_BODY(iseq);
610
611#if USE_ZJIT
612 if (body->jit_exception == NULL && rb_zjit_enabled_p) {
613 body->jit_exception_calls++;
614
615 // At profile-threshold, rewrite some of the YARV instructions
616 // to zjit_* instructions to profile these instructions.
617 if (body->jit_exception_calls == rb_zjit_profile_threshold) {
618 rb_zjit_profile_enable(iseq);
619 }
620
621 // At call-threshold, compile the ISEQ with ZJIT.
622 if (body->jit_exception_calls == rb_zjit_call_threshold) {
623 rb_zjit_compile_iseq(iseq, ec, true);
624 }
625 }
626#endif
627
628#if USE_YJIT
629 // Increment the ISEQ's call counter and trigger JIT compilation if not compiled
630 if (body->jit_exception == NULL && rb_yjit_enabled_p) {
631 body->jit_exception_calls++;
632 if (body->jit_exception_calls == rb_yjit_call_threshold) {
633 rb_yjit_compile_iseq(iseq, ec, true);
634 }
635 }
636#endif
637 return body->jit_exception;
638}
639
640// Execute JIT code compiled by jit_compile_exception()
641static inline VALUE
642jit_exec_exception(rb_execution_context_t *ec)
643{
644 rb_jit_func_t func = jit_compile_exception(ec);
645 if (func) {
646 // Call the JIT code
647 return func(ec, ec->cfp);
648 }
649 else {
650 return Qundef;
651 }
652}
653#else
654# define jit_compile_exception(ec) ((rb_jit_func_t)0)
655# define jit_exec_exception(ec) Qundef
656#endif
657
658static void add_opt_method_entry(const rb_method_entry_t *me);
659
660#define RB_TYPE_2_P(obj, type1, type2) \
661 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2))
662#define RB_TYPE_3_P(obj, type1, type2, type3) \
663 (RB_TYPE_P(obj, type1) || RB_TYPE_P(obj, type2) || RB_TYPE_P(obj, type3))
664
665#define VM_ASSERT_TYPE(obj, type) \
666 VM_ASSERT(RB_TYPE_P(obj, type), #obj ": %s", rb_obj_info(obj))
667#define VM_ASSERT_TYPE2(obj, type1, type2) \
668 VM_ASSERT(RB_TYPE_2_P(obj, type1, type2), #obj ": %s", rb_obj_info(obj))
669#define VM_ASSERT_TYPE3(obj, type1, type2, type3) \
670 VM_ASSERT(RB_TYPE_3_P(obj, type1, type2, type3), #obj ": %s", rb_obj_info(obj))
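/* Editor's note: usage sketch for the assertion helpers above
 * (illustrative variables). */
#if 0
VM_ASSERT_TYPE(ary, T_ARRAY);              /* exactly one expected type */
VM_ASSERT_TYPE2(name, T_SYMBOL, T_STRING); /* either of two types */
#endif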
671
672#include "vm_insnhelper.c"
673
674#include "vm_exec.c"
675
676#include "vm_method.c"
677#include "vm_eval.c"
678
679#define PROCDEBUG 0
680
681VALUE rb_cRubyVM;
682 VALUE rb_cThread;
683VALUE rb_mRubyVMFrozenCore;
684VALUE rb_block_param_proxy;
685
686VALUE ruby_vm_const_missing_count = 0;
687rb_vm_t *ruby_current_vm_ptr = NULL;
688rb_ractor_t *ruby_single_main_ractor;
689bool ruby_vm_keep_script_lines;
690
691#ifdef RB_THREAD_LOCAL_SPECIFIER
692RB_THREAD_LOCAL_SPECIFIER rb_execution_context_t *ruby_current_ec;
693
694#ifdef RUBY_NT_SERIAL
695RB_THREAD_LOCAL_SPECIFIER rb_atomic_t ruby_nt_serial;
696#endif
697
698// no-inline decl on vm_core.h
699 rb_execution_context_t *
700rb_current_ec_noinline(void)
701{
702 return ruby_current_ec;
703}
704
705void
706rb_current_ec_set(rb_execution_context_t *ec)
707{
708 ruby_current_ec = ec;
709}
710
711
712#ifdef RB_THREAD_CURRENT_EC_NOINLINE
713 rb_execution_context_t *
714rb_current_ec(void)
715{
716 return ruby_current_ec;
717}
718
719#endif
720#else
721native_tls_key_t ruby_current_ec_key;
722
723// no-inline decl on vm_core.h
724 rb_execution_context_t *
725rb_current_ec_noinline(void)
726{
727 return native_tls_get(ruby_current_ec_key);
728}
729
730#endif
731
732rb_event_flag_t ruby_vm_event_flags = 0;
733rb_event_flag_t ruby_vm_event_enabled_global_flags = 0;
734unsigned int ruby_vm_c_events_enabled = 0;
735unsigned int ruby_vm_iseq_events_enabled = 0;
736
737rb_serial_t ruby_vm_constant_cache_invalidations = 0;
738rb_serial_t ruby_vm_constant_cache_misses = 0;
739rb_serial_t ruby_vm_global_cvar_state = 1;
740
741static const struct rb_callcache vm_empty_cc = {
742 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
743 .klass = Qundef,
744 .cme_ = NULL,
745 .call_ = vm_call_general,
746 .aux_ = {
747 .v = Qfalse,
748 }
749};
750
751static const struct rb_callcache vm_empty_cc_for_super = {
752 .flags = T_IMEMO | (imemo_callcache << FL_USHIFT) | VM_CALLCACHE_UNMARKABLE,
753 .klass = Qundef,
754 .cme_ = NULL,
755 .call_ = vm_call_super_method,
756 .aux_ = {
757 .v = Qfalse,
758 }
759};
760
761static void thread_free(void *ptr);
762
763void
764rb_vm_inc_const_missing_count(void)
765{
766 ruby_vm_const_missing_count += 1;
767}
768
769int
770rb_dtrace_setup(rb_execution_context_t *ec, VALUE klass, ID id,
771 struct ruby_dtrace_method_hook_args *args)
772{
773 enum ruby_value_type type;
774 if (!klass) {
775 if (!ec) ec = GET_EC();
776 if (!rb_ec_frame_method_id_and_class(ec, &id, 0, &klass) || !klass)
777 return FALSE;
778 }
779 if (RB_TYPE_P(klass, T_ICLASS)) {
780 klass = RBASIC(klass)->klass;
781 }
782 else if (RCLASS_SINGLETON_P(klass)) {
783 klass = RCLASS_ATTACHED_OBJECT(klass);
784 if (NIL_P(klass)) return FALSE;
785 }
786 type = BUILTIN_TYPE(klass);
787 if (type == T_CLASS || type == T_ICLASS || type == T_MODULE) {
788 VALUE name = rb_class_path(klass);
789 const char *classname, *filename;
790 const char *methodname = rb_id2name(id);
791 if (methodname && (filename = rb_source_location_cstr(&args->line_no)) != 0) {
792 if (NIL_P(name) || !(classname = StringValuePtr(name)))
793 classname = "<unknown>";
794 args->classname = classname;
795 args->methodname = methodname;
796 args->filename = filename;
797 args->klass = klass;
798 args->name = name;
799 return TRUE;
800 }
801 }
802 return FALSE;
803}
804
805extern unsigned int redblack_buffer_size;
806
807/*
808 * call-seq:
809 * RubyVM.stat -> Hash
810 * RubyVM.stat(hsh) -> hsh
811 * RubyVM.stat(Symbol) -> Numeric
812 *
813 * Returns a Hash containing implementation-dependent counters inside the VM.
814 *
815 * This hash includes information about method/constant caches:
816 *
817 * {
818 * :constant_cache_invalidations=>2,
819 * :constant_cache_misses=>14,
820 * :global_cvar_state=>27
821 * }
822 *
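 * A single counter can also be read directly:
 *
 *   RubyVM.stat(:global_cvar_state) #=> 27
 *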
823 * If <tt>USE_DEBUG_COUNTER</tt> is enabled, debug counters will be included.
824 *
825 * The contents of the hash are implementation specific and may be changed in
826 * the future.
827 *
828 * This method is only expected to work on C Ruby.
829 */
830static VALUE
831vm_stat(int argc, VALUE *argv, VALUE self)
832{
833 static VALUE sym_constant_cache_invalidations, sym_constant_cache_misses, sym_global_cvar_state, sym_next_shape_id;
834 static VALUE sym_shape_cache_size;
835 VALUE arg = Qnil;
836 VALUE hash = Qnil, key = Qnil;
837
838 if (rb_check_arity(argc, 0, 1) == 1) {
839 arg = argv[0];
840 if (SYMBOL_P(arg))
841 key = arg;
842 else if (RB_TYPE_P(arg, T_HASH))
843 hash = arg;
844 else
845 rb_raise(rb_eTypeError, "non-hash or symbol given");
846 }
847 else {
848 hash = rb_hash_new();
849 }
850
851#define S(s) sym_##s = ID2SYM(rb_intern_const(#s))
852 S(constant_cache_invalidations);
853 S(constant_cache_misses);
854 S(global_cvar_state);
855 S(next_shape_id);
856 S(shape_cache_size);
857#undef S
858
859#define SET(name, attr) \
860 if (key == sym_##name) \
861 return SERIALT2NUM(attr); \
862 else if (hash != Qnil) \
863 rb_hash_aset(hash, sym_##name, SERIALT2NUM(attr));
864
865 SET(constant_cache_invalidations, ruby_vm_constant_cache_invalidations);
866 SET(constant_cache_misses, ruby_vm_constant_cache_misses);
867 SET(global_cvar_state, ruby_vm_global_cvar_state);
868 SET(next_shape_id, (rb_serial_t)rb_shapes_count());
869 SET(shape_cache_size, (rb_serial_t)rb_shape_tree.cache_size);
870#undef SET
871
872#if USE_DEBUG_COUNTER
873 ruby_debug_counter_show_at_exit(FALSE);
874 for (size_t i = 0; i < RB_DEBUG_COUNTER_MAX; i++) {
875 const VALUE name = rb_sym_intern_ascii_cstr(rb_debug_counter_names[i]);
876 const VALUE boxed_value = SIZET2NUM(rb_debug_counter[i]);
877
878 if (key == name) {
879 return boxed_value;
880 }
881 else if (hash != Qnil) {
882 rb_hash_aset(hash, name, boxed_value);
883 }
884 }
885#endif
886
887 if (!NIL_P(key)) { /* matched key should return above */
888 rb_raise(rb_eArgError, "unknown key: %"PRIsVALUE, rb_sym2str(key));
889 }
890
891 return hash;
892}
893
894/* control stack frame */
895
896static void
897vm_set_top_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_box_t *box)
898{
899 if (ISEQ_BODY(iseq)->type != ISEQ_TYPE_TOP) {
900 rb_raise(rb_eTypeError, "Not a toplevel InstructionSequence");
901 }
902
903 /* for return */
904 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
905 rb_ec_thread_ptr(ec)->top_self,
906 GC_GUARDED_PTR(box),
907 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
908 ISEQ_BODY(iseq)->iseq_encoded, ec->cfp->sp,
909 ISEQ_BODY(iseq)->local_table_size, ISEQ_BODY(iseq)->stack_max);
910}
911
912static void
913vm_set_eval_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq, const rb_cref_t *cref, const struct rb_block *base_block)
914{
915 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_EVAL | VM_FRAME_FLAG_FINISH,
916 vm_block_self(base_block), VM_GUARDED_PREV_EP(vm_block_ep(base_block)),
917 (VALUE)cref, /* cref or me */
918 ISEQ_BODY(iseq)->iseq_encoded,
919 ec->cfp->sp, ISEQ_BODY(iseq)->local_table_size,
920 ISEQ_BODY(iseq)->stack_max);
921}
922
923static void
924vm_set_main_stack(rb_execution_context_t *ec, const rb_iseq_t *iseq)
925{
926 VALUE toplevel_binding = rb_const_get(rb_cObject, rb_intern("TOPLEVEL_BINDING"));
927 rb_binding_t *bind;
928
929 GetBindingPtr(toplevel_binding, bind);
930 RUBY_ASSERT_MESG(bind, "TOPLEVEL_BINDING is not built");
931
932 vm_set_eval_stack(ec, iseq, 0, &bind->block);
933
934 /* save binding */
935 if (ISEQ_BODY(iseq)->local_table_size > 0) {
936 vm_bind_update_env(toplevel_binding, bind, vm_make_env_object(ec, ec->cfp));
937 }
938}
939
940 rb_control_frame_t *
941rb_vm_get_binding_creatable_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
942{
943 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
944 if (CFP_ISEQ(cfp)) {
945 return (rb_control_frame_t *)cfp;
946 }
947 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
948 }
949 return 0;
950}
951
952 rb_control_frame_t *
953rb_vm_get_ruby_level_next_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
954{
955 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
956 if (VM_FRAME_RUBYFRAME_P(cfp)) {
957 return (rb_control_frame_t *)cfp;
958 }
959 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
960 }
961 return 0;
962}
963
964static rb_control_frame_t *
965vm_get_ruby_level_caller_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
966{
967 if (VM_FRAME_RUBYFRAME_P(cfp)) {
968 return (rb_control_frame_t *)cfp;
969 }
970
971 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
972
973 while (!RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
974 if (VM_FRAME_RUBYFRAME_P(cfp)) {
975 return (rb_control_frame_t *)cfp;
976 }
977
978 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_PASSED) == FALSE) {
979 break;
980 }
981 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
982 }
983 return 0;
984}
985
986void
987rb_vm_pop_cfunc_frame(void)
988{
989 rb_execution_context_t *ec = GET_EC();
990 rb_control_frame_t *cfp = ec->cfp;
991 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
992
993 EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, cfp->self, me->def->original_id, me->called_id, me->owner, Qnil);
994 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id);
995 vm_pop_frame(ec, cfp, cfp->ep);
996}
997
998void
999rb_vm_rewind_cfp(rb_execution_context_t *ec, rb_control_frame_t *cfp)
1000{
1001 /* check skipped frame */
1002 while (ec->cfp != cfp) {
1003#if VMDEBUG
1004 printf("skipped frame: %s\n", vm_frametype_name(ec->cfp));
1005#endif
1006 if (VM_FRAME_TYPE(ec->cfp) != VM_FRAME_MAGIC_CFUNC) {
1007 rb_vm_pop_frame(ec);
1008 }
1009 else { /* unlikely path */
1010 rb_vm_pop_cfunc_frame();
1011 }
1012 }
1013}
1014
1015/* at exit */
1016
1017void
1018ruby_vm_at_exit(void (*func)(rb_vm_t *))
1019{
1020 rb_vm_t *vm = GET_VM();
1021 rb_at_exit_list *nl = ALLOC(rb_at_exit_list);
1022 nl->func = func;
1023 nl->next = vm->at_exit;
1024 vm->at_exit = nl;
1025}
1026
1027static void
1028ruby_vm_run_at_exit_hooks(rb_vm_t *vm)
1029{
1030 rb_at_exit_list *l = vm->at_exit;
1031
1032 while (l) {
1033 rb_at_exit_list* t = l->next;
1034 rb_vm_at_exit_func *func = l->func;
1035 ruby_xfree(l);
1036 l = t;
1037 (*func)(vm);
1038 }
1039}
1040
1041/* Env */
1042
1043static VALUE check_env_value(const rb_env_t *env);
1044
1045static int
1046check_env(const rb_env_t *env)
1047{
1048 fputs("---\n", stderr);
1049 ruby_debug_printf("envptr: %p\n", (void *)&env->ep[0]);
1050 ruby_debug_printf("envval: %10p ", (void *)env->ep[1]);
1051 dp(env->ep[1]);
1052 ruby_debug_printf("ep: %10p\n", (void *)env->ep);
1053 if (rb_vm_env_prev_env(env)) {
1054 fputs(">>\n", stderr);
1055 check_env_value(rb_vm_env_prev_env(env));
1056 fputs("<<\n", stderr);
1057 }
1058 return 1;
1059}
1060
1061static VALUE
1062check_env_value(const rb_env_t *env)
1063{
1064 if (check_env(env)) {
1065 return (VALUE)env;
1066 }
1067 rb_bug("invalid env");
1068 return Qnil; /* unreachable */
1069}
1070
1071static VALUE
1072vm_block_handler_escape(const rb_execution_context_t *ec, VALUE block_handler)
1073{
1074 switch (vm_block_handler_type(block_handler)) {
1075 case block_handler_type_ifunc:
1076 case block_handler_type_iseq:
1077 return rb_vm_make_proc(ec, VM_BH_TO_CAPT_BLOCK(block_handler), rb_cProc);
1078
1079 case block_handler_type_symbol:
1080 case block_handler_type_proc:
1081 return block_handler;
1082 }
1083 VM_UNREACHABLE(vm_block_handler_escape);
1084 return Qnil;
1085}
1086
1087static VALUE
1088vm_make_env_each(const rb_execution_context_t * const ec, rb_control_frame_t *const cfp)
1089{
1090 const VALUE * const ep = cfp->ep;
1091 VALUE *env_body, *env_ep;
1092 int local_size, env_size;
1093
1094 if (VM_ENV_ESCAPED_P(ep)) {
1095 return VM_ENV_ENVVAL(ep);
1096 }
1097
1098 if (!VM_ENV_LOCAL_P(ep)) {
1099 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1100 if (!VM_ENV_ESCAPED_P(prev_ep)) {
1101 rb_control_frame_t *prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1102
1103 while (prev_cfp->ep != prev_ep) {
1104 prev_cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(prev_cfp);
1105 VM_ASSERT(prev_cfp->ep != NULL);
1106 }
1107
1108 vm_make_env_each(ec, prev_cfp);
1109 VM_FORCE_WRITE_SPECIAL_CONST(&ep[VM_ENV_DATA_INDEX_SPECVAL], VM_GUARDED_PREV_EP(prev_cfp->ep));
1110 }
1111 }
1112 else {
1113 VM_ASSERT(VM_ENV_LOCAL_P(ep));
1114 VALUE block_handler = VM_ENV_BLOCK_HANDLER(ep);
1115
1116 if (block_handler != VM_BLOCK_HANDLER_NONE) {
1117 VALUE blockprocval = vm_block_handler_escape(ec, block_handler);
1118 VM_STACK_ENV_WRITE(ep, VM_ENV_DATA_INDEX_SPECVAL, blockprocval);
1119 }
1120 }
1121
1122 const rb_iseq_t *iseq = CFP_ISEQ(cfp);
1123 if (!VM_FRAME_RUBYFRAME_P(cfp)) {
1124 local_size = VM_ENV_DATA_SIZE;
1125 }
1126 else {
1127 local_size = ISEQ_BODY(iseq)->local_table_size;
1128 if (ISEQ_BODY(iseq)->param.flags.forwardable && VM_ENV_LOCAL_P(cfp->ep)) {
1129 int ci_offset = local_size - ISEQ_BODY(iseq)->param.size + VM_ENV_DATA_SIZE;
1130
1131 CALL_INFO ci = (CALL_INFO)VM_CF_LEP(cfp)[-ci_offset];
1132 local_size += vm_ci_argc(ci);
1133 }
1134 local_size += VM_ENV_DATA_SIZE;
1135 }
1136
1137 // Invalidate JIT code that assumes cfp->ep == vm_base_ptr(cfp).
1138 // This is done before creating the imemo_env because VM_STACK_ENV_WRITE
1139 // below leaves the on-stack ep in a state that is unsafe to GC.
1140 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1141 rb_yjit_invalidate_ep_is_bp(iseq);
1142 rb_zjit_invalidate_no_ep_escape(iseq);
1143 }
1144
1145 /*
1146 * # local variables on a stack frame (N == local_size)
1147 * [lvar1, lvar2, ..., lvarN, SPECVAL]
1148 * ^
1149 * ep[0]
1150 *
1151 * # moved local variables
1152 * [lvar1, lvar2, ..., lvarN, SPECVAL, Envval, BlockProcval (if needed)]
1153 * ^ ^
1154 * env->env[0] ep[0]
1155 */
1156
1157 env_size = local_size +
1158 1 /* envval */;
1159
1160 // Careful with order in the following sequence. Each allocation can move objects.
1161 env_body = ALLOC_N(VALUE, env_size);
1162 rb_env_t *env = IMEMO_NEW(rb_env_t, imemo_env, 0);
1163
1164 // Set up env without WB since it's brand new (similar to newobj_init(), newobj_fill())
1165 MEMCPY(env_body, ep - (local_size - 1 /* specval */), VALUE, local_size);
1166
1167 env_ep = &env_body[local_size - 1 /* specval */];
1168 env_ep[VM_ENV_DATA_INDEX_ENV] = (VALUE)env;
1169
1170 env->iseq = (rb_iseq_t *)(VM_FRAME_RUBYFRAME_P(cfp) ? iseq : NULL);
1171 env->ep = env_ep;
1172 env->env = env_body;
1173 env->env_size = env_size;
1174
1175 cfp->ep = env_ep;
1176 VM_ENV_FLAGS_SET(env_ep, VM_ENV_FLAG_ESCAPED | VM_ENV_FLAG_WB_REQUIRED);
1177 VM_STACK_ENV_WRITE(ep, 0, (VALUE)env); /* GC mark */
1178
1179#if 0
1180 for (int i = 0; i < local_size; i++) {
1181 if (VM_FRAME_RUBYFRAME_P(cfp)) {
1182 /* clear value stack for GC */
1183 ep[-local_size + i] = 0;
1184 }
1185 }
1186#endif
1187
1188 return (VALUE)env;
1189}
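/* Editor's note: the Ruby-level situation that forces the escape above
 * (illustrative). Capturing a block as a Proc lets the locals outlive the
 * frame, so they are copied off the VM stack into a heap imemo_env:
 *
 *   def m
 *     x = 1
 *     proc { x }   # the Proc outlives m's frame; x must escape to the heap
 *   end
 */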
1190
1191static VALUE
1192vm_make_env_object(const rb_execution_context_t *ec, rb_control_frame_t *cfp)
1193{
1194 VALUE envval = vm_make_env_each(ec, cfp);
1195
1196 if (PROCDEBUG) {
1197 check_env_value((const rb_env_t *)envval);
1198 }
1199
1200 return envval;
1201}
1202
1203void
1204rb_vm_stack_to_heap(rb_execution_context_t *ec)
1205{
1206 rb_control_frame_t *cfp = ec->cfp;
1207 while ((cfp = rb_vm_get_binding_creatable_next_cfp(ec, cfp)) != 0) {
1208 vm_make_env_object(ec, cfp);
1209 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
1210 }
1211}
1212
1213const rb_env_t *
1214rb_vm_env_prev_env(const rb_env_t *env)
1215{
1216 const VALUE *ep = env->ep;
1217
1218 if (VM_ENV_LOCAL_P(ep)) {
1219 return NULL;
1220 }
1221 else {
1222 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
1223 return VM_ENV_ENVVAL_PTR(prev_ep);
1224 }
1225}
1226
1227static int
1228collect_local_variables_in_iseq(const rb_iseq_t *iseq, const struct local_var_list *vars)
1229{
1230 unsigned int i;
1231 if (!iseq) return 0;
1232 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1233 local_var_list_add(vars, ISEQ_BODY(iseq)->local_table[i]);
1234 }
1235 return 1;
1236}
1237
1238static void
1239collect_local_variables_in_env(const rb_env_t *env, const struct local_var_list *vars)
1240{
1241 do {
1242 if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break;
1243 collect_local_variables_in_iseq(env->iseq, vars);
1244 } while ((env = rb_vm_env_prev_env(env)) != NULL);
1245}
1246
1247static int
1248vm_collect_local_variables_in_heap(const VALUE *ep, const struct local_var_list *vars)
1249{
1250 if (VM_ENV_ESCAPED_P(ep)) {
1251 collect_local_variables_in_env(VM_ENV_ENVVAL_PTR(ep), vars);
1252 return 1;
1253 }
1254 else {
1255 return 0;
1256 }
1257}
1258
1259VALUE
1260rb_vm_env_local_variables(const rb_env_t *env)
1261{
1262 struct local_var_list vars;
1263 local_var_list_init(&vars);
1264 collect_local_variables_in_env(env, &vars);
1265 return local_var_list_finish(&vars);
1266}
1267
1268VALUE
1269rb_vm_env_numbered_parameters(const rb_env_t *env)
1270{
1271 struct local_var_list vars;
1272 local_var_list_init(&vars);
1273 // if (VM_ENV_FLAGS(env->ep, VM_ENV_FLAG_ISOLATED)) break; // TODO: is this needed?
1274 const rb_iseq_t *iseq = env->iseq;
1275 unsigned int i;
1276 if (!iseq) return 0;
1277 for (i = 0; i < ISEQ_BODY(iseq)->local_table_size; i++) {
1278 numparam_list_add(&vars, ISEQ_BODY(iseq)->local_table[i]);
1279 }
1280 return local_var_list_finish(&vars);
1281}
1282
1283VALUE
1284rb_iseq_local_variables(const rb_iseq_t *iseq)
1285{
1286 struct local_var_list vars;
1287 local_var_list_init(&vars);
1288 while (collect_local_variables_in_iseq(iseq, &vars)) {
1289 iseq = ISEQ_BODY(iseq)->parent_iseq;
1290 }
1291 return local_var_list_finish(&vars);
1292}
1293
1294/* Proc */
1295
1296static VALUE
1297vm_proc_create_from_captured(VALUE klass,
1298 const struct rb_captured_block *captured,
1299 enum rb_block_type block_type,
1300 int8_t is_from_method, int8_t is_lambda)
1301{
1302 VALUE procval = rb_proc_alloc(klass);
1303 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1304
1305 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), captured->ep));
1306
1307 /* copy block */
1308 RB_OBJ_WRITE(procval, &proc->block.as.captured.code.val, captured->code.val);
1309 RB_OBJ_WRITE(procval, &proc->block.as.captured.self, captured->self);
1310 rb_vm_block_ep_update(procval, &proc->block, captured->ep);
1311
1312 vm_block_type_set(&proc->block, block_type);
1313 proc->is_from_method = is_from_method;
1314 proc->is_lambda = is_lambda;
1315
1316 return procval;
1317}
1318
1319void
1320rb_vm_block_copy(VALUE obj, const struct rb_block *dst, const struct rb_block *src)
1321{
1322 /* copy block */
1323 switch (vm_block_type(src)) {
1324 case block_type_iseq:
1325 case block_type_ifunc:
1326 RB_OBJ_WRITE(obj, &dst->as.captured.self, src->as.captured.self);
1327 RB_OBJ_WRITE(obj, &dst->as.captured.code.val, src->as.captured.code.val);
1328 rb_vm_block_ep_update(obj, dst, src->as.captured.ep);
1329 break;
1330 case block_type_symbol:
1331 RB_OBJ_WRITE(obj, &dst->as.symbol, src->as.symbol);
1332 break;
1333 case block_type_proc:
1334 RB_OBJ_WRITE(obj, &dst->as.proc, src->as.proc);
1335 break;
1336 }
1337}
1338
1339static VALUE
1340proc_create(VALUE klass, const struct rb_block *block, int8_t is_from_method, int8_t is_lambda)
1341{
1342 VALUE procval = rb_proc_alloc(klass);
1343 rb_proc_t *proc = RTYPEDDATA_DATA(procval);
1344
1345 VM_ASSERT(VM_EP_IN_HEAP_P(GET_EC(), vm_block_ep(block)));
1346 rb_vm_block_copy(procval, &proc->block, block);
1347 vm_block_type_set(&proc->block, block->type);
1348 proc->is_from_method = is_from_method;
1349 proc->is_lambda = is_lambda;
1350
1351 return procval;
1352}
1353
1354VALUE
1355rb_proc_dup(VALUE self)
1356{
1357 VALUE procval;
1358 rb_proc_t *src;
1359
1360 GetProcPtr(self, src);
1361
1362 switch (vm_block_type(&src->block)) {
1363 case block_type_ifunc:
1364 procval = rb_func_proc_dup(self);
1365 break;
1366 default:
1367 procval = proc_create(rb_obj_class(self), &src->block, src->is_from_method, src->is_lambda);
1368 break;
1369 }
1370
1371 if (RB_OBJ_SHAREABLE_P(self)) RB_OBJ_SET_SHAREABLE(procval);
1372 RB_GC_GUARD(self); /* for: body = rb_proc_dup(body) */
1373 return procval;
1374}
1375
1376 struct collect_outer_variable_name_data {
1377 VALUE ary;
1378 VALUE read_only;
1379 bool yield;
1380 bool isolate;
1381};
1382
1383static VALUE
1384ID2NUM(ID id)
1385{
1386 if (SIZEOF_VOIDP > SIZEOF_LONG)
1387 return ULL2NUM(id);
1388 else
1389 return ULONG2NUM(id);
1390}
1391
1392static ID
1393NUM2ID(VALUE num)
1394{
1395 if (SIZEOF_VOIDP > SIZEOF_LONG)
1396 return (ID)NUM2ULL(num);
1397 else
1398 return (ID)NUM2ULONG(num);
1399}
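/* Editor's note: sketch of why ID2NUM()/NUM2ID() above exist. IDs are
 * pointer-sized, so they are stored in Ruby Arrays (see
 * collect_outer_variable_names() below) as integers wide enough to
 * round-trip. */
#if 0
static void
example_id_roundtrip(void)
{
    ID id = rb_intern("foo");
    VM_ASSERT(NUM2ID(ID2NUM(id)) == id);
}
#endif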
1400
1401static enum rb_id_table_iterator_result
1402collect_outer_variable_names(ID id, VALUE val, void *ptr)
1403{
1404 struct collect_outer_variable_name_data *data = (struct collect_outer_variable_name_data *)ptr;
1405
1406 if (id == rb_intern("yield")) {
1407 data->yield = true;
1408 }
1409 else {
1410 VALUE *store;
1411 if (data->isolate ||
1412 val == Qtrue /* write */) {
1413 store = &data->ary;
1414 }
1415 else {
1416 store = &data->read_only;
1417 }
1418 if (*store == Qfalse) *store = rb_ary_new();
1419 rb_ary_push(*store, ID2NUM(id));
1420 }
1421 return ID_TABLE_CONTINUE;
1422}
1423
1424static const rb_env_t *
1425env_copy(const VALUE *src_ep, VALUE read_only_variables)
1426{
1427 const rb_env_t *src_env = (rb_env_t *)VM_ENV_ENVVAL(src_ep);
1428 VM_ASSERT(src_env->ep == src_ep);
1429
1430 VALUE *env_body = ZALLOC_N(VALUE, src_env->env_size); // fill with Qfalse
1431 VALUE *ep = &env_body[src_env->env_size - 2];
1432 const rb_env_t *copied_env = vm_env_new(ep, env_body, src_env->env_size, src_env->iseq);
1433
1434 // Copy after allocations above, since they can move objects in src_ep.
1435 VALUE svar_val = src_ep[VM_ENV_DATA_INDEX_ME_CREF];
1436 if (imemo_type_p(svar_val, imemo_svar)) {
1437 const struct vm_svar *svar = (struct vm_svar *)svar_val;
1438
1439 if (svar->cref_or_me) {
1440 svar_val = svar->cref_or_me;
1441 }
1442 else {
1443 svar_val = Qfalse;
1444 }
1445 }
1446 RB_OBJ_WRITE(copied_env, &ep[VM_ENV_DATA_INDEX_ME_CREF], svar_val);
1447
1448 ep[VM_ENV_DATA_INDEX_FLAGS] = src_ep[VM_ENV_DATA_INDEX_FLAGS] | VM_ENV_FLAG_ISOLATED;
1449 if (!VM_ENV_LOCAL_P(src_ep)) {
1450 VM_ENV_FLAGS_SET(ep, VM_ENV_FLAG_LOCAL);
1451 }
1452
1453 if (read_only_variables) {
1454 for (int i=RARRAY_LENINT(read_only_variables)-1; i>=0; i--) {
1455 ID id = NUM2ID(RARRAY_AREF(read_only_variables, i));
1456
1457 const struct rb_iseq_constant_body *body = ISEQ_BODY(src_env->iseq);
1458 for (unsigned int j=0; j<body->local_table_size; j++) {
1459 if (id == body->local_table[j]) {
1460 // check reassignment
1461 if (body->lvar_states[j] == lvar_reassigned) {
1462 VALUE name = rb_id2str(id);
1463 VALUE msg = rb_sprintf("cannot make a shareable Proc because "
1464 "the outer variable '%" PRIsVALUE "' may be reassigned.", name);
1465 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1466 }
1467
1468 // check shareable
1469 VALUE v = src_env->env[j];
1470 if (!rb_ractor_shareable_p(v)) {
1471 VALUE name = rb_id2str(id);
1472 VALUE msg = rb_sprintf("cannot make a shareable Proc because it can refer"
1473 " unshareable object %+" PRIsVALUE " from ", v);
1474 if (name)
1475 rb_str_catf(msg, "variable '%" PRIsVALUE "'", name);
1476 else
1477 rb_str_cat_cstr(msg, "a hidden variable");
1478 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, msg));
1479 }
1480 RB_OBJ_WRITE((VALUE)copied_env, &env_body[j], v);
1481 rb_ary_delete_at(read_only_variables, i);
1482 break;
1483 }
1484 }
1485 }
1486 }
1487
1488 if (!VM_ENV_LOCAL_P(src_ep)) {
1489 const VALUE *prev_ep = VM_ENV_PREV_EP(src_env->ep);
1490 const rb_env_t *new_prev_env = env_copy(prev_ep, read_only_variables);
1491 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_GUARDED_PREV_EP(new_prev_env->ep);
1492 RB_OBJ_WRITTEN(copied_env, Qundef, new_prev_env);
1493 VM_ENV_FLAGS_UNSET(ep, VM_ENV_FLAG_LOCAL);
1494 }
1495 else {
1496 ep[VM_ENV_DATA_INDEX_SPECVAL] = VM_BLOCK_HANDLER_NONE;
1497 }
1498
1499 RB_OBJ_SET_SHAREABLE((VALUE)copied_env);
1500 return copied_env;
1501}
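/* Editor's note: the Ruby-level behavior enforced by env_copy() above
 * (illustrative, error text paraphrased):
 *
 *   a = [1]
 *   Ractor.make_shareable(proc { a })
 *   #=> Ractor::IsolationError (the captured `a` refers to an unshareable
 *   #   Array)
 *
 * Reassigning a captured outer variable is rejected the same way, via the
 * lvar_reassigned check. */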
1502
1503static void
1504proc_isolate_env(VALUE self, rb_proc_t *proc, VALUE read_only_variables)
1505{
1506 const struct rb_captured_block *captured = &proc->block.as.captured;
1507 const rb_env_t *env = env_copy(captured->ep, read_only_variables);
1508 *((const VALUE **)&proc->block.as.captured.ep) = env->ep;
1509 RB_OBJ_WRITTEN(self, Qundef, env);
1510}
1511
1512static VALUE
1513proc_shared_outer_variables(struct rb_id_table *outer_variables, bool isolate, const char *message)
1514{
1515 struct collect_outer_variable_name_data data = {
1516 .isolate = isolate,
1517 .ary = Qfalse,
1518 .read_only = Qfalse,
1519 .yield = false,
1520 };
1521 rb_id_table_foreach(outer_variables, collect_outer_variable_names, (void *)&data);
1522
1523 if (data.ary != Qfalse) {
1524 VALUE str = rb_sprintf("can not %s because it accesses outer variables", message);
1525 VALUE ary = data.ary;
1526 const char *sep = " (";
1527 for (long i = 0; i < RARRAY_LEN(ary); i++) {
1528 VALUE name = rb_id2str(NUM2ID(RARRAY_AREF(ary, i)));
1529 if (!name) continue;
1530 rb_str_cat_cstr(str, sep);
1531 sep = ", ";
1532 rb_str_append(str, name);
1533 }
1534 if (*sep == ',') rb_str_cat_cstr(str, ")");
1535 rb_str_cat_cstr(str, data.yield ? " and uses 'yield'." : ".");
1536 rb_exc_raise(rb_exc_new_str(rb_eRactorIsolationError, str));
1537 }
1538 else if (data.yield) {
1539 rb_raise(rb_eRactorIsolationError, "can not %s because it uses 'yield'.", message);
1540 }
1541
1542 return data.read_only;
1543}
1544
1545VALUE
1546rb_proc_isolate_bang(VALUE self, VALUE replace_self)
1547{
1548 const rb_iseq_t *iseq = vm_proc_iseq(self);
1549
1550 if (iseq) {
1551 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1552
1553 if (!UNDEF_P(replace_self)) {
1554 VM_ASSERT(rb_ractor_shareable_p(replace_self));
1555 RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
1556 }
1557
1558 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1559
1560 if (ISEQ_BODY(iseq)->outer_variables) {
1561 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, true, "isolate a Proc");
1562 }
1563
1564 proc_isolate_env(self, proc, Qfalse);
1565 proc->is_isolated = TRUE;
1566 RB_OBJ_WRITE(self, &proc->block.as.captured.self, Qnil);
1567 }
1568
1569 RB_OBJ_SET_SHAREABLE(self);
1570 return self;
1571}
1572
1573VALUE
1574rb_proc_isolate(VALUE self)
1575{
1576 VALUE dst = rb_proc_dup(self);
1577 rb_proc_isolate_bang(dst, Qundef);
1578 return dst;
1579}
1580
1581VALUE
1582rb_proc_ractor_make_shareable(VALUE self, VALUE replace_self)
1583{
1584 const rb_iseq_t *iseq = vm_proc_iseq(self);
1585
1586 if (iseq) {
1587 rb_proc_t *proc = (rb_proc_t *)RTYPEDDATA_DATA(self);
1588
1589 if (!UNDEF_P(replace_self)) {
1590 RB_OBJ_WRITE(self, &proc->block.as.captured.self, replace_self);
1591 }
1592
1593 if (proc->block.type != block_type_iseq) rb_raise(rb_eRuntimeError, "not supported yet");
1594
1595 if (!rb_ractor_shareable_p(vm_block_self(&proc->block))) {
1596 rb_raise(rb_eRactorIsolationError,
1597 "Proc's self is not shareable: %" PRIsVALUE,
1598 self);
1599 }
1600
1601 VALUE read_only_variables = Qfalse;
1602
1603 if (ISEQ_BODY(iseq)->outer_variables) {
1604 read_only_variables =
1605 proc_shared_outer_variables(ISEQ_BODY(iseq)->outer_variables, false, "make a Proc shareable");
1606 }
1607
1608 proc_isolate_env(self, proc, read_only_variables);
1609 proc->is_isolated = TRUE;
1610 }
1611 else {
1612 const struct rb_block *block = vm_proc_block(self);
1613 if (block->type != block_type_symbol) rb_raise(rb_eRuntimeError, "not supported yet");
1614
1615 VALUE proc_self = vm_block_self(block);
1616 if (!rb_ractor_shareable_p(proc_self)) {
1617 rb_raise(rb_eRactorIsolationError,
1618 "Proc's self is not shareable: %" PRIsVALUE,
1619 self);
1620 }
1621 }
1622
1623 RB_OBJ_SET_FROZEN_SHAREABLE(self);
1624 return self;
1625}
1626
1627VALUE
1628rb_vm_make_proc_lambda(const rb_execution_context_t *ec, const struct rb_captured_block *captured, VALUE klass, int8_t is_lambda)
1629{
1630 VALUE procval;
1631 enum imemo_type code_type = imemo_type(captured->code.val);
1632
1633 if (!VM_ENV_ESCAPED_P(captured->ep)) {
1634 rb_control_frame_t *cfp = VM_CAPTURED_BLOCK_TO_CFP(captured);
1635 vm_make_env_object(ec, cfp);
1636 }
1637
1638 VM_ASSERT(VM_EP_IN_HEAP_P(ec, captured->ep));
1639 VM_ASSERT(code_type == imemo_iseq || code_type == imemo_ifunc);
1640
1641 procval = vm_proc_create_from_captured(klass, captured,
1642 code_type == imemo_iseq ? block_type_iseq : block_type_ifunc,
1643 FALSE, is_lambda);
1644
1645 if (code_type == imemo_ifunc) {
1646 struct vm_ifunc *ifunc = (struct vm_ifunc *)captured->code.val;
1647 if (ifunc->svar_lep) {
1648 VALUE ep0 = ifunc->svar_lep[0];
1649 if (RB_TYPE_P(ep0, T_IMEMO) && imemo_type_p(ep0, imemo_env)) {
1650 // `ep0 == imemo_env` means this ep is escaped to heap (in env object).
1651 const rb_env_t *env = (const rb_env_t *)ep0;
1652 ifunc->svar_lep = (VALUE *)env->ep;
1653 }
1654 else {
1655 VM_ASSERT(FIXNUM_P(ep0));
1656 if (ep0 & VM_ENV_FLAG_ESCAPED) {
1657 // ok. do nothing
1658 }
1659 else {
1660 ifunc->svar_lep = NULL;
1661 }
1662 }
1663 }
1664 }
1665
1666 return procval;
1667}
1668
1669/* Binding */
1670
1671VALUE
1672rb_vm_make_binding(const rb_execution_context_t *ec, const rb_control_frame_t *src_cfp)
1673{
1674 rb_control_frame_t *cfp = rb_vm_get_binding_creatable_next_cfp(ec, src_cfp);
1675 rb_control_frame_t *ruby_level_cfp = rb_vm_get_ruby_level_next_cfp(ec, src_cfp);
1676 VALUE bindval, envval;
1677 rb_binding_t *bind;
1678
1679 if (cfp == 0 || ruby_level_cfp == 0) {
1680 rb_raise(rb_eRuntimeError, "Can't create Binding Object on top of Fiber.");
1681 }
1682 if (!VM_FRAME_RUBYFRAME_P(src_cfp) &&
1683 !VM_FRAME_RUBYFRAME_P(RUBY_VM_PREVIOUS_CONTROL_FRAME(src_cfp))) {
1684 rb_raise(rb_eRuntimeError, "Cannot create Binding object for non-Ruby caller");
1685 }
1686
1687 envval = vm_make_env_object(ec, cfp);
1688 bindval = rb_binding_alloc(rb_cBinding);
1689 GetBindingPtr(bindval, bind);
1690 vm_bind_update_env(bindval, bind, envval);
1691 RB_OBJ_WRITE(bindval, &bind->block.as.captured.self, cfp->self);
1692 RB_OBJ_WRITE(bindval, &bind->block.as.captured.code.iseq, CFP_ISEQ(cfp));
1693 RB_OBJ_WRITE(bindval, &bind->pathobj, ISEQ_BODY(CFP_ISEQ(ruby_level_cfp))->location.pathobj);
1694 bind->first_lineno = rb_vm_get_sourceline(ruby_level_cfp);
1695
1696 return bindval;
1697}
1698
1699const VALUE *
1700rb_binding_add_dynavars(VALUE bindval, rb_binding_t *bind, int dyncount, const ID *dynvars)
1701{
1702 VALUE envval, pathobj = bind->pathobj;
1703 VALUE path = pathobj_path(pathobj);
1704 VALUE realpath = pathobj_realpath(pathobj);
1705 const struct rb_block *base_block;
1706 const rb_env_t *env;
1707 rb_execution_context_t *ec = GET_EC();
1708 const rb_iseq_t *base_iseq, *iseq;
1709 rb_node_scope_t tmp_node;
1710
1711 if (dyncount < 0) return 0;
1712
1713 base_block = &bind->block;
1714 base_iseq = vm_block_iseq(base_block);
1715
1716 VALUE idtmp = 0;
1717 rb_ast_id_table_t *dyns = ALLOCV(idtmp, sizeof(rb_ast_id_table_t) + dyncount * sizeof(ID));
1718 dyns->size = dyncount;
1719 MEMCPY(dyns->ids, dynvars, ID, dyncount);
1720
1721 rb_node_init(RNODE(&tmp_node), NODE_SCOPE);
1722 tmp_node.nd_tbl = dyns;
1723 tmp_node.nd_body = 0;
1724 tmp_node.nd_parent = NULL;
1725 tmp_node.nd_args = 0;
1726
1727 VALUE ast_value = rb_ruby_ast_new(RNODE(&tmp_node));
1728
1729 if (base_iseq) {
1730 iseq = rb_iseq_new(ast_value, ISEQ_BODY(base_iseq)->location.label, path, realpath, base_iseq, ISEQ_TYPE_EVAL);
1731 }
1732 else {
1733 VALUE tempstr = rb_fstring_lit("<temp>");
1734 iseq = rb_iseq_new_top(ast_value, tempstr, tempstr, tempstr, NULL);
1735 }
1736 tmp_node.nd_tbl = 0; /* reset table */
1737 ALLOCV_END(idtmp);
1738
1739 vm_set_eval_stack(ec, iseq, 0, base_block);
1740 vm_bind_update_env(bindval, bind, envval = vm_make_env_object(ec, ec->cfp));
1741 rb_vm_pop_frame(ec);
1742
1743 env = (const rb_env_t *)envval;
1744 return env->env;
1745}
1746
1747/* C -> Ruby: block */
1748
1749static inline void
1750invoke_block(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_cref_t *cref, VALUE type, int opt_pc)
1751{
1752 int arg_size = ISEQ_BODY(iseq)->param.size;
1753
1754 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_FINISH, self,
1755 VM_GUARDED_PREV_EP(captured->ep),
1756 (VALUE)cref, /* cref or method */
1757 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1758 ec->cfp->sp + arg_size,
1759 ISEQ_BODY(iseq)->local_table_size - arg_size,
1760 ISEQ_BODY(iseq)->stack_max);
1761}
1762
1763static inline void
1764invoke_bmethod(rb_execution_context_t *ec, const rb_iseq_t *iseq, VALUE self, const struct rb_captured_block *captured, const rb_callable_method_entry_t *me, VALUE type, int opt_pc)
1765{
1766 /* bmethod call from outside the VM */
1767 int arg_size = ISEQ_BODY(iseq)->param.size;
1768
1769 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
1770
1771 vm_push_frame(ec, iseq, type | VM_FRAME_FLAG_BMETHOD, self,
1772 VM_GUARDED_PREV_EP(captured->ep),
1773 (VALUE)me,
1774 ISEQ_BODY(iseq)->iseq_encoded + opt_pc,
1775 ec->cfp->sp + 1 /* self */ + arg_size,
1776 ISEQ_BODY(iseq)->local_table_size - arg_size,
1777 ISEQ_BODY(iseq)->stack_max);
1778
1779 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_FINISH);
1780}
1781
1782ALWAYS_INLINE(static VALUE
1783 invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1784 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1785 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me));
1786
1787static inline VALUE
1788invoke_iseq_block_from_c(rb_execution_context_t *ec, const struct rb_captured_block *captured,
1789 VALUE self, int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler,
1790 const rb_cref_t *cref, int is_lambda, const rb_callable_method_entry_t *me)
1791{
1792 const rb_iseq_t *iseq = rb_iseq_check(captured->code.iseq);
1793 int opt_pc;
1794 VALUE type = VM_FRAME_MAGIC_BLOCK | (is_lambda ? VM_FRAME_FLAG_LAMBDA : 0);
1795 rb_control_frame_t *cfp = ec->cfp;
1796 VALUE *sp = cfp->sp;
1797 int flags = (kw_splat ? VM_CALL_KW_SPLAT : 0);
1798 VALUE *use_argv = (VALUE *)argv;
1799 VALUE av[2];
1800
1801 stack_check(ec);
1802
1803 if (UNLIKELY(argc > VM_ARGC_STACK_MAX) &&
1804 (VM_ARGC_STACK_MAX >= 1 ||
1805 /* Skip ruby array for potential autosplat case */
1806 (argc != 1 || is_lambda))) {
1807 use_argv = vm_argv_ruby_array(av, argv, &flags, &argc, kw_splat);
1808 }
1809
1810 CHECK_VM_STACK_OVERFLOW(cfp, argc + 1);
1811 vm_check_canary(ec, sp);
1812
1813 VALUE *stack_argv = sp;
1814 if (me) {
1815 *sp = self; // bmethods need `self` on the VM stack
1816 stack_argv++;
1817 }
1818 cfp->sp = stack_argv + argc;
1819 MEMCPY(stack_argv, use_argv, VALUE, argc); // restrict: new stack space
1820
1821 opt_pc = vm_yield_setup_args(ec, iseq, argc, stack_argv, flags, passed_block_handler,
1822 (is_lambda ? arg_setup_method : arg_setup_block));
1823 cfp->sp = sp;
1824
1825 if (me == NULL) {
1826 invoke_block(ec, iseq, self, captured, cref, type, opt_pc);
1827 }
1828 else {
1829 invoke_bmethod(ec, iseq, self, captured, me, type, opt_pc);
1830 }
1831
1832 return vm_exec(ec);
1833}
1834
1835static VALUE
1836invoke_block_from_c_bh(rb_execution_context_t *ec, VALUE block_handler,
1837 int argc, const VALUE *argv,
1838 int kw_splat, VALUE passed_block_handler, const rb_cref_t *cref,
1839 int is_lambda, int force_blockarg)
1840{
1841 again:
1842 switch (vm_block_handler_type(block_handler)) {
1843 case block_handler_type_iseq:
1844 {
1845 const struct rb_captured_block *captured = VM_BH_TO_ISEQ_BLOCK(block_handler);
1846 return invoke_iseq_block_from_c(ec, captured, captured->self,
1847 argc, argv, kw_splat, passed_block_handler,
1848 cref, is_lambda, NULL);
1849 }
1850 case block_handler_type_ifunc:
1851 return vm_yield_with_cfunc(ec, VM_BH_TO_IFUNC_BLOCK(block_handler),
1852 VM_BH_TO_IFUNC_BLOCK(block_handler)->self,
1853 argc, argv, kw_splat, passed_block_handler, NULL);
1854 case block_handler_type_symbol:
1855 return vm_yield_with_symbol(ec, VM_BH_TO_SYMBOL(block_handler),
1856 argc, argv, kw_splat, passed_block_handler);
1857 case block_handler_type_proc:
1858 if (force_blockarg == FALSE) {
1859 is_lambda = block_proc_is_lambda(VM_BH_TO_PROC(block_handler));
1860 }
1861 block_handler = vm_proc_to_block_handler(VM_BH_TO_PROC(block_handler));
1862 goto again;
1863 }
1864 VM_UNREACHABLE(invoke_block_from_c_bh);
1865 return Qundef;
1866}
1867
1868static inline VALUE
1869check_block_handler(rb_execution_context_t *ec)
1870{
1871 VALUE block_handler = VM_CF_BLOCK_HANDLER(ec->cfp);
1872 vm_block_handler_verify(block_handler);
1873 if (UNLIKELY(block_handler == VM_BLOCK_HANDLER_NONE)) {
1874 rb_vm_localjump_error("no block given", Qnil, 0);
1875 }
1876
1877 return block_handler;
1878}
1879
1880static VALUE
1881vm_yield_with_cref(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat, const rb_cref_t *cref, int is_lambda)
1882{
1883 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1884 argc, argv, kw_splat, VM_BLOCK_HANDLER_NONE,
1885 cref, is_lambda, FALSE);
1886}
1887
1888static VALUE
1889vm_yield(rb_execution_context_t *ec, int argc, const VALUE *argv, int kw_splat)
1890{
1891 return vm_yield_with_cref(ec, argc, argv, kw_splat, NULL, FALSE);
1892}
1893
1894static VALUE
1895vm_yield_with_block(rb_execution_context_t *ec, int argc, const VALUE *argv, VALUE block_handler, int kw_splat)
1896{
1897 return invoke_block_from_c_bh(ec, check_block_handler(ec),
1898 argc, argv, kw_splat, block_handler,
1899 NULL, FALSE, FALSE);
1900}
1901
1902static VALUE
1903vm_yield_force_blockarg(rb_execution_context_t *ec, VALUE args)
1904{
1905 return invoke_block_from_c_bh(ec, check_block_handler(ec), 1, &args,
1906 RB_NO_KEYWORDS, VM_BLOCK_HANDLER_NONE, NULL, FALSE, TRUE);
1907}
1908
1909ALWAYS_INLINE(static VALUE
1910 invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1911 VALUE self, int argc, const VALUE *argv,
1912 int kw_splat, VALUE passed_block_handler, int is_lambda,
1913 const rb_callable_method_entry_t *me));
1914
1915static inline VALUE
1916invoke_block_from_c_proc(rb_execution_context_t *ec, const rb_proc_t *proc,
1917 VALUE self, int argc, const VALUE *argv,
1918 int kw_splat, VALUE passed_block_handler, int is_lambda,
1919                          const rb_callable_method_entry_t *me)
1920 {
1921 const struct rb_block *block = &proc->block;
1922
1923 again:
1924 switch (vm_block_type(block)) {
1925 case block_type_iseq:
1926 return invoke_iseq_block_from_c(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, NULL, is_lambda, me);
1927 case block_type_ifunc:
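        // kw_splat == 1 means the trailing argument is a keyword-splat
        // hash: an empty one is dropped (as `**{}` passes no keywords),
        // and a non-empty one is dup'ed below so the callee gets a
        // private copy rather than the caller's hash.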
1928 if (kw_splat == 1) {
1929 VALUE keyword_hash = argv[argc-1];
1930 if (!RB_TYPE_P(keyword_hash, T_HASH)) {
1931 keyword_hash = rb_to_hash_type(keyword_hash);
1932 }
1933 if (RHASH_EMPTY_P(keyword_hash)) {
1934 argc--;
1935 }
1936 else {
1937 ((VALUE *)argv)[argc-1] = rb_hash_dup(keyword_hash);
1938 }
1939 }
1940 return vm_yield_with_cfunc(ec, &block->as.captured, self, argc, argv, kw_splat, passed_block_handler, me);
1941 case block_type_symbol:
1942 return vm_yield_with_symbol(ec, block->as.symbol, argc, argv, kw_splat, passed_block_handler);
1943 case block_type_proc:
1944 is_lambda = block_proc_is_lambda(block->as.proc);
1945 block = vm_proc_block(block->as.proc);
1946 goto again;
1947 }
1948 VM_UNREACHABLE(invoke_block_from_c_proc);
1949 return Qundef;
1950}
1951
1952static VALUE
1953vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1954 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1955{
1956 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler, proc->is_lambda, NULL);
1957}
1958
1959static VALUE
1960vm_invoke_bmethod(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1961 int argc, const VALUE *argv, int kw_splat, VALUE block_handler, const rb_callable_method_entry_t *me)
1962{
1963 return invoke_block_from_c_proc(ec, proc, self, argc, argv, kw_splat, block_handler, TRUE, me);
1964}
1965
1966VALUE
1967rb_vm_invoke_proc(rb_execution_context_t *ec, rb_proc_t *proc,
1968 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1969{
1970 VALUE self = vm_block_self(&proc->block);
1971 vm_block_handler_verify(passed_block_handler);
1972
1973 if (proc->is_from_method) {
1974 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1975 }
1976 else {
1977 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1978 }
1979}
1980
1981VALUE
1982rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
1983 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler)
1984{
1985 vm_block_handler_verify(passed_block_handler);
1986
1987 if (proc->is_from_method) {
1988 return vm_invoke_bmethod(ec, proc, self, argc, argv, kw_splat, passed_block_handler, NULL);
1989 }
1990 else {
1991 return vm_invoke_proc(ec, proc, self, argc, argv, kw_splat, passed_block_handler);
1992 }
1993}
1994
1995/* special variable */
1996
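/*
 * Rough Ruby-level picture of the svar machinery below: special variables
 * live in the svar slot beside the local EP, per frame, rather than in
 * ordinary local-variable slots. Illustratively:
 *
 *   "abc" =~ /b/   # stores the MatchData via VM_SVAR_BACKREF  ($~)
 *   gets           # stores the read line via VM_SVAR_LASTLINE ($_)
 */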
1997VALUE *
1998rb_vm_svar_lep(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
1999{
2000 while (!CFP_PC(cfp) || !CFP_ISEQ(cfp)) {
2001 if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_IFUNC) {
2002 struct vm_ifunc *ifunc = (struct vm_ifunc *)CFP_ISEQ(cfp);
2003 return ifunc->svar_lep;
2004 }
2005 else {
2006 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2007 }
2008
2009 if (RUBY_VM_CONTROL_FRAME_STACK_OVERFLOW_P(ec, cfp)) {
2010 return NULL;
2011 }
2012 }
2013
2014 return (VALUE *)VM_CF_LEP(cfp);
2015}
2016
2017static VALUE
2018vm_cfp_svar_get(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key)
2019{
2020 return lep_svar_get(ec, rb_vm_svar_lep(ec, cfp), key);
2021}
2022
2023static void
2024vm_cfp_svar_set(const rb_execution_context_t *ec, rb_control_frame_t *cfp, VALUE key, const VALUE val)
2025{
2026 lep_svar_set(ec, rb_vm_svar_lep(ec, cfp), key, val);
2027}
2028
2029static VALUE
2030vm_svar_get(const rb_execution_context_t *ec, VALUE key)
2031{
2032 return vm_cfp_svar_get(ec, ec->cfp, key);
2033}
2034
2035static void
2036vm_svar_set(const rb_execution_context_t *ec, VALUE key, VALUE val)
2037{
2038 vm_cfp_svar_set(ec, ec->cfp, key, val);
2039}
2040
2041VALUE
2042 rb_backref_get(void)
2043 {
2044 return vm_svar_get(GET_EC(), VM_SVAR_BACKREF);
2045}
2046
2047void
2048 rb_backref_set(VALUE val)
2049 {
2050 vm_svar_set(GET_EC(), VM_SVAR_BACKREF, val);
2051}
2052
2053VALUE
2054 rb_lastline_get(void)
2055 {
2056 return vm_svar_get(GET_EC(), VM_SVAR_LASTLINE);
2057}
2058
2059void
2060 rb_lastline_set(VALUE val)
2061 {
2062 vm_svar_set(GET_EC(), VM_SVAR_LASTLINE, val);
2063}
2064
2065void
2066rb_lastline_set_up(VALUE val, unsigned int up)
2067{
2068 rb_control_frame_t * cfp = GET_EC()->cfp;
2069
2070 for(unsigned int i = 0; i < up; i++) {
2071 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2072 }
2073 vm_cfp_svar_set(GET_EC(), cfp, VM_SVAR_LASTLINE, val);
2074}
2075
2076/* misc */
2077
2078const char *
2079 rb_sourcefile(void)
2080 {
2081 const rb_execution_context_t *ec = GET_EC();
2082 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2083
2084 if (cfp) {
2085 return RSTRING_PTR(rb_iseq_path(CFP_ISEQ(cfp)));
2086 }
2087 else {
2088 return 0;
2089 }
2090}
2091
2092int
2093 rb_sourceline(void)
2094 {
2095 const rb_execution_context_t *ec = GET_EC();
2096 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2097
2098 if (cfp) {
2099 return rb_vm_get_sourceline(cfp);
2100 }
2101 else {
2102 return 0;
2103 }
2104}
2105
2106VALUE
2107rb_source_location(int *pline)
2108{
2109 const rb_execution_context_t *ec = GET_EC();
2110 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2111
2112 if (cfp && VM_FRAME_RUBYFRAME_P(cfp)) {
2113 if (pline) *pline = rb_vm_get_sourceline(cfp);
2114 return rb_iseq_path(CFP_ISEQ(cfp));
2115 }
2116 else {
2117 if (pline) *pline = 0;
2118 return Qnil;
2119 }
2120}
2121
2122const char *
2123rb_source_location_cstr(int *pline)
2124{
2125 VALUE path = rb_source_location(pline);
2126 if (NIL_P(path)) return NULL;
2127 return RSTRING_PTR(path);
2128}
2129
2130rb_cref_t *
2131rb_vm_cref(void)
2132{
2133 const rb_execution_context_t *ec = GET_EC();
2134 return vm_ec_cref(ec);
2135}
2136
2137rb_cref_t *
2138rb_vm_cref_replace_with_duplicated_cref(void)
2139{
2140 const rb_execution_context_t *ec = GET_EC();
2141 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2142 rb_cref_t *cref = vm_cref_replace_with_duplicated_cref(cfp->ep);
2143 ASSUME(cref);
2144 return cref;
2145}
2146
2147const rb_cref_t *
2148rb_vm_cref_in_context(VALUE self, VALUE cbase)
2149{
2150 const rb_execution_context_t *ec = GET_EC();
2151 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2152 const rb_cref_t *cref;
2153 if (!cfp || cfp->self != self) return NULL;
2154 if (!vm_env_cref_by_cref(cfp->ep)) return NULL;
2155 cref = vm_get_cref(cfp->ep);
2156 if (CREF_CLASS(cref) != cbase) return NULL;
2157 return cref;
2158}
2159
2160#if 0
2161void
2162debug_cref(rb_cref_t *cref)
2163{
2164 while (cref) {
2165 dp(CREF_CLASS(cref));
2166 printf("%ld\n", CREF_VISI(cref));
2167 cref = CREF_NEXT(cref);
2168 }
2169}
2170#endif
2171
2172VALUE
2173rb_vm_cbase(void)
2174{
2175 const rb_execution_context_t *ec = GET_EC();
2176 const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(ec, ec->cfp);
2177
2178 if (cfp == 0) {
2179 rb_raise(rb_eRuntimeError, "Can't call on top of Fiber or Thread");
2180 }
2181 return vm_get_cbase(cfp->ep);
2182}
2183
2184/* jump */
2185
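/* Illustrative Ruby-level trigger for the error built below:
 *
 *   def make = proc { break 42 }
 *   make.call   # the frame `break` targets is gone =>
 *               # LocalJumpError with @reason == :break, @exit_value == 42
 */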
2186static VALUE
2187make_localjump_error(const char *mesg, VALUE value, int reason)
2188{
2189     extern VALUE rb_eLocalJumpError;
2190     VALUE exc = rb_exc_new2(rb_eLocalJumpError, mesg);
2191     ID id;
2192
2193 switch (reason) {
2194 case TAG_BREAK:
2195 CONST_ID(id, "break");
2196 break;
2197 case TAG_REDO:
2198 CONST_ID(id, "redo");
2199 break;
2200 case TAG_RETRY:
2201 CONST_ID(id, "retry");
2202 break;
2203 case TAG_NEXT:
2204 CONST_ID(id, "next");
2205 break;
2206 case TAG_RETURN:
2207 CONST_ID(id, "return");
2208 break;
2209 default:
2210 CONST_ID(id, "noreason");
2211 break;
2212 }
2213 rb_iv_set(exc, "@exit_value", value);
2214 rb_iv_set(exc, "@reason", ID2SYM(id));
2215 return exc;
2216}
2217
2218void
2219rb_vm_localjump_error(const char *mesg, VALUE value, int reason)
2220{
2221 VALUE exc = make_localjump_error(mesg, value, reason);
2222 rb_exc_raise(exc);
2223}
2224
2225VALUE
2226rb_vm_make_jump_tag_but_local_jump(enum ruby_tag_type state, VALUE val)
2227{
2228 const char *mesg;
2229
2230 switch (state) {
2231 case TAG_RETURN:
2232 mesg = "unexpected return";
2233 break;
2234 case TAG_BREAK:
2235 mesg = "unexpected break";
2236 break;
2237 case TAG_NEXT:
2238 mesg = "unexpected next";
2239 break;
2240 case TAG_REDO:
2241 mesg = "unexpected redo";
2242 val = Qnil;
2243 break;
2244 case TAG_RETRY:
2245 mesg = "retry outside of rescue clause";
2246 val = Qnil;
2247 break;
2248 default:
2249 return Qnil;
2250 }
2251 if (UNDEF_P(val)) {
2252 val = GET_EC()->tag->retval;
2253 }
2254 return make_localjump_error(mesg, val, state);
2255}
2256
2257void
2258rb_vm_jump_tag_but_local_jump(enum ruby_tag_type state)
2259{
2260 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
2261 if (!NIL_P(exc)) rb_exc_raise(exc);
2262 EC_JUMP_TAG(GET_EC(), state);
2263}
2264
2265static rb_control_frame_t *
2266next_not_local_frame(rb_control_frame_t *cfp)
2267{
2268 while (VM_ENV_LOCAL_P(cfp->ep)) {
2269 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2270 }
2271 return cfp;
2272}
2273
2274NORETURN(static void vm_iter_break(rb_execution_context_t *ec, VALUE val));
2275
2276static void
2277vm_iter_break(rb_execution_context_t *ec, VALUE val)
2278{
2279 rb_control_frame_t *cfp = next_not_local_frame(ec->cfp);
2280 const VALUE *ep = VM_CF_PREV_EP(cfp);
2281 const rb_control_frame_t *target_cfp = rb_vm_search_cf_from_ep(ec, cfp, ep);
2282
2283 if (!target_cfp) {
2284 rb_vm_localjump_error("unexpected break", val, TAG_BREAK);
2285 }
2286
2287 ec->errinfo = (VALUE)THROW_DATA_NEW(val, target_cfp, TAG_BREAK);
2288 EC_JUMP_TAG(ec, TAG_BREAK);
2289}
2290
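/* The public entry points below let C iterator bodies terminate the
 * enclosing block invocation, mirroring Ruby's `break`: a callback passed
 * to rb_block_call() can call rb_iter_break_value(v) to make the whole
 * iteration return v, just as `ary.each { |x| break v }` would. */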
2291void
2292 rb_iter_break(void)
2293 {
2294 vm_iter_break(GET_EC(), Qnil);
2295}
2296
2297void
2298 rb_iter_break_value(VALUE val)
2299 {
2300 vm_iter_break(GET_EC(), val);
2301}
2302
2303/* optimization: redefine management */
2304
2305short ruby_vm_redefined_flag[BOP_LAST_];
2306static st_table *vm_opt_method_def_table = 0;
2307static st_table *vm_opt_mid_table = 0;
2308
2309void
2310rb_free_vm_opt_tables(void)
2311{
2312 st_free_table(vm_opt_method_def_table);
2313 st_free_table(vm_opt_mid_table);
2314}
2315
2316static int
2317vm_redefinition_check_flag(VALUE klass)
2318{
2319 if (klass == rb_cInteger) return INTEGER_REDEFINED_OP_FLAG;
2320 if (klass == rb_cFloat) return FLOAT_REDEFINED_OP_FLAG;
2321 if (klass == rb_cString) return STRING_REDEFINED_OP_FLAG;
2322 if (klass == rb_cArray) return ARRAY_REDEFINED_OP_FLAG;
2323 if (klass == rb_cHash) return HASH_REDEFINED_OP_FLAG;
2324 if (klass == rb_cSymbol) return SYMBOL_REDEFINED_OP_FLAG;
2325#if 0
2326 if (klass == rb_cTime) return TIME_REDEFINED_OP_FLAG;
2327#endif
2328 if (klass == rb_cRegexp) return REGEXP_REDEFINED_OP_FLAG;
2329 if (klass == rb_cNilClass) return NIL_REDEFINED_OP_FLAG;
2330 if (klass == rb_cTrueClass) return TRUE_REDEFINED_OP_FLAG;
2331 if (klass == rb_cFalseClass) return FALSE_REDEFINED_OP_FLAG;
2332 if (klass == rb_cProc) return PROC_REDEFINED_OP_FLAG;
2333 return 0;
2334}
2335
2336int
2337rb_vm_check_optimizable_mid(VALUE mid)
2338{
2339 if (!vm_opt_mid_table) {
2340 return FALSE;
2341 }
2342
2343 return st_lookup(vm_opt_mid_table, mid, NULL);
2344}
2345
2346static int
2347vm_redefinition_check_method_type(const rb_method_entry_t *me)
2348{
2349 if (me->called_id != me->def->original_id) {
2350 return FALSE;
2351 }
2352
2353 if (METHOD_ENTRY_BASIC(me)) return TRUE;
2354
2355 const rb_method_definition_t *def = me->def;
2356 switch (def->type) {
2357 case VM_METHOD_TYPE_CFUNC:
2358 case VM_METHOD_TYPE_OPTIMIZED:
2359 return TRUE;
2360 default:
2361 return FALSE;
2362 }
2363}
2364
2365static void
2366rb_vm_check_redefinition_opt_method(const rb_method_entry_t *me, VALUE klass)
2367{
2368 st_data_t bop;
2369 if (RB_TYPE_P(klass, T_ICLASS) && RICLASS_IS_ORIGIN_P(klass) &&
2370 RB_TYPE_P(RBASIC_CLASS(klass), T_CLASS)) {
2371 klass = RBASIC_CLASS(klass);
2372 }
2373 if (vm_redefinition_check_method_type(me)) {
2374 if (st_lookup(vm_opt_method_def_table, (st_data_t)me->def, &bop)) {
2375 int flag = vm_redefinition_check_flag(klass);
2376 if (flag != 0) {
2377                 rb_category_warn(
2378                     RB_WARN_CATEGORY_PERFORMANCE,
2379                     "Redefining '%s#%s' disables interpreter and JIT optimizations",
2380 rb_class2name(me->owner),
2381 rb_id2name(me->called_id)
2382 );
2383 rb_yjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2384 rb_zjit_bop_redefined(flag, (enum ruby_basic_operators)bop);
2385 ruby_vm_redefined_flag[bop] |= flag;
2386 }
2387 }
2388 }
2389}
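/* Illustrative Ruby-level trigger for the check above:
 *
 *   class Integer
 *     def +(other) = 0   # redefines a basic operator on a core class:
 *   end                  # warns (performance category), sets
 *                        # ruby_vm_redefined_flag[BOP_PLUS], and tells
 *                        # YJIT/ZJIT to drop their opt_plus fast paths
 */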
2390
2391static enum rb_id_table_iterator_result
2392check_redefined_method(ID mid, VALUE value, void *data)
2393{
2394 VALUE klass = (VALUE)data;
2395 const rb_method_entry_t *me = (rb_method_entry_t *)value;
2396 const rb_method_entry_t *newme = rb_method_entry(klass, mid);
2397
2398 if (newme != me) rb_vm_check_redefinition_opt_method(me, me->owner);
2399
2400 return ID_TABLE_CONTINUE;
2401}
2402
2403void
2404rb_vm_check_redefinition_by_prepend(VALUE klass)
2405{
2406 if (!vm_redefinition_check_flag(klass)) return;
2407 rb_id_table_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method, (void *)klass);
2408}
2409
2410static void
2411add_opt_method_entry_bop(const rb_method_entry_t *me, ID mid, enum ruby_basic_operators bop)
2412{
2413 st_insert(vm_opt_method_def_table, (st_data_t)me->def, (st_data_t)bop);
2414 st_insert(vm_opt_mid_table, (st_data_t)mid, (st_data_t)Qtrue);
2415}
2416
2417static void
2418add_opt_method(VALUE klass, ID mid, enum ruby_basic_operators bop)
2419{
2420 const rb_method_entry_t *me = rb_method_entry_at(klass, mid);
2421
2422 if (me && vm_redefinition_check_method_type(me)) {
2423 add_opt_method_entry_bop(me, mid, bop);
2424 }
2425 else {
2426 rb_bug("undefined optimized method: %s", rb_id2name(mid));
2427 }
2428}
2429
2430static enum ruby_basic_operators vm_redefinition_bop_for_id(ID mid);
2431
2432static void
2433add_opt_method_entry(const rb_method_entry_t *me)
2434{
2435 if (me && vm_redefinition_check_method_type(me)) {
2436 ID mid = me->called_id;
2437 enum ruby_basic_operators bop = vm_redefinition_bop_for_id(mid);
2438 if ((int)bop >= 0) {
2439 add_opt_method_entry_bop(me, mid, bop);
2440 }
2441 }
2442}
2443
2444static void
2445vm_init_redefined_flag(void)
2446{
2447 ID mid;
2448 enum ruby_basic_operators bop;
2449
2450#define OP(mid_, bop_) (mid = id##mid_, bop = BOP_##bop_, ruby_vm_redefined_flag[bop] = 0)
2451#define C(k) add_opt_method(rb_c##k, mid, bop)
2452 OP(PLUS, PLUS), (C(Integer), C(Float), C(String), C(Array));
2453 OP(MINUS, MINUS), (C(Integer), C(Float));
2454 OP(MULT, MULT), (C(Integer), C(Float));
2455 OP(DIV, DIV), (C(Integer), C(Float));
2456 OP(MOD, MOD), (C(Integer), C(Float));
2457 OP(Eq, EQ), (C(Integer), C(Float), C(String), C(Symbol));
2458 OP(Eqq, EQQ), (C(Integer), C(Float), C(Symbol), C(String),
2459 C(NilClass), C(TrueClass), C(FalseClass));
2460 OP(LT, LT), (C(Integer), C(Float));
2461 OP(LE, LE), (C(Integer), C(Float));
2462 OP(GT, GT), (C(Integer), C(Float));
2463 OP(GE, GE), (C(Integer), C(Float));
2464 OP(LTLT, LTLT), (C(String), C(Array));
2465 OP(GTGT, GTGT), (C(Integer));
2466 OP(AREF, AREF), (C(Array), C(Hash), C(Integer));
2467 OP(ASET, ASET), (C(Array), C(Hash));
2468 OP(Length, LENGTH), (C(Array), C(String), C(Hash));
2469 OP(Size, SIZE), (C(Array), C(String), C(Hash));
2470 OP(EmptyP, EMPTY_P), (C(Array), C(String), C(Hash));
2471 OP(Succ, SUCC), (C(Integer), C(String));
2472 OP(EqTilde, MATCH), (C(Regexp), C(String));
2473 OP(Freeze, FREEZE), (C(String), C(Array), C(Hash));
2474 OP(UMinus, UMINUS), (C(String));
2475 OP(Max, MAX), (C(Array));
2476 OP(Min, MIN), (C(Array));
2477 OP(Hash, HASH), (C(Array));
2478 OP(Call, CALL), (C(Proc));
2479 OP(And, AND), (C(Integer));
2480 OP(Or, OR), (C(Integer));
2481 OP(NilP, NIL_P), (C(NilClass));
2482 OP(Cmp, CMP), (C(Integer), C(Float), C(String));
2483 OP(Default, DEFAULT), (C(Hash));
2484 OP(IncludeP, INCLUDE_P), (C(Array));
2485#undef C
2486#undef OP
2487}
2488
2489static enum ruby_basic_operators
2490vm_redefinition_bop_for_id(ID mid)
2491{
2492 switch (mid) {
2493#define OP(mid_, bop_) case id##mid_: return BOP_##bop_
2494 OP(PLUS, PLUS);
2495 OP(MINUS, MINUS);
2496 OP(MULT, MULT);
2497 OP(DIV, DIV);
2498 OP(MOD, MOD);
2499 OP(Eq, EQ);
2500 OP(Eqq, EQQ);
2501 OP(LT, LT);
2502 OP(LE, LE);
2503 OP(GT, GT);
2504 OP(GE, GE);
2505 OP(LTLT, LTLT);
2506 OP(AREF, AREF);
2507 OP(ASET, ASET);
2508 OP(Length, LENGTH);
2509 OP(Size, SIZE);
2510 OP(EmptyP, EMPTY_P);
2511 OP(Succ, SUCC);
2512 OP(EqTilde, MATCH);
2513 OP(Freeze, FREEZE);
2514 OP(UMinus, UMINUS);
2515 OP(Max, MAX);
2516 OP(Min, MIN);
2517 OP(Hash, HASH);
2518 OP(Call, CALL);
2519 OP(And, AND);
2520 OP(Or, OR);
2521 OP(NilP, NIL_P);
2522 OP(Cmp, CMP);
2523 OP(Default, DEFAULT);
2524 OP(Pack, PACK);
2525#undef OP
2526 }
2527 return -1;
2528}
2529
2530/* for vm development */
2531
2532#if VMDEBUG
2533static const char *
2534vm_frametype_name(const rb_control_frame_t *cfp)
2535{
2536 switch (VM_FRAME_TYPE(cfp)) {
2537 case VM_FRAME_MAGIC_METHOD: return "method";
2538 case VM_FRAME_MAGIC_BLOCK: return "block";
2539 case VM_FRAME_MAGIC_CLASS: return "class";
2540 case VM_FRAME_MAGIC_TOP: return "top";
2541 case VM_FRAME_MAGIC_CFUNC: return "cfunc";
2542 case VM_FRAME_MAGIC_IFUNC: return "ifunc";
2543 case VM_FRAME_MAGIC_EVAL: return "eval";
2544 case VM_FRAME_MAGIC_RESCUE: return "rescue";
2545 default:
2546 rb_bug("unknown frame");
2547 }
2548}
2549#endif
2550
2551static VALUE
2552frame_return_value(const struct vm_throw_data *err)
2553{
2554 if (THROW_DATA_P(err) &&
2555 THROW_DATA_STATE(err) == TAG_BREAK &&
2556 THROW_DATA_CONSUMED_P(err) == FALSE) {
2557 return THROW_DATA_VAL(err);
2558 }
2559 else {
2560 return Qnil;
2561 }
2562}
2563
2564#if 0
2565/* for debug */
2566static const char *
2567frame_name(const rb_control_frame_t *cfp)
2568{
2569 unsigned long type = VM_FRAME_TYPE(cfp);
2570#define C(t) if (type == VM_FRAME_MAGIC_##t) return #t
2571 C(METHOD);
2572 C(BLOCK);
2573 C(CLASS);
2574 C(TOP);
2575 C(CFUNC);
2576 C(PROC);
2577 C(IFUNC);
2578 C(EVAL);
2579 C(LAMBDA);
2580 C(RESCUE);
2581 C(DUMMY);
2582#undef C
2583 return "unknown";
2584}
2585#endif
2586
2587// cfp_returning_with_value:
2588// Whether cfp is the last frame in the unwinding process for a non-local return.
2589static void
2590hook_before_rewind(rb_execution_context_t *ec, bool cfp_returning_with_value, int state, struct vm_throw_data *err)
2591{
2592 if (state == TAG_RAISE && RBASIC(err)->klass == rb_eSysStackError) {
2593 return;
2594 }
2595 else {
2596 const rb_iseq_t *iseq = CFP_ISEQ(ec->cfp);
2597 rb_hook_list_t *local_hooks = NULL;
2598 unsigned int local_hooks_cnt = iseq->aux.exec.local_hooks_cnt;
2599 if (RB_UNLIKELY(local_hooks_cnt > 0)) {
2600 local_hooks = rb_iseq_local_hooks(iseq, rb_ec_ractor_ptr(ec), false);
2601 }
2602
2603 switch (VM_FRAME_TYPE(ec->cfp)) {
2604 case VM_FRAME_MAGIC_METHOD:
2605 RUBY_DTRACE_METHOD_RETURN_HOOK(ec, 0, 0);
2606 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2607
2608 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_RETURN)) {
2609 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN,
2610 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2611 }
2612
2613 THROW_DATA_CONSUMED_SET(err);
2614 break;
2615 case VM_FRAME_MAGIC_BLOCK:
2616 if (VM_FRAME_BMETHOD_P(ec->cfp)) {
2617 VALUE bmethod_return_value = frame_return_value(err);
2618 if (cfp_returning_with_value) {
2619 // Non-local return terminating at a BMETHOD control frame.
2620 bmethod_return_value = THROW_DATA_VAL(err);
2621 }
2622
2623
2624 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, bmethod_return_value);
2625 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2626 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2627 ec->cfp->self, 0, 0, 0, bmethod_return_value, TRUE);
2628 }
2629
2630 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(ec->cfp);
2631
2632 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_RETURN, ec->cfp->self,
2633 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2634 rb_vm_frame_method_entry(ec->cfp)->called_id,
2635 rb_vm_frame_method_entry(ec->cfp)->owner,
2636 bmethod_return_value);
2637
2638 VM_ASSERT(me->def->type == VM_METHOD_TYPE_BMETHOD);
2639 unsigned int local_hooks_cnt = me->def->body.bmethod.local_hooks_cnt;
2640 if (UNLIKELY(local_hooks_cnt > 0)) {
2641 local_hooks = rb_method_def_local_hooks(me->def, rb_ec_ractor_ptr(ec), false);
2642 if (local_hooks && local_hooks->events & RUBY_EVENT_RETURN) {
2643 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_RETURN, ec->cfp->self,
2644 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2645 rb_vm_frame_method_entry(ec->cfp)->called_id,
2646 rb_vm_frame_method_entry(ec->cfp)->owner,
2647 bmethod_return_value, TRUE);
2648 }
2649 }
2650
2651 THROW_DATA_CONSUMED_SET(err);
2652 }
2653 else {
2654 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_B_RETURN, ec->cfp->self, 0, 0, 0, frame_return_value(err));
2655 if (UNLIKELY(local_hooks && local_hooks->events & RUBY_EVENT_B_RETURN)) {
2656 rb_exec_event_hook_orig(ec, local_hooks, RUBY_EVENT_B_RETURN,
2657 ec->cfp->self, 0, 0, 0, frame_return_value(err), TRUE);
2658 }
2659 THROW_DATA_CONSUMED_SET(err);
2660 }
2661 break;
2662 case VM_FRAME_MAGIC_CLASS:
2663 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_END, ec->cfp->self, 0, 0, 0, Qnil);
2664 break;
2665 }
2666 }
2667}
2668
2669/* evaluator body */
2670
2671/* finish
2672 VMe (h1) finish
2673 VM finish F1 F2
2674 cfunc finish F1 F2 C1
2675 rb_funcall finish F1 F2 C1
2676 VMe finish F1 F2 C1
2677 VM finish F1 F2 C1 F3
2678
2679 F1 - F3 : pushed by VM
2680 C1 : pushed by send insn (CFUNC)
2681
2682 struct CONTROL_FRAME {
2683 VALUE *pc; // cfp[0], program counter
2684 VALUE *sp; // cfp[1], stack pointer
2685 rb_iseq_t *iseq; // cfp[2], iseq
2686 VALUE self; // cfp[3], self
2687 const VALUE *ep; // cfp[4], env pointer
2688 const void *block_code; // cfp[5], block code
2689 };
2690
2691 struct rb_captured_block {
2692 VALUE self;
2693 VALUE *ep;
2694 union code;
2695 };
2696
2697 struct METHOD_ENV {
2698 VALUE param0;
2699 ...
2700 VALUE paramN;
2701 VALUE lvar1;
2702 ...
2703 VALUE lvarM;
2704 VALUE cref; // ep[-2]
2705 VALUE special; // ep[-1]
2706 VALUE flags; // ep[ 0] == lep[0]
2707 };
2708
2709 struct BLOCK_ENV {
2710 VALUE block_param0;
2711 ...
2712 VALUE block_paramN;
2713 VALUE block_lvar1;
2714 ...
2715 VALUE block_lvarM;
2716 VALUE cref; // ep[-2]
2717 VALUE special; // ep[-1]
2718 VALUE flags; // ep[ 0]
2719 };
2720
2721 struct CLASS_ENV {
2722 VALUE class_lvar0;
2723 ...
2724 VALUE class_lvarN;
2725 VALUE cref;
2726 VALUE prev_ep; // for frame jump
2727 VALUE flags;
2728 };
2729
2730 struct C_METHOD_CONTROL_FRAME {
2731 VALUE *pc; // 0
2732 VALUE *sp; // stack pointer
2733 rb_iseq_t *iseq; // cmi
2734 VALUE self; // ?
2735 VALUE *ep; // ep == lep
2736 void *code; //
2737 };
2738
2739 struct C_BLOCK_CONTROL_FRAME {
2740     VALUE *pc;                  // points only to the "finish" insn
2741 VALUE *sp; // sp
2742 rb_iseq_t *iseq; // ?
2743 VALUE self; //
2744 VALUE *ep; // ep
2745 void *code; //
2746 };
2747 */
2748
2749static inline VALUE
2750vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo);
2751static inline VALUE
2752vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state, struct rb_vm_tag *tag, VALUE result);
2753
2754// for non-Emscripten Wasm build, use vm_exec with optimized setjmp for runtime performance
2755#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2756
2757struct rb_vm_exec_context {
2758 rb_execution_context_t *const ec;
2759 struct rb_vm_tag *const tag;
2760
2761 VALUE result;
2762};
2763
2764static void
2765vm_exec_bottom_main(void *context)
2766{
2767 struct rb_vm_exec_context *ctx = context;
2768 rb_execution_context_t *ec = ctx->ec;
2769
2770 ctx->result = vm_exec_loop(ec, TAG_NONE, ctx->tag, vm_exec_core(ec));
2771}
2772
2773static void
2774vm_exec_bottom_rescue(void *context)
2775{
2776 struct rb_vm_exec_context *ctx = context;
2777 rb_execution_context_t *ec = ctx->ec;
2778
2779 ctx->result = vm_exec_loop(ec, rb_ec_tag_state(ec), ctx->tag, ec->errinfo);
2780}
2781#endif
2782
2783VALUE
2784vm_exec(rb_execution_context_t *ec)
2785{
2786 VALUE result = Qundef;
2787
2788 EC_PUSH_TAG(ec);
2789
2790 _tag.retval = Qnil;
2791
2792#if defined(__wasm__) && !defined(__EMSCRIPTEN__)
2793 struct rb_vm_exec_context ctx = {
2794 .ec = ec,
2795 .tag = &_tag,
2796 };
2797 struct rb_wasm_try_catch try_catch;
2798
2799 EC_REPUSH_TAG();
2800
2801 rb_wasm_try_catch_init(&try_catch, vm_exec_bottom_main, vm_exec_bottom_rescue, &ctx);
2802
2803 rb_wasm_try_catch_loop_run(&try_catch, &RB_VM_TAG_JMPBUF_GET(_tag.buf));
2804
2805 result = ctx.result;
2806#else
2807 enum ruby_tag_type state;
2808 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2809 if (UNDEF_P(result = jit_exec(ec))) {
2810 result = vm_exec_core(ec);
2811 }
2812 /* fallback to the VM */
2813 result = vm_exec_loop(ec, TAG_NONE, &_tag, result);
2814 }
2815 else {
2816 result = vm_exec_loop(ec, state, &_tag, ec->errinfo);
2817 }
2818#endif
2819
2820 EC_POP_TAG();
2821 return result;
2822}
2823
2824static inline VALUE
2825vm_exec_loop(rb_execution_context_t *ec, enum ruby_tag_type state,
2826 struct rb_vm_tag *tag, VALUE result)
2827{
2828 if (state == TAG_NONE) { /* no jumps, result is discarded */
2829 goto vm_loop_start;
2830 }
2831
2832 rb_ec_raised_reset(ec, RAISED_STACKOVERFLOW | RAISED_NOMEMORY);
2833 while (UNDEF_P(result = vm_exec_handle_exception(ec, state, result))) {
2834 // caught a jump, exec the handler. JIT code in jit_exec_exception()
2835 // may return Qundef to run remaining frames with vm_exec_core().
2836 if (UNDEF_P(result = jit_exec_exception(ec))) {
2837 result = vm_exec_core(ec);
2838 }
2839 vm_loop_start:
2840 VM_ASSERT(ec->tag == tag);
2841 /* when caught `throw`, `tag.state` is set. */
2842 if ((state = tag->state) == TAG_NONE) break;
2843 tag->state = TAG_NONE;
2844 }
2845
2846 return result;
2847}
2848
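/* ZJIT may leave pc/_iseq on its frames stale for speed; the authoritative
 * values live in the zjit_jit_frame_t hung off cfp->jit_return. Generic
 * stack walks (exception unwinding, catch-table dispatch) must write them
 * back first, which is what the helper below does. (Summary inferred from
 * the field copies in the loop.) */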
2849static inline void
2850zjit_materialize_frames(rb_control_frame_t *cfp)
2851{
2852 if (!rb_zjit_enabled_p) return;
2853
2854 while (true) {
2855 if (CFP_ZJIT_FRAME(cfp)) {
2856 const zjit_jit_frame_t *jit_frame = (const zjit_jit_frame_t *)cfp->jit_return;
2857 cfp->pc = jit_frame->pc;
2858 cfp->_iseq = (rb_iseq_t *)jit_frame->iseq;
2859 if (jit_frame->materialize_block_code) {
2860 cfp->block_code = NULL;
2861 }
2862 cfp->jit_return = 0;
2863 }
2864 if (VM_FRAME_FINISHED_P(cfp)) break;
2865 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
2866 }
2867}
2868
2869void
2870rb_zjit_materialize_frames(rb_control_frame_t *cfp)
2871{
2872 zjit_materialize_frames(cfp);
2873}
2874
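/* Catch-table sketch for the handler search below (illustrative):
 *
 *   begin
 *     raise "x"     # faulting pc (epc) falls inside an entry's start..end
 *   rescue => e     # CATCH_TYPE_RESCUE entry; cont/sp say where to resume
 *   ensure          # CATCH_TYPE_ENSURE entry; always eligible
 *   end
 *
 * TAG_RAISE accepts RESCUE/ENSURE entries; TAG_BREAK/NEXT/REDO/RETRY each
 * accept ENSURE plus their own entry type, as the branches below encode. */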
2875static inline VALUE
2876vm_exec_handle_exception(rb_execution_context_t *ec, enum ruby_tag_type state, VALUE errinfo)
2877{
2878 struct vm_throw_data *err = (struct vm_throw_data *)errinfo;
2879
2880 for (;;) {
2881 unsigned int i;
2882 const struct iseq_catch_table_entry *entry;
2883 const struct iseq_catch_table *ct;
2884 unsigned long epc, cont_pc, cont_sp;
2885 const rb_iseq_t *catch_iseq;
2886 VALUE type;
2887 const rb_control_frame_t *escape_cfp;
2888
2889 cont_pc = cont_sp = 0;
2890 catch_iseq = NULL;
2891
2892 while (CFP_PC(ec->cfp) == 0 || CFP_ISEQ(ec->cfp) == 0) {
2893 if (UNLIKELY(VM_FRAME_TYPE(ec->cfp) == VM_FRAME_MAGIC_CFUNC)) {
2894 EXEC_EVENT_HOOK_AND_POP_FRAME(ec, RUBY_EVENT_C_RETURN, ec->cfp->self,
2895 rb_vm_frame_method_entry(ec->cfp)->def->original_id,
2896 rb_vm_frame_method_entry(ec->cfp)->called_id,
2897 rb_vm_frame_method_entry(ec->cfp)->owner, Qnil);
2898 RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec,
2899 rb_vm_frame_method_entry(ec->cfp)->owner,
2900 rb_vm_frame_method_entry(ec->cfp)->def->original_id);
2901 }
2902 rb_vm_pop_frame(ec);
2903 }
2904
2905 rb_control_frame_t *const cfp = ec->cfp;
2906 epc = CFP_PC(cfp) - ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded;
2907
2908 escape_cfp = NULL;
2909 if (state == TAG_BREAK || state == TAG_RETURN) {
2910 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2911
2912 if (cfp == escape_cfp) {
2913 if (state == TAG_RETURN) {
2914 if (!VM_FRAME_FINISHED_P(cfp)) {
2915 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2916 THROW_DATA_STATE_SET(err, state = TAG_BREAK);
2917 }
2918 else {
2919 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
2920 if (ct) for (i = 0; i < ct->size; i++) {
2921 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2922 if (entry->start < epc && entry->end >= epc) {
2923 if (entry->type == CATCH_TYPE_ENSURE) {
2924 catch_iseq = entry->iseq;
2925 cont_pc = entry->cont;
2926 cont_sp = entry->sp;
2927 break;
2928 }
2929 }
2930 }
2931 if (catch_iseq == NULL) {
2932 ec->errinfo = Qnil;
2933 THROW_DATA_CATCH_FRAME_SET(err, cfp + 1);
2934 // cfp == escape_cfp here so calling with cfp_returning_with_value = true
2935 hook_before_rewind(ec, true, state, err);
2936 rb_vm_pop_frame(ec);
2937 return THROW_DATA_VAL(err);
2938 }
2939 }
2940 /* through */
2941 }
2942 else {
2943 /* TAG_BREAK */
2944 *cfp->sp++ = THROW_DATA_VAL(err);
2945 ec->errinfo = Qnil;
2946 zjit_materialize_frames(cfp);
2947 return Qundef;
2948 }
2949 }
2950 }
2951
2952 if (state == TAG_RAISE) {
2953 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
2954 if (ct) for (i = 0; i < ct->size; i++) {
2955 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2956 if (entry->start < epc && entry->end >= epc) {
2957
2958 if (entry->type == CATCH_TYPE_RESCUE ||
2959 entry->type == CATCH_TYPE_ENSURE) {
2960 catch_iseq = entry->iseq;
2961 cont_pc = entry->cont;
2962 cont_sp = entry->sp;
2963 break;
2964 }
2965 }
2966 }
2967 }
2968 else if (state == TAG_RETRY) {
2969 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
2970 if (ct) for (i = 0; i < ct->size; i++) {
2971 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
2972 if (entry->start < epc && entry->end >= epc) {
2973
2974 if (entry->type == CATCH_TYPE_ENSURE) {
2975 catch_iseq = entry->iseq;
2976 cont_pc = entry->cont;
2977 cont_sp = entry->sp;
2978 break;
2979 }
2980 else if (entry->type == CATCH_TYPE_RETRY) {
2981 const rb_control_frame_t *escape_cfp;
2982 escape_cfp = THROW_DATA_CATCH_FRAME(err);
2983 if (cfp == escape_cfp) {
2984 zjit_materialize_frames(cfp);
2985 cfp->pc = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded + entry->cont;
2986 ec->errinfo = Qnil;
2987 return Qundef;
2988 }
2989 }
2990 }
2991 }
2992 }
2993 else if ((state == TAG_BREAK && !escape_cfp) ||
2994 (state == TAG_REDO) ||
2995 (state == TAG_NEXT)) {
2996 type = (const enum rb_catch_type[TAG_MASK]) {
2997 [TAG_BREAK] = CATCH_TYPE_BREAK,
2998 [TAG_NEXT] = CATCH_TYPE_NEXT,
2999 [TAG_REDO] = CATCH_TYPE_REDO,
3000 /* otherwise = dontcare */
3001 }[state];
3002
3003 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
3004 if (ct) for (i = 0; i < ct->size; i++) {
3005 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
3006
3007 if (entry->start < epc && entry->end >= epc) {
3008 if (entry->type == CATCH_TYPE_ENSURE) {
3009 catch_iseq = entry->iseq;
3010 cont_pc = entry->cont;
3011 cont_sp = entry->sp;
3012 break;
3013 }
3014 else if (entry->type == type) {
3015 zjit_materialize_frames(cfp);
3016 cfp->pc = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded + entry->cont;
3017 cfp->sp = vm_base_ptr(cfp) + entry->sp;
3018
3019 if (state != TAG_REDO) {
3020 *cfp->sp++ = THROW_DATA_VAL(err);
3021 }
3022 ec->errinfo = Qnil;
3023 VM_ASSERT(ec->tag->state == TAG_NONE);
3024 return Qundef;
3025 }
3026 }
3027 }
3028 }
3029 else {
3030 ct = ISEQ_BODY(CFP_ISEQ(cfp))->catch_table;
3031 if (ct) for (i = 0; i < ct->size; i++) {
3032 entry = UNALIGNED_MEMBER_PTR(ct, entries[i]);
3033 if (entry->start < epc && entry->end >= epc) {
3034
3035 if (entry->type == CATCH_TYPE_ENSURE) {
3036 catch_iseq = entry->iseq;
3037 cont_pc = entry->cont;
3038 cont_sp = entry->sp;
3039 break;
3040 }
3041 }
3042 }
3043 }
3044
3045 if (catch_iseq != NULL) { /* found catch table */
3046 /* enter catch scope */
3047 const int arg_size = 1;
3048
3049 rb_iseq_check(catch_iseq);
3050 zjit_materialize_frames(cfp); // vm_base_ptr looks at cfp->_iseq
3051 cfp->sp = vm_base_ptr(cfp) + cont_sp;
3052 cfp->pc = ISEQ_BODY(CFP_ISEQ(cfp))->iseq_encoded + cont_pc;
3053
3054 /* push block frame */
3055 cfp->sp[0] = (VALUE)err;
3056 vm_push_frame(ec, catch_iseq, VM_FRAME_MAGIC_RESCUE,
3057 cfp->self,
3058 VM_GUARDED_PREV_EP(cfp->ep),
3059 0, /* cref or me */
3060 ISEQ_BODY(catch_iseq)->iseq_encoded,
3061 cfp->sp + arg_size /* push value */,
3062 ISEQ_BODY(catch_iseq)->local_table_size - arg_size,
3063 ISEQ_BODY(catch_iseq)->stack_max);
3064
3065 state = 0;
3066 ec->tag->state = TAG_NONE;
3067 ec->errinfo = Qnil;
3068
3069 return Qundef;
3070 }
3071 else {
3072 hook_before_rewind(ec, (cfp == escape_cfp), state, err);
3073
3074 if (VM_FRAME_FINISHED_P(ec->cfp)) {
3075 rb_vm_pop_frame(ec);
3076 ec->errinfo = (VALUE)err;
3077 rb_vm_tag_jmpbuf_deinit(&ec->tag->buf);
3078 ec->tag = ec->tag->prev;
3079 EC_JUMP_TAG(ec, state);
3080 }
3081 else {
3082 rb_vm_pop_frame(ec);
3083 }
3084 }
3085 }
3086}
3087
3088/* misc */
3089
3090VALUE
3091rb_iseq_eval(const rb_iseq_t *iseq, const rb_box_t *box)
3092{
3093 rb_execution_context_t *ec = GET_EC();
3094 VALUE val;
3095 vm_set_top_stack(ec, iseq, box);
3096 val = vm_exec(ec);
3097 return val;
3098}
3099
3100VALUE
3101rb_iseq_eval_main(const rb_iseq_t *iseq)
3102{
3103 rb_execution_context_t *ec = GET_EC();
3104 VALUE val;
3105 vm_set_main_stack(ec, iseq);
3106 val = vm_exec(ec);
3107 return val;
3108}
3109
3110int
3111rb_vm_control_frame_id_and_class(const rb_control_frame_t *cfp, ID *idp, ID *called_idp, VALUE *klassp)
3112{
3113 const rb_callable_method_entry_t *me = rb_vm_frame_method_entry(cfp);
3114
3115 if (me) {
3116 if (idp) *idp = me->def->original_id;
3117 if (called_idp) *called_idp = me->called_id;
3118 if (klassp) *klassp = me->owner;
3119 return TRUE;
3120 }
3121 else {
3122 return FALSE;
3123 }
3124}
3125
3126int
3127rb_ec_frame_method_id_and_class(const rb_execution_context_t *ec, ID *idp, ID *called_idp, VALUE *klassp)
3128{
3129 return rb_vm_control_frame_id_and_class(ec->cfp, idp, called_idp, klassp);
3130}
3131
3132int
3133 rb_frame_method_id_and_class(ID *idp, VALUE *klassp)
3134 {
3135 return rb_ec_frame_method_id_and_class(GET_EC(), idp, 0, klassp);
3136}
3137
3138VALUE
3139rb_vm_call_cfunc(VALUE recv, VALUE (*func)(VALUE), VALUE arg,
3140 VALUE block_handler, VALUE filename)
3141{
3142 rb_execution_context_t *ec = GET_EC();
3143 const rb_control_frame_t *reg_cfp = ec->cfp;
3144 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3145 const rb_box_t *box = rb_current_box();
3146 VALUE val;
3147
3148 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
3149 recv, GC_GUARDED_PTR(box),
3150 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
3151 0, reg_cfp->sp, 0, 0);
3152
3153 val = (*func)(arg);
3154
3155 rb_vm_pop_frame(ec);
3156 return val;
3157}
3158
3159/* Ruby::Box */
3160
3161VALUE
3162rb_vm_call_cfunc_in_box(VALUE recv, VALUE (*func)(VALUE, VALUE), VALUE arg1, VALUE arg2,
3163 VALUE filename, const rb_box_t *box)
3164{
3165 rb_execution_context_t *ec = GET_EC();
3166 const rb_control_frame_t *reg_cfp = ec->cfp;
3167 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
3168 VALUE val;
3169
3170 vm_push_frame(ec, iseq, VM_FRAME_MAGIC_TOP | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH,
3171 recv, GC_GUARDED_PTR(box),
3172 (VALUE)vm_cref_new_toplevel(ec), /* cref or me */
3173 0, reg_cfp->sp, 0, 0);
3174
3175 val = (*func)(arg1, arg2);
3176
3177 rb_vm_pop_frame(ec);
3178 return val;
3179}
3180
3181void
3182rb_vm_frame_flag_set_box_require(const rb_execution_context_t *ec)
3183{
3184 VM_ASSERT(rb_box_available());
3185 VM_ENV_FLAGS_SET(ec->cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE);
3186}
3187
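/* Resolving the box (namespace) a frame runs in: method/cfunc frames carry
 * it on the method definition, TOP/CLASS frames keep it in the local env,
 * and the boot-time DUMMY frame falls back to the main (or root) box.
 * (Summary of the dispatch below.) */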
3188static const rb_box_t *
3189current_box_on_cfp(const rb_execution_context_t *ec, const rb_control_frame_t *cfp)
3190{
3191     const rb_callable_method_entry_t *cme;
3192     const rb_box_t *box;
3193 const VALUE *lep = VM_EP_RUBY_LEP(ec, cfp);
3194 VM_BOX_ASSERT(lep, "lep should be valid");
3195 VM_BOX_ASSERT(rb_box_available(), "box should be available here");
3196
3197 if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_METHOD) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CFUNC)) {
3198 cme = check_method_entry(lep[VM_ENV_DATA_INDEX_ME_CREF], TRUE);
3199 VM_BOX_ASSERT(cme, "cme should be valid");
3200     VM_BOX_ASSERT(cme->def, "cme->def should be valid");
3201 return cme->def->box;
3202 }
3203 else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_TOP) || VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_CLASS)) {
3204 VM_BOX_ASSERT(VM_ENV_LOCAL_P(lep), "lep should be local on MAGIC_TOP or MAGIC_CLASS frames");
3205 return VM_ENV_BOX(lep);
3206 }
3207 else if (VM_ENV_FRAME_TYPE_P(lep, VM_FRAME_MAGIC_DUMMY)) {
3208         // No valid local ep found (just after process boot?).
3209         // Return the main box if set up, else the root box (the only valid box until main is initialized).
3210 box = rb_main_box();
3211 if (box)
3212 return box;
3213 return rb_root_box();
3214 }
3215 else {
3216 VM_BOX_CRASHED();
3217 rb_bug("BUG: Local ep without cme/box, flags: %08lX", (unsigned long)lep[VM_ENV_DATA_INDEX_FLAGS]);
3218 }
3220}
3221
3222const rb_box_t *
3223rb_vm_current_box(const rb_execution_context_t *ec)
3224{
3225 return current_box_on_cfp(ec, ec->cfp);
3226}
3227
3228static const rb_control_frame_t *
3229find_loader_control_frame(const rb_execution_context_t *ec, const rb_control_frame_t *cfp, const rb_control_frame_t *end_cfp)
3230{
3231 while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
3232 if (!VM_ENV_FRAME_TYPE_P(cfp->ep, VM_FRAME_MAGIC_CFUNC))
3233 break;
3234 if (!BOX_ROOT_P(current_box_on_cfp(ec, cfp)))
3235 break;
3236 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3237 }
3238 VM_ASSERT(RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp));
3239 return cfp;
3240}
3241
3242const rb_box_t *
3243rb_vm_loading_box(const rb_execution_context_t *ec)
3244{
3245 const rb_control_frame_t *cfp, *current_cfp, *end_cfp;
3246
3247 if (!rb_box_available() || !ec)
3248 return rb_root_box();
3249
3250 cfp = ec->cfp;
3251 current_cfp = cfp;
3252 end_cfp = RUBY_VM_END_CONTROL_FRAME(ec);
3253
3254 while (RUBY_VM_VALID_CONTROL_FRAME_P(cfp, end_cfp)) {
3255 if (VM_ENV_FLAGS(cfp->ep, VM_FRAME_FLAG_BOX_REQUIRE)) {
3256 if (RTEST(cfp->self) && BOX_OBJ_P(cfp->self)) {
3257 // Box#require, #require_relative, #load
3258 return rb_get_box_t(cfp->self);
3259 }
3260 // Kernel#require, #require_relative, #load
3261 cfp = find_loader_control_frame(ec, cfp, end_cfp);
3262 return current_box_on_cfp(ec, cfp);
3263 }
3264 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3265 }
3266 // no require/load with explicit boxes.
3267 return current_box_on_cfp(ec, current_cfp);
3268}
3269
3270/* vm */
3271
3272void
3273rb_vm_update_references(void *ptr)
3274{
3275 if (ptr) {
3276 rb_vm_t *vm = ptr;
3277
3278 vm->self = rb_gc_location(vm->self);
3279 vm->mark_object_ary = rb_gc_location(vm->mark_object_ary);
3280 vm->orig_progname = rb_gc_location(vm->orig_progname);
3281 vm->cc_refinement_set = rb_gc_location(vm->cc_refinement_set);
3282
3283 if (vm->root_box)
3284 rb_box_gc_update_references(vm->root_box);
3285 if (vm->main_box)
3286 rb_box_gc_update_references(vm->main_box);
3287
3288 rb_gc_update_values(RUBY_NSIG, vm->trap_list.cmd);
3289
3290 if (vm->coverages) {
3291 vm->coverages = rb_gc_location(vm->coverages);
3292 vm->me2counter = rb_gc_location(vm->me2counter);
3293 }
3294 }
3295}
3296
3297void
3298rb_vm_each_stack_value(void *ptr, void (*cb)(VALUE, void*), void *ctx)
3299{
3300 if (ptr) {
3301 rb_vm_t *vm = ptr;
3302 rb_ractor_t *r = 0;
3303 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3304 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3305 rb_ractor_status_p(r, ractor_running));
3306 if (r->threads.cnt > 0) {
3307 rb_thread_t *th = 0;
3308 ccan_list_for_each(&r->threads.set, th, lt_node) {
3309 VM_ASSERT(th != NULL);
3310 rb_execution_context_t * ec = th->ec;
3311 if (ec->vm_stack) {
3312 VALUE *p = ec->vm_stack;
3313 VALUE *sp = ec->cfp->sp;
3314 while (p < sp) {
3315 if (!RB_SPECIAL_CONST_P(*p)) {
3316 cb(*p, ctx);
3317 }
3318 p++;
3319 }
3320 }
3321 }
3322 }
3323 }
3324 }
3325}
3326
3327static enum rb_id_table_iterator_result
3328vm_mark_negative_cme(VALUE val, void *dmy)
3329{
3330 rb_gc_mark(val);
3331 return ID_TABLE_CONTINUE;
3332}
3333
3334void rb_thread_sched_mark_zombies(rb_vm_t *vm);
3335
3336void
3337rb_vm_mark(void *ptr)
3338{
3339 RUBY_MARK_ENTER("vm");
3340 RUBY_GC_INFO("-------------------------------------------------\n");
3341 if (ptr) {
3342 rb_vm_t *vm = ptr;
3343 rb_ractor_t *r = 0;
3344 long i;
3345
3346 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
3347 // ractor.set only contains blocking or running ractors
3348 VM_ASSERT(rb_ractor_status_p(r, ractor_blocking) ||
3349 rb_ractor_status_p(r, ractor_running));
3350 rb_gc_mark(rb_ractor_self(r));
3351 }
3352
3353 for (struct global_object_list *list = vm->global_object_list; list; list = list->next) {
3354 rb_gc_mark_maybe(*list->varptr);
3355 }
3356
3357 rb_gc_mark_movable(vm->self);
3358
3359 if (vm->root_box) {
3360 rb_box_entry_mark(vm->root_box);
3361 }
3362 if (vm->main_box) {
3363 rb_box_entry_mark(vm->main_box);
3364 }
3365
3366 rb_gc_mark_movable(vm->mark_object_ary);
3367 rb_gc_mark_movable(vm->orig_progname);
3368 rb_gc_mark_movable(vm->coverages);
3369 rb_gc_mark_movable(vm->me2counter);
3370 rb_gc_mark_movable(vm->cc_refinement_set);
3371
3372 rb_gc_mark_values(RUBY_NSIG, vm->trap_list.cmd);
3373
3374 rb_hook_list_mark(&vm->global_hooks);
3375
3376 rb_id_table_foreach_values(&vm->negative_cme_table, vm_mark_negative_cme, NULL);
3377 rb_mark_tbl_no_pin(&vm->overloaded_cme_table);
3378 for (i=0; i<VM_GLOBAL_CC_CACHE_TABLE_SIZE; i++) {
3379 const struct rb_callcache *cc = vm->global_cc_cache_table[i];
3380
3381 if (cc != NULL) {
3382 if (!vm_cc_invalidated_p(cc)) {
3383 rb_gc_mark((VALUE)cc);
3384 }
3385 else {
3386 vm->global_cc_cache_table[i] = NULL;
3387 }
3388 }
3389 }
3390
3391 rb_thread_sched_mark_zombies(vm);
3392 }
3393
3394 RUBY_MARK_LEAVE("vm");
3395}
3396
3397#undef rb_vm_register_special_exception
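/* The special exceptions registered here (e.g. the instances behind
 * NoMemoryError and SystemStackError) are preallocated and frozen because
 * they must be raisable at the very moment allocation or stack growth has
 * just failed. */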
3398void
3399rb_vm_register_special_exception_str(enum ruby_special_exceptions sp, VALUE cls, VALUE mesg)
3400{
3401 rb_vm_t *vm = GET_VM();
3402 VALUE exc = rb_exc_new3(cls, rb_obj_freeze(mesg));
3403 OBJ_FREEZE(exc);
3404 ((VALUE *)vm->special_exceptions)[sp] = exc;
3405 rb_vm_register_global_object(exc);
3406}
3407
3408void rb_objspace_free_objects(void *objspace);
3409
3410int
3411 ruby_vm_destruct(rb_vm_t *vm)
3412 {
3413 RUBY_FREE_ENTER("vm");
3414 ruby_vm_during_cleanup = true;
3415
3416 if (vm) {
3417 rb_thread_t *th = vm->ractor.main_thread;
3418
3419 if (rb_free_at_exit) {
3420 rb_free_encoded_insn_data();
3421 rb_free_global_enc_table();
3422 rb_free_loaded_builtin_table();
3423 rb_free_global_symbol_table();
3424
3425 rb_free_shared_fiber_pool();
3426 rb_free_transcoder_table();
3427 rb_free_vm_opt_tables();
3428 rb_free_warning();
3429 rb_free_rb_global_tbl();
3430
3431 rb_id_table_free_items(&vm->negative_cme_table);
3432 st_free_embedded_table(&vm->overloaded_cme_table);
3433
3434 // TODO: Is this ignorable for classext->m_tbl ?
3435 // rb_id_table_free(RCLASS(rb_mRubyVMFrozenCore)->m_tbl);
3436
3437 st_free_embedded_table(&vm->static_ext_inits);
3438
3439 rb_id_table_free_items(&vm->constant_cache);
3440 set_free_embedded_table(&vm->unused_block_warning_table);
3441
3442 rb_thread_free_native_thread(th);
3443
3444#ifndef HAVE_SETPROCTITLE
3445 ruby_free_proctitle();
3446#endif
3447 }
3448 else {
3449 rb_fiber_reset_root_local_storage(th);
3450 thread_free(th);
3451 }
3452
3453 struct rb_objspace *objspace = vm->gc.objspace;
3454
3455 rb_vm_living_threads_init(vm);
3456 ruby_vm_run_at_exit_hooks(vm);
3457 st_free_embedded_table(&vm->ci_table);
3458 RB_ALTSTACK_FREE(vm->main_altstack);
3459
3460 struct global_object_list *next;
3461 for (struct global_object_list *list = vm->global_object_list; list; list = next) {
3462 next = list->next;
3463 xfree(list);
3464 }
3465
3466 if (objspace) {
3467 if (rb_free_at_exit) {
3468 rb_objspace_free_objects(objspace);
3469 rb_free_generic_fields_tbl_();
3470 rb_free_default_rand_key();
3471 }
3472 rb_objspace_free(objspace);
3473 }
3474 rb_native_mutex_destroy(&vm->workqueue_lock);
3475 /* after freeing objspace, you *can't* use ruby_xfree() */
3476 ruby_current_vm_ptr = NULL;
3477
3478 if (rb_free_at_exit) {
3479 rb_shape_free_all();
3480#if USE_YJIT
3481 rb_yjit_free_at_exit();
3482#endif
3483 }
3484 }
3485 RUBY_FREE_LEAVE("vm");
3486 return 0;
3487}
3488
3489size_t rb_vm_memsize_workqueue(struct ccan_list_head *workqueue); // vm_trace.c
3490
3491 // Used for VM memsize reporting. Iterator callback that sums the memsize
3492 // of each inline-cache table nested under the VM's constant cache.
3493static enum rb_id_table_iterator_result
3494vm_memsize_constant_cache_i(ID id, VALUE ics, void *size)
3495{
3496 *((size_t *) size) += rb_set_memsize((set_table *) ics);
3497 return ID_TABLE_CONTINUE;
3498}
3499
3500// Returns a size_t representing the memory footprint of the VM's constant
3501// cache, which is the memsize of the table as well as the memsize of all of the
3502// nested tables.
3503static size_t
3504vm_memsize_constant_cache(void)
3505{
3506 rb_vm_t *vm = GET_VM();
3507 size_t size = rb_id_table_memsize(&vm->constant_cache) - sizeof(struct rb_id_table);
3508
3509 rb_id_table_foreach(&vm->constant_cache, vm_memsize_constant_cache_i, &size);
3510 return size;
3511}
3512
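// Used for VM memsize reporting. Returns the size of the at_exit list by
// looping through the linked list and adding up the size of the structs.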
3513static size_t
3514vm_memsize_at_exit_list(rb_at_exit_list *at_exit)
3515{
3516 size_t size = 0;
3517
3518 while (at_exit) {
3519 size += sizeof(rb_at_exit_list);
3520 at_exit = at_exit->next;
3521 }
3522
3523 return size;
3524}
3525
3526// Used for VM memsize reporting. Returns the size of the builtin function
3527// table if it has been defined.
3528static size_t
3529vm_memsize_builtin_function_table(const struct rb_builtin_function *builtin_function_table)
3530{
3531 return builtin_function_table == NULL ? 0 : sizeof(struct rb_builtin_function);
3532}
3533
3534// Reports the memsize of the VM struct object and the structs that are
3535// associated with it.
3536static size_t
3537vm_memsize(const void *ptr)
3538{
3539 rb_vm_t *vm = GET_VM();
3540
3541 return (
3542 sizeof(rb_vm_t) +
3543 rb_vm_memsize_postponed_job_queue() +
3544 rb_vm_memsize_workqueue(&vm->workqueue) +
3545 vm_memsize_at_exit_list(vm->at_exit) +
3546 (rb_st_memsize(&vm->ci_table) - sizeof(struct st_table)) +
3547 vm_memsize_builtin_function_table(vm->builtin_function_table) +
3548 (rb_id_table_memsize(&vm->negative_cme_table) - sizeof(struct rb_id_table)) +
3549 (rb_st_memsize(&vm->overloaded_cme_table) - sizeof(struct st_table)) +
3550 vm_memsize_constant_cache()
3551 );
3552
3553 // TODO
3554 // struct { struct ccan_list_head set; } ractor;
3555 // void *main_altstack; #ifdef USE_SIGALTSTACK
3556 // struct rb_objspace *objspace;
3557}
3558
3559const rb_data_type_t ruby_vm_data_type = {
3560 "VM",
3561 {0, 0, vm_memsize,},
3562     0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3563 };
3564
3565#define vm_data_type ruby_vm_data_type
3566
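/* Builds the frozen hash exposed to Ruby as RubyVM::DEFAULT_PARAMS, e.g.
 *
 *   RubyVM::DEFAULT_PARAMS[:thread_vm_stack_size]  # => 1048576 (typical)
 *
 * (the exact values are platform- and build-dependent) */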
3567static VALUE
3568vm_default_params(void)
3569{
3570 rb_vm_t *vm = GET_VM();
3571 VALUE result = rb_hash_new_with_size(4);
3572#define SET(name) rb_hash_aset(result, ID2SYM(rb_intern(#name)), SIZET2NUM(vm->default_params.name));
3573 SET(thread_vm_stack_size);
3574 SET(thread_machine_stack_size);
3575 SET(fiber_vm_stack_size);
3576 SET(fiber_machine_stack_size);
3577#undef SET
3578 rb_obj_freeze(result);
3579 return result;
3580}
3581
3582static size_t
3583get_param(const char *name, size_t default_value, size_t min_value)
3584{
3585 const char *envval;
3586 size_t result = default_value;
3587 if ((envval = getenv(name)) != 0) {
3588 long val = atol(envval);
3589 if (val < (long)min_value) {
3590 val = (long)min_value;
3591 }
3592 result = (size_t)(((val -1 + RUBY_VM_SIZE_ALIGN) / RUBY_VM_SIZE_ALIGN) * RUBY_VM_SIZE_ALIGN);
3593 }
3594 if (0) ruby_debug_printf("%s: %"PRIuSIZE"\n", name, result); /* debug print */
3595
3596 return result;
3597}
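/* Worked example (illustrative, assuming RUBY_VM_SIZE_ALIGN == 4096):
 * RUBY_THREAD_VM_STACK_SIZE=200000 survives the minimum clamp and is then
 * rounded up to the next multiple of the alignment:
 * ((200000 - 1 + 4096) / 4096) * 4096 == 200704. */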
3598
3599static void
3600check_machine_stack_size(size_t *sizep)
3601{
3602#ifdef PTHREAD_STACK_MIN
3603 size_t size = *sizep;
3604#endif
3605
3606#ifdef PTHREAD_STACK_MIN
3607 if (size < (size_t)PTHREAD_STACK_MIN) {
3608 *sizep = (size_t)PTHREAD_STACK_MIN * 2;
3609 }
3610#endif
3611}
3612
3613static void
3614vm_default_params_setup(rb_vm_t *vm)
3615{
3616 vm->default_params.thread_vm_stack_size =
3617 get_param("RUBY_THREAD_VM_STACK_SIZE",
3618 RUBY_VM_THREAD_VM_STACK_SIZE,
3619 RUBY_VM_THREAD_VM_STACK_SIZE_MIN);
3620
3621 vm->default_params.thread_machine_stack_size =
3622 get_param("RUBY_THREAD_MACHINE_STACK_SIZE",
3623 RUBY_VM_THREAD_MACHINE_STACK_SIZE,
3624 RUBY_VM_THREAD_MACHINE_STACK_SIZE_MIN);
3625
3626 vm->default_params.fiber_vm_stack_size =
3627 get_param("RUBY_FIBER_VM_STACK_SIZE",
3628 RUBY_VM_FIBER_VM_STACK_SIZE,
3629 RUBY_VM_FIBER_VM_STACK_SIZE_MIN);
3630
3631 vm->default_params.fiber_machine_stack_size =
3632 get_param("RUBY_FIBER_MACHINE_STACK_SIZE",
3633 RUBY_VM_FIBER_MACHINE_STACK_SIZE,
3634 RUBY_VM_FIBER_MACHINE_STACK_SIZE_MIN);
3635
3636 /* environment dependent check */
3637 check_machine_stack_size(&vm->default_params.thread_machine_stack_size);
3638 check_machine_stack_size(&vm->default_params.fiber_machine_stack_size);
3639}
3640
3641static void
3642vm_init2(rb_vm_t *vm)
3643{
3644 rb_vm_living_threads_init(vm);
3645 vm->thread_report_on_exception = 1;
3646 vm->src_encoding_index = -1;
3647
3648 vm_default_params_setup(vm);
3649}
3650
3651void
3652rb_execution_context_update(rb_execution_context_t *ec)
3653{
3654 /* update VM stack */
3655 if (ec->vm_stack) {
3656 long i;
3657 VM_ASSERT(ec->cfp);
3658 VALUE *p = ec->vm_stack;
3659 VALUE *sp = ec->cfp->sp;
3660 rb_control_frame_t *cfp = ec->cfp;
3661 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3662
3663 for (i = 0; i < (long)(sp - p); i++) {
3664 VALUE ref = p[i];
3665 VALUE update = rb_gc_location(ref);
3666 if (ref != update) {
3667 p[i] = update;
3668 }
3669 }
3670
3671 while (cfp != limit_cfp) {
3672 const VALUE *ep = cfp->ep;
3673 cfp->self = rb_gc_location(cfp->self);
3674 if (CFP_ZJIT_FRAME(cfp)) {
3675 rb_zjit_jit_frame_update_references((zjit_jit_frame_t *)cfp->jit_return);
3676 // block_code must always be relocated. For ISEQ frames, the JIT caller
3677 // may have written it (gen_block_handler_specval) for passing blocks.
3678 // For C frames, rb_iterate0 may have written an ifunc to block_code
3679 // after the JIT pushed the frame. NULL is safe to pass to rb_gc_location.
3680 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
3681 }
3682 else {
3683 cfp->_iseq = (rb_iseq_t *)rb_gc_location((VALUE)cfp->_iseq);
3684 cfp->block_code = (void *)rb_gc_location((VALUE)cfp->block_code);
3685 }
3686
3687 if (!VM_ENV_LOCAL_P(ep)) {
3688 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3689 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3690 VM_FORCE_WRITE(&prev_ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(prev_ep[VM_ENV_DATA_INDEX_ENV]));
3691 }
3692
3693 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3694 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ENV], rb_gc_location(ep[VM_ENV_DATA_INDEX_ENV]));
3695 VM_FORCE_WRITE(&ep[VM_ENV_DATA_INDEX_ME_CREF], rb_gc_location(ep[VM_ENV_DATA_INDEX_ME_CREF]));
3696 }
3697 }
3698
3699 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3700 }
3701 }
3702
3703 ec->storage = rb_gc_location(ec->storage);
3704
3705 ec->gen_fields_cache.obj = rb_gc_location(ec->gen_fields_cache.obj);
3706 ec->gen_fields_cache.fields_obj = rb_gc_location(ec->gen_fields_cache.fields_obj);
3707}
3708
3709static enum rb_id_table_iterator_result
3710mark_local_storage_i(VALUE local, void *data)
3711{
3712 rb_gc_mark(local);
3713 return ID_TABLE_CONTINUE;
3714}
3715
3716void
3717rb_execution_context_mark(const rb_execution_context_t *ec)
3718{
3719 /* mark VM stack */
3720 if (ec->vm_stack) {
3721 VM_ASSERT(ec->cfp);
3722 VALUE *p = ec->vm_stack;
3723 VALUE *sp = ec->cfp->sp;
3724 rb_control_frame_t *cfp = ec->cfp;
3725 rb_control_frame_t *limit_cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3726
3727 for (long i = 0; i < (long)(sp - p); i++) {
3728 rb_gc_mark_movable(p[i]);
3729 }
3730
3731 while (cfp != limit_cfp) {
3732 const VALUE *ep = cfp->ep;
3733 VM_ASSERT(!!VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED) == vm_ep_in_heap_p_(ec, ep));
3734
3735 rb_gc_mark_movable(cfp->self);
3736 rb_gc_mark_movable((VALUE)CFP_ISEQ(cfp));
3737 // Mark block_code directly (not through rb_zjit_cfp_block_code)
3738 // because rb_iterate0 may write a valid ifunc after JIT frame push.
3739 rb_gc_mark_movable((VALUE)cfp->block_code);
3740
3741 if (VM_ENV_LOCAL_P(ep) && VM_ENV_BOXED_P(ep)) {
3742 const rb_box_t *box = VM_ENV_BOX(ep);
3743 if (BOX_USER_P(box)) {
3744 rb_gc_mark_movable(box->box_object);
3745 }
3746 }
3747
3748 if (!VM_ENV_LOCAL_P(ep)) {
3749 const VALUE *prev_ep = VM_ENV_PREV_EP(ep);
3750 if (VM_ENV_FLAGS(prev_ep, VM_ENV_FLAG_ESCAPED)) {
3751 rb_gc_mark_movable(prev_ep[VM_ENV_DATA_INDEX_ENV]);
3752 }
3753
3754 if (VM_ENV_FLAGS(ep, VM_ENV_FLAG_ESCAPED)) {
3755 rb_gc_mark_movable(ep[VM_ENV_DATA_INDEX_ENV]);
3756 rb_gc_mark(ep[VM_ENV_DATA_INDEX_ME_CREF]);
3757 }
3758 }
3759
3760 cfp = RUBY_VM_PREVIOUS_CONTROL_FRAME(cfp);
3761 }
3762 }
3763
3764 /* mark machine stack */
3765 if (ec->machine.stack_start && ec->machine.stack_end &&
3766 ec != GET_EC() /* marked for current ec at the first stage of marking */
3767 ) {
3768 rb_gc_mark_machine_context(ec);
3769 }
3770
3771 rb_gc_mark(ec->errinfo);
3772 rb_gc_mark(ec->root_svar);
3773 if (ec->local_storage) {
3774 rb_id_table_foreach_values(ec->local_storage, mark_local_storage_i, NULL);
3775 }
3776 rb_gc_mark(ec->local_storage_recursive_hash);
3777 rb_gc_mark(ec->local_storage_recursive_hash_for_trace);
3778 rb_gc_mark(ec->private_const_reference);
3779
3780 rb_gc_mark_movable(ec->storage);
3781}
3782
3783void rb_fiber_mark_self(rb_fiber_t *fib);
3784void rb_fiber_update_self(rb_fiber_t *fib);
3785void rb_threadptr_root_fiber_setup(rb_thread_t *th);
3786void rb_root_fiber_obj_setup(rb_thread_t *th);
3787void rb_threadptr_root_fiber_release(rb_thread_t *th);
3788
3789static void
3790thread_compact(void *ptr)
3791{
3792 rb_thread_t *th = ptr;
3793
3794 th->self = rb_gc_location(th->self);
3795}
3796
3797static void
3798thread_mark(void *ptr)
3799{
3800 rb_thread_t *th = ptr;
3801 RUBY_MARK_ENTER("thread");
3802
3803 // ec is null when setting up the thread in rb_threadptr_root_fiber_setup
3804 if (th->ec) {
3805 rb_fiber_mark_self(th->ec->fiber_ptr);
3806 }
3807
3808 /* mark ruby objects */
3809 switch (th->invoke_type) {
3810 case thread_invoke_type_proc:
3811 case thread_invoke_type_ractor_proc:
3812 rb_gc_mark(th->invoke_arg.proc.proc);
3813 rb_gc_mark(th->invoke_arg.proc.args);
3814 break;
3815 case thread_invoke_type_func:
3816 rb_gc_mark_maybe((VALUE)th->invoke_arg.func.arg);
3817 break;
3818 default:
3819 break;
3820 }
3821
3822 rb_gc_mark(rb_ractor_self(th->ractor));
3823 rb_gc_mark(th->thgroup);
3824 rb_gc_mark(th->value);
3825 rb_gc_mark(th->pending_interrupt_queue);
3826 rb_gc_mark(th->pending_interrupt_mask_stack);
3827 rb_gc_mark(th->top_self);
3828 rb_gc_mark(th->top_wrapper);
3829 if (th->root_fiber) rb_fiber_mark_self(th->root_fiber);
3830
3831 RUBY_ASSERT(th->ec == NULL || th->ec == rb_fiberptr_get_ec(th->ec->fiber_ptr));
3832 rb_gc_mark(th->last_status);
3833 rb_gc_mark(th->locking_mutex);
3834 rb_gc_mark(th->name);
3835
3836 rb_gc_mark(th->scheduler);
3837
3838 rb_threadptr_interrupt_exec_task_mark(th);
3839
3840 RUBY_MARK_LEAVE("thread");
3841}
3842
3843void rb_threadptr_sched_free(rb_thread_t *th); // thread_*.c
3844
3845static void
3846thread_free(void *ptr)
3847{
3848 rb_thread_t *th = ptr;
3849 RUBY_FREE_ENTER("thread");
3850
3851 rb_threadptr_sched_free(th);
3852
3853 if (th->locking_mutex != Qfalse) {
3854 rb_bug("thread_free: locking_mutex must be NULL (%p:%p)", (void *)th, (void *)th->locking_mutex);
3855 }
3856 if (th->keeping_mutexes != NULL) {
3857 rb_bug("thread_free: keeping_mutexes must be NULL (%p:%p)", (void *)th, (void *)th->keeping_mutexes);
3858 }
3859
3860 ruby_xfree(th->specific_storage);
3861
3862 if (th->vm && th->vm->ractor.main_thread == th) {
3863 RUBY_GC_INFO("MRI main thread\n");
3864 }
3865 else {
3866 // ruby_xfree(th->nt);
3867         // TODO: the MN system collects nt, but without MN it should be freed here.
3868 if (!th->main_thread) {
3869 ruby_xfree(th);
3870 }
3871 }
3872
3873 RUBY_FREE_LEAVE("thread");
3874}
3875
3876static size_t
3877thread_memsize(const void *ptr)
3878{
3879 const rb_thread_t *th = ptr;
3880 size_t size = sizeof(rb_thread_t);
3881
3882 if (!th->root_fiber) {
3883 size += th->ec->vm_stack_size * sizeof(VALUE);
3884 }
3885 if (th->ec->local_storage) {
3886 size += rb_id_table_memsize(th->ec->local_storage);
3887 }
3888 return size;
3889}
3890
3891#define thread_data_type ruby_threadptr_data_type
3892const rb_data_type_t ruby_threadptr_data_type = {
3893 "VM/thread",
3894 {
3895 thread_mark,
3896 thread_free,
3897 thread_memsize,
3898 thread_compact,
3899 },
3900 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
3901};
3902
3903VALUE
3904rb_obj_is_thread(VALUE obj)
3905{
3906 return RBOOL(rb_typeddata_is_kind_of(obj, &thread_data_type));
3907}
3908
3909static VALUE
3910thread_alloc(VALUE klass)
3911{
3912 rb_thread_t *th;
3913 return TypedData_Make_Struct(klass, rb_thread_t, &thread_data_type, th);
3914}
3915
3916void
3917rb_ec_set_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3918{
3919 ec->vm_stack = stack;
3920 ec->vm_stack_size = size;
3921}
3922
3923void
3924rb_ec_initialize_vm_stack(rb_execution_context_t *ec, VALUE *stack, size_t size)
3925{
3926 rb_ec_set_vm_stack(ec, stack, size);
3927
3928#if VM_CHECK_MODE > 0
3929 MEMZERO(stack, VALUE, size); // malloc memory could have the VM canary in it
3930#endif
3931
3932 ec->cfp = (void *)(ec->vm_stack + ec->vm_stack_size);
3933
3934 vm_push_frame(ec,
3935 NULL /* dummy iseq */,
3936 VM_FRAME_MAGIC_DUMMY | VM_ENV_FLAG_LOCAL | VM_FRAME_FLAG_FINISH | VM_FRAME_FLAG_CFRAME /* dummy frame */,
3937 Qnil /* dummy self */, VM_BLOCK_HANDLER_NONE /* dummy block ptr */,
3938 0 /* dummy cref/me */,
3939 0 /* dummy pc */, ec->vm_stack, 0, 0
3940 );
3941}
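/* Illustrative note (editorial sketch, not in the original source): the
 * dummy FINISH frame pushed above acts as a sentinel at the top of every
 * fresh VM stack, so frame walkers and vm_exec() can stop at it instead of
 * having to special-case an empty stack. */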
3942
3943void
3944rb_ec_clear_vm_stack(rb_execution_context_t *ec)
3945{
3946 // set cfp to NULL before clearing the stack in case `thread_profile_frames`
3947 // gets called in the middle of `rb_ec_set_vm_stack` via a signal handler.
3948 ec->cfp = NULL;
3949 rb_ec_set_vm_stack(ec, NULL, 0);
3950}
3951
3952void
3953rb_ec_close(rb_execution_context_t *ec)
3954{
3955 // Fiber storage is not accessible from outside the running fiber, so it is safe to clear it here.
3956 ec->storage = Qnil;
3957}
3958
3959static void
3960th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
3961{
3962 const rb_box_t *box = rb_current_box();
3963
3964 th->self = self;
3965
3966 ccan_list_head_init(&th->interrupt_exec_tasks);
3967
3968 rb_threadptr_root_fiber_setup(th);
3969
3970 /* All threads are blocking until a non-blocking fiber is scheduled */
3971 th->blocking = 1;
3972 th->scheduler = Qnil;
3973
3974 if (self == 0) {
3975 size_t size = vm->default_params.thread_vm_stack_size / sizeof(VALUE);
3976 VALUE *stack = ALLOC_N(VALUE, size);
3977 rb_ec_initialize_vm_stack(th->ec, stack, size);
3978 rb_thread_malloc_stack_set(th, stack, size);
3979 }
3980 else {
3981 VM_ASSERT(th->ec->cfp == NULL);
3982 VM_ASSERT(th->ec->vm_stack == NULL);
3983 VM_ASSERT(th->ec->vm_stack_size == 0);
3984 }
3985
3986 th->status = THREAD_RUNNABLE;
3987 th->last_status = Qnil;
3988 th->top_wrapper = 0;
3989 if (box->top_self) {
3990 th->top_self = box->top_self;
3991 }
3992 else {
3993 th->top_self = 0;
3994 }
3995 th->value = Qundef;
3996
3997 th->ec->errinfo = Qnil;
3998 th->ec->root_svar = Qfalse;
3999 th->ec->local_storage_recursive_hash = Qnil;
4000 th->ec->local_storage_recursive_hash_for_trace = Qnil;
4001
4002 th->ec->storage = Qnil;
4003 th->ec->ractor_id = rb_ractor_id(th->ractor);
4004
4005#if OPT_CALL_THREADED_CODE
4006 th->retval = Qundef;
4007#endif
4008 th->name = Qnil;
4009 th->report_on_exception = vm->thread_report_on_exception;
4010 th->ext_config.ractor_safe = true;
4011
4012#if USE_RUBY_DEBUG_LOG
4013 static rb_atomic_t thread_serial = 1;
4014 th->serial = RUBY_ATOMIC_FETCH_ADD(thread_serial, 1);
4015
4016 RUBY_DEBUG_LOG("th:%u", th->serial);
4017#endif
4018}
4019
4020VALUE
4021rb_thread_alloc(VALUE klass)
4022{
4023 VALUE self = thread_alloc(klass);
4024 rb_thread_t *target_th = rb_thread_ptr(self);
4025 target_th->ractor = GET_RACTOR();
4026 th_init(target_th, self, target_th->vm = GET_VM());
4027 rb_root_fiber_obj_setup(target_th);
4028 return self;
4029}
4030
4031#define REWIND_CFP(expr) do { \
4032 rb_execution_context_t *ec__ = GET_EC(); \
4033 VALUE *const curr_sp = (ec__->cfp++)->sp; \
4034 VALUE *const saved_sp = ec__->cfp->sp; \
4035 ec__->cfp->sp = curr_sp; \
4036 expr; \
4037 (ec__->cfp--)->sp = saved_sp; \
4038} while (0)
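/* Illustrative sketch (editorial, not in the original source): REWIND_CFP
 * evaluates `expr` as if the currently running C method's frame had already
 * been popped. It steps cfp up to the caller (cfp++), lends the caller the
 * callee's stack pointer so the stack top stays consistent, runs `expr`,
 * then restores the saved sp and steps cfp back down. The FrozenCore
 * helpers below use it so that operations such as rb_alias() observe the
 * calling frame rather than the helper's own frame. */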
4039
4040static VALUE
4041m_core_set_method_alias(VALUE self, VALUE cbase, VALUE sym1, VALUE sym2)
4042{
4043 REWIND_CFP({
4044 rb_alias(cbase, SYM2ID(sym1), SYM2ID(sym2));
4045 });
4046 return Qnil;
4047}
4048
4049static VALUE
4050m_core_set_variable_alias(VALUE self, VALUE sym1, VALUE sym2)
4051{
4052 REWIND_CFP({
4053 rb_alias_variable(SYM2ID(sym1), SYM2ID(sym2));
4054 });
4055 return Qnil;
4056}
4057
4058static VALUE
4059m_core_undef_method(VALUE self, VALUE cbase, VALUE sym)
4060{
4061 REWIND_CFP({
4062 ID mid = SYM2ID(sym);
4063 rb_undef(cbase, mid);
4064 rb_clear_method_cache(self, mid);
4065 });
4066 return Qnil;
4067}
4068
4069static VALUE
4070m_core_set_postexe(VALUE self)
4071{
4072 rb_set_end_proc(rb_call_end_proc, rb_block_proc());
4073 return Qnil;
4074}
4075
4076static VALUE core_hash_merge_kwd(VALUE hash, VALUE kw);
4077
4078static VALUE
4079core_hash_merge(VALUE hash, long argc, const VALUE *argv)
4080{
4081 Check_Type(hash, T_HASH);
4082 VM_ASSERT(argc % 2 == 0);
4083 rb_hash_bulk_insert(argc, argv, hash);
4084 return hash;
4085}
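/* Illustrative note (editorial, not in the original source): argv is a flat
 * key/value sequence, which is why argc must be even. Conceptually:
 *
 *   VALUE kv[] = { key1, val1, key2, val2 };
 *   core_hash_merge(hash, 4, kv);  // hash[key1] = val1; hash[key2] = val2
 *
 * The m_core_hash_merge_* wrappers below back the core#hash_merge_* calls
 * the compiler emits, e.g. for Hash literals containing splats. */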
4086
4087static VALUE
4088m_core_hash_merge_ptr(int argc, VALUE *argv, VALUE recv)
4089{
4090 VALUE hash = argv[0];
4091
4092 REWIND_CFP(hash = core_hash_merge(hash, argc-1, argv+1));
4093
4094 return hash;
4095}
4096
4097static int
4098kwmerge_i(VALUE key, VALUE value, VALUE hash)
4099{
4100 rb_hash_aset(hash, key, value);
4101 return ST_CONTINUE;
4102}
4103
4104static VALUE
4105m_core_hash_merge_kwd(VALUE recv, VALUE hash, VALUE kw)
4106{
4107 if (!NIL_P(kw)) {
4108 REWIND_CFP(hash = core_hash_merge_kwd(hash, kw));
4109 }
4110 return hash;
4111}
4112
4113static VALUE
4114m_core_make_shareable(VALUE recv, VALUE obj)
4115{
4116 return rb_ractor_make_shareable(obj);
4117}
4118
4119static VALUE
4120m_core_make_shareable_copy(VALUE recv, VALUE obj)
4121{
4122 return rb_ractor_make_shareable_copy(obj);
4123}
4124
4125static VALUE
4126m_core_ensure_shareable(VALUE recv, VALUE obj, VALUE name)
4127{
4128 return rb_ractor_ensure_shareable(obj, name);
4129}
4130
4131static VALUE
4132core_hash_merge_kwd(VALUE hash, VALUE kw)
4133{
4134 rb_hash_foreach(rb_to_hash_type(kw), kwmerge_i, hash);
4135 return hash;
4136}
4137
4138extern VALUE *rb_gc_stack_start;
4139extern size_t rb_gc_stack_maxsize;
4140
4141/* debug functions */
4142
4143/* :nodoc: */
4144static VALUE
4145sdr(VALUE self)
4146{
4147 rb_vm_bugreport(NULL, stderr);
4148 return Qnil;
4149}
4150
4151/* :nodoc: */
4152static VALUE
4153nsdr(VALUE self)
4154{
4155 VALUE ary = rb_ary_new();
4156#ifdef HAVE_BACKTRACE
4157#include <execinfo.h>
4158#define MAX_NATIVE_TRACE 1024
4159 static void *trace[MAX_NATIVE_TRACE];
4160 int n = (int)backtrace(trace, MAX_NATIVE_TRACE);
4161 char **syms = backtrace_symbols(trace, n);
4162 int i;
4163
4164 if (syms == 0) {
4165 rb_memerror();
4166 }
4167
4168 for (i=0; i<n; i++) {
4169 rb_ary_push(ary, rb_str_new2(syms[i]));
4170 }
4171 free(syms); /* OK */
4172#endif
4173 return ary;
4174}
4175
4176#if VM_COLLECT_USAGE_DETAILS
4177static VALUE usage_analysis_insn_start(VALUE self);
4178static VALUE usage_analysis_operand_start(VALUE self);
4179static VALUE usage_analysis_register_start(VALUE self);
4180static VALUE usage_analysis_insn_stop(VALUE self);
4181static VALUE usage_analysis_operand_stop(VALUE self);
4182static VALUE usage_analysis_register_stop(VALUE self);
4183static VALUE usage_analysis_insn_running(VALUE self);
4184static VALUE usage_analysis_operand_running(VALUE self);
4185static VALUE usage_analysis_register_running(VALUE self);
4186static VALUE usage_analysis_insn_clear(VALUE self);
4187static VALUE usage_analysis_operand_clear(VALUE self);
4188static VALUE usage_analysis_register_clear(VALUE self);
4189#endif
4190
4191static VALUE
4192f_raise(int c, VALUE *v, VALUE _)
4193{
4194 return rb_f_raise(c, v);
4195}
4196
4197static VALUE
4198f_proc(VALUE _)
4199{
4200 return rb_block_proc();
4201}
4202
4203static VALUE
4204f_lambda(VALUE _)
4205{
4206 return rb_block_lambda();
4207}
4208
4209static VALUE
4210f_sprintf(int c, const VALUE *v, VALUE _)
4211{
4212 return rb_f_sprintf(c, v);
4213}
4214
4215/* :nodoc: */
4216static VALUE
4217vm_mtbl(VALUE self, VALUE obj, VALUE sym)
4218{
4219 vm_mtbl_dump(CLASS_OF(obj), RTEST(sym) ? SYM2ID(sym) : 0);
4220 return Qnil;
4221}
4222
4223/* :nodoc: */
4224static VALUE
4225vm_mtbl2(VALUE self, VALUE obj, VALUE sym)
4226{
4227 vm_mtbl_dump(obj, RTEST(sym) ? SYM2ID(sym) : 0);
4228 return Qnil;
4229}
4230
4231/*
4232 * call-seq:
4233 * RubyVM.keep_script_lines -> true or false
4234 *
4235 * Returns the current +keep_script_lines+ status. Currently it only returns
4236 * +true+ or +false+, but it may return other objects in the future.
4237 *
4238 * Note that this is an API for ruby internal use, debugging,
4239 * and research. Do not use this for any other purpose.
4240 * The compatibility is not guaranteed.
4241 */
4242static VALUE
4243vm_keep_script_lines(VALUE self)
4244{
4245 return RBOOL(ruby_vm_keep_script_lines);
4246}
4247
4248/*
4249 * call-seq:
4250 * RubyVM.keep_script_lines = true / false
4251 *
4252 * Sets the +keep_script_lines+ flag. If the flag is set, all
4253 * loaded scripts are recorded in the interpreter process.
4254 *
4255 * Note that this is an API for ruby internal use, debugging,
4256 * and research. Do not use this for any other purpose.
4257 * The compatibility is not guaranteed.
4258 */
4259static VALUE
4260vm_keep_script_lines_set(VALUE self, VALUE flags)
4261{
4262 ruby_vm_keep_script_lines = RTEST(flags);
4263 return flags;
4264}
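/* Illustrative usage from Ruby (editorial sketch, not in the original
 * source):
 *
 *   RubyVM.keep_script_lines = true
 *   load "foo.rb"                 # source lines of foo.rb are retained
 *   RubyVM.keep_script_lines      #=> true
 *
 * The flag is consulted when code is parsed, so set it before loading the
 * code whose lines you want kept. */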
4265
4266void
4267Init_VM(void)
4268{
4269 VALUE opts;
4270 VALUE klass;
4271 VALUE fcore;
4272
4273 /*
4274 * Document-class: RubyVM
4275 *
4276 * The RubyVM module only exists on MRI. +RubyVM+ is not defined in
4277 * other Ruby implementations such as JRuby and TruffleRuby.
4278 *
4279 * The RubyVM module provides some access to MRI internals.
4280 * This module is for very limited purposes, such as debugging,
4281 * prototyping, and research. Normal users must not use it.
4282 * This module is not portable between Ruby implementations.
4283 */
4284 rb_cRubyVM = rb_define_class("RubyVM", rb_cObject);
4285 rb_undef_alloc_func(rb_cRubyVM);
4286 rb_undef_method(CLASS_OF(rb_cRubyVM), "new");
4287 rb_define_singleton_method(rb_cRubyVM, "stat", vm_stat, -1);
4288 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines", vm_keep_script_lines, 0);
4289 rb_define_singleton_method(rb_cRubyVM, "keep_script_lines=", vm_keep_script_lines_set, 1);
4290
4291#if USE_DEBUG_COUNTER
4292 rb_define_singleton_method(rb_cRubyVM, "reset_debug_counters", rb_debug_counter_reset, 0);
4293 rb_define_singleton_method(rb_cRubyVM, "show_debug_counters", rb_debug_counter_show, 0);
4294#endif
4295
4296 /* FrozenCore (hidden) */
4297 fcore = rb_class_new(rb_cBasicObject);
4298 rb_set_class_path(fcore, rb_cRubyVM, "FrozenCore");
4299 rb_vm_register_global_object(rb_class_path_cached(fcore));
4300 klass = rb_singleton_class(fcore);
4301 rb_define_method_id(klass, id_core_set_method_alias, m_core_set_method_alias, 3);
4302 rb_define_method_id(klass, id_core_set_variable_alias, m_core_set_variable_alias, 2);
4303 rb_define_method_id(klass, id_core_undef_method, m_core_undef_method, 2);
4304 rb_define_method_id(klass, id_core_set_postexe, m_core_set_postexe, 0);
4305 rb_define_method_id(klass, id_core_hash_merge_ptr, m_core_hash_merge_ptr, -1);
4306 rb_define_method_id(klass, id_core_hash_merge_kwd, m_core_hash_merge_kwd, 2);
4307 rb_define_method_id(klass, id_core_raise, f_raise, -1);
4308 rb_define_method_id(klass, id_core_sprintf, f_sprintf, -1);
4309 rb_define_method_id(klass, idProc, f_proc, 0);
4310 rb_define_method_id(klass, idLambda, f_lambda, 0);
4311 rb_define_method(klass, "make_shareable", m_core_make_shareable, 1);
4312 rb_define_method(klass, "make_shareable_copy", m_core_make_shareable_copy, 1);
4313 rb_define_method(klass, "ensure_shareable", m_core_ensure_shareable, 2);
4314 rb_obj_freeze(fcore);
4315 RBASIC_CLEAR_CLASS(klass);
4316 rb_obj_freeze(klass);
4317 rb_vm_register_global_object(fcore);
4318 rb_mRubyVMFrozenCore = fcore;
4319
4320 /*
4321 * Document-class: Thread
4322 *
4323 * Threads are the Ruby implementation for a concurrent programming model.
4324 *
4325 * Programs that require multiple threads of execution are a perfect
4326 * candidate for Ruby's Thread class.
4327 *
4328 * For example, we can create a new thread separate from the main thread's
4329 * execution using ::new.
4330 *
4331 * thr = Thread.new { puts "What's the big deal" }
4332 *
4333 * Then we are able to pause the execution of the main thread and allow
4334 * our new thread to finish, using #join:
4335 *
4336 * thr.join #=> "What's the big deal"
4337 *
4338 * If we don't call +thr.join+ before the main thread terminates, then all
4339 * other threads including +thr+ will be killed.
4340 *
4341 * Alternatively, you can use an array for handling multiple threads at
4342 * once, like in the following example:
4343 *
4344 * threads = []
4345 * threads << Thread.new { puts "What's the big deal" }
4346 * threads << Thread.new { 3.times { puts "Threads are fun!" } }
4347 *
4348 * After creating a few threads we wait for them all to finish
4349 * consecutively.
4350 *
4351 * threads.each { |thr| thr.join }
4352 *
4353 * To retrieve the last value of a thread, use #value
4354 *
4355 * thr = Thread.new { sleep 1; "Useful value" }
4356 * thr.value #=> "Useful value"
4357 *
4358 * === Thread initialization
4359 *
4360 * In order to create new threads, Ruby provides ::new, ::start, and
4361 * ::fork. A block must be provided with each of these methods, otherwise
4362 * a ThreadError will be raised.
4363 *
4364 * When subclassing the Thread class, the +initialize+ method of your
4365 * subclass will be ignored by ::start and ::fork. Otherwise, be sure to
4366 * call super in your +initialize+ method.
4367 *
4368 * === Thread termination
4369 *
4370 * For terminating threads, Ruby provides a variety of ways to do this.
4371 *
4372 * The class method ::kill, is meant to exit a given thread:
4373 *
4374 * thr = Thread.new { sleep }
4375 * Thread.kill(thr) # sends exit() to thr
4376 *
4377 * Alternatively, you can use the instance method #exit, or any of its
4378 * aliases #kill or #terminate.
4379 *
4380 * thr.exit
4381 *
4382 * === Thread status
4383 *
4384 * Ruby provides a few instance methods for querying the state of a given
4385 * thread. To get a string with the current thread's state use #status
4386 *
4387 * thr = Thread.new { sleep }
4388 * thr.status # => "sleep"
4389 * thr.exit
4390 * thr.status # => false
4391 *
4392 * You can also use #alive? to tell if the thread is running or sleeping,
4393 * and #stop? if the thread is dead or sleeping.
4394 *
4395 * === Thread variables and scope
4396 *
4397 * Since threads are created with blocks, the same variable-scoping rules
4398 * apply as for other Ruby blocks. Any local variables created within the
4399 * block are accessible only to this thread.
4400 *
4401 * ==== Fiber-local vs. Thread-local
4402 *
4403 * Each fiber has its own bucket for Thread#[] storage. When you set a
4404 * new fiber-local it is only accessible within this Fiber. To illustrate:
4405 *
4406 * Thread.new {
4407 * Thread.current[:foo] = "bar"
4408 * Fiber.new {
4409 * p Thread.current[:foo] # => nil
4410 * }.resume
4411 * }.join
4412 *
4413 * This example uses #[] for getting and #[]= for setting fiber-locals,
4414 * you can also use #keys to list the fiber-locals for a given
4415 * thread and #key? to check if a fiber-local exists.
4416 *
4417 * When it comes to thread-locals, they are accessible within the entire
4418 * scope of the thread. Given the following example:
4419 *
4420 * Thread.new{
4421 * Thread.current.thread_variable_set(:foo, 1)
4422 * p Thread.current.thread_variable_get(:foo) # => 1
4423 * Fiber.new{
4424 * Thread.current.thread_variable_set(:foo, 2)
4425 * p Thread.current.thread_variable_get(:foo) # => 2
4426 * }.resume
4427 * p Thread.current.thread_variable_get(:foo) # => 2
4428 * }.join
4429 *
4430 * You can see that the thread-local +:foo+ carried over into the fiber
4431 * and was changed to +2+ by the end of the thread.
4432 *
4433 * This example makes use of #thread_variable_set to create new
4434 * thread-locals, and #thread_variable_get to reference them.
4435 *
4436 * There is also #thread_variables to list all thread-locals, and
4437 * #thread_variable? to check if a given thread-local exists.
4438 *
4439 * === Exception handling
4440 *
4441 * When an unhandled exception is raised inside a thread, it will
4442 * terminate. By default, this exception will not propagate to other
4443 * threads. The exception is stored and when another thread calls #value
4444 * or #join, the exception will be re-raised in that thread.
4445 *
4446 * t = Thread.new{ raise 'something went wrong' }
4447 * t.value #=> RuntimeError: something went wrong
4448 *
4449 * An exception can be raised from outside the thread using the
4450 * Thread#raise instance method, which takes the same parameters as
4451 * Kernel#raise.
4452 *
4453 * Setting Thread.abort_on_exception = true, Thread#abort_on_exception =
4454 * true, or $DEBUG = true will cause a subsequent unhandled exception
4455 * raised in a thread to be automatically re-raised in the main thread.
4456 *
4457 * With the addition of the class method ::handle_interrupt, you can now
4458 * handle exceptions asynchronously with threads.
4459 *
4460 * === Scheduling
4461 *
4462 * Ruby provides a few ways to support scheduling threads in your program.
4463 *
4464 * The first way is by using the class method ::stop, to put the current
4465 * running thread to sleep and schedule the execution of another thread.
4466 *
4467 * Once a thread is asleep, you can use the instance method #wakeup to
4468 * mark your thread as eligible for scheduling.
4469 *
4470 * You can also try ::pass, which attempts to pass execution to another
4471 * thread; whether the running thread actually switches is up to the OS.
4472 * The same goes for #priority, which lets you hint to the thread
4473 * scheduler which threads should take precedence when passing
4474 * execution. This method is also OS-dependent and may be ignored on
4475 * some platforms.
4476 *
4477 */
4478 rb_cThread = rb_define_class("Thread", rb_cObject);
4479 rb_undef_alloc_func(rb_cThread);
4480
4481#if VM_COLLECT_USAGE_DETAILS
4482 /* ::RubyVM::USAGE_ANALYSIS_* */
4483#define define_usage_analysis_hash(name) /* shut up rdoc -C */ \
4484 rb_define_const(rb_cRubyVM, "USAGE_ANALYSIS_" #name, rb_hash_new())
4485 define_usage_analysis_hash(INSN);
4486 define_usage_analysis_hash(REGS);
4487 define_usage_analysis_hash(INSN_BIGRAM);
4488
4489 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_START", usage_analysis_insn_start, 0);
4490 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_START", usage_analysis_operand_start, 0);
4491 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_START", usage_analysis_register_start, 0);
4492 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_STOP", usage_analysis_insn_stop, 0);
4493 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_STOP", usage_analysis_operand_stop, 0);
4494 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_STOP", usage_analysis_register_stop, 0);
4495 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_RUNNING", usage_analysis_insn_running, 0);
4496 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_RUNNING", usage_analysis_operand_running, 0);
4497 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_RUNNING", usage_analysis_register_running, 0);
4498 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_INSN_CLEAR", usage_analysis_insn_clear, 0);
4499 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_OPERAND_CLEAR", usage_analysis_operand_clear, 0);
4500 rb_define_singleton_method(rb_cRubyVM, "USAGE_ANALYSIS_REGISTER_CLEAR", usage_analysis_register_clear, 0);
4501#endif
4502
4503 /* ::RubyVM::OPTS
4504 * An Array of VM build options.
4505 * This constant is MRI specific.
4506 */
4507 rb_define_const(rb_cRubyVM, "OPTS", opts = rb_ary_new());
4508
4509#if OPT_DIRECT_THREADED_CODE
4510 rb_ary_push(opts, rb_str_new2("direct threaded code"));
4511#elif OPT_TOKEN_THREADED_CODE
4512 rb_ary_push(opts, rb_str_new2("token threaded code"));
4513#elif OPT_CALL_THREADED_CODE
4514 rb_ary_push(opts, rb_str_new2("call threaded code"));
4515#endif
4516
4517#if OPT_OPERANDS_UNIFICATION
4518 rb_ary_push(opts, rb_str_new2("operands unification"));
4519#endif
4520#if OPT_INSTRUCTIONS_UNIFICATION
4521 rb_ary_push(opts, rb_str_new2("instructions unification"));
4522#endif
4523#if OPT_INLINE_METHOD_CACHE
4524 rb_ary_push(opts, rb_str_new2("inline method cache"));
4525#endif
4526
4527 /* ::RubyVM::INSTRUCTION_NAMES
4528 * A list of bytecode instruction names in MRI.
4529 * This constant is MRI specific.
4530 */
4531 rb_define_const(rb_cRubyVM, "INSTRUCTION_NAMES", rb_insns_name_array());
4532
4533 /* ::RubyVM::DEFAULT_PARAMS
4534 * This constant exposes the VM's default parameters.
4535 * Note that changing these values does not affect VM execution.
4536 * The specification is not stable and you should not depend on this value.
4537 * Of course, this constant is MRI specific.
4538 */
4539 rb_define_const(rb_cRubyVM, "DEFAULT_PARAMS", vm_default_params());
4540
4541 /* debug functions ::RubyVM::SDR(), ::RubyVM::NSDR() */
4542#if VMDEBUG
4543 rb_define_singleton_method(rb_cRubyVM, "SDR", sdr, 0);
4544 rb_define_singleton_method(rb_cRubyVM, "NSDR", nsdr, 0);
4545 rb_define_singleton_method(rb_cRubyVM, "mtbl", vm_mtbl, 2);
4546 rb_define_singleton_method(rb_cRubyVM, "mtbl2", vm_mtbl2, 2);
4547#else
4548 (void)sdr;
4549 (void)nsdr;
4550 (void)vm_mtbl;
4551 (void)vm_mtbl2;
4552#endif
4553
4554 /* VM bootstrap: phase 2 */
4555 {
4556 rb_vm_t *vm = ruby_current_vm_ptr;
4557 rb_thread_t *th = GET_THREAD();
4558 VALUE filename = rb_fstring_lit("<main>");
4559 const rb_iseq_t *iseq = rb_iseq_new(Qnil, filename, filename, Qnil, 0, ISEQ_TYPE_TOP);
4560
4561 // Ractor setup
4562 rb_ractor_main_setup(vm, th->ractor, th);
4563
4564 /* create vm object */
4565 vm->self = TypedData_Wrap_Struct(rb_cRubyVM, &vm_data_type, vm);
4566
4567 /* create main thread */
4568 th->self = TypedData_Wrap_Struct(rb_cThread, &thread_data_type, th);
4569 vm->ractor.main_thread = th;
4570 vm->ractor.main_ractor = th->ractor;
4571 th->vm = vm;
4572 th->top_wrapper = 0;
4573 th->top_self = rb_vm_top_self();
4574
4575 rb_root_fiber_obj_setup(th);
4576
4577 rb_vm_register_global_object((VALUE)iseq);
4578 th->ec->cfp->_iseq = iseq;
4579 th->ec->cfp->pc = ISEQ_BODY(iseq)->iseq_encoded;
4580 th->ec->cfp->self = th->top_self;
4581
4582 VM_ENV_FLAGS_UNSET(th->ec->cfp->ep, VM_FRAME_FLAG_CFRAME);
4583 VM_STACK_ENV_WRITE(th->ec->cfp->ep, VM_ENV_DATA_INDEX_ME_CREF, (VALUE)vm_cref_new(rb_cObject, METHOD_VISI_PRIVATE, FALSE, NULL, FALSE, FALSE));
4584
4585 /*
4586 * The Binding of the top level scope
4587 */
4588 rb_define_global_const("TOPLEVEL_BINDING", rb_binding_new());
4589
4590#ifdef _WIN32
4591 rb_objspace_gc_enable(vm->gc.objspace);
4592#endif
4593 }
4594 vm_init_redefined_flag();
4595
4596 rb_block_param_proxy = rb_obj_alloc(rb_cObject);
4597 rb_add_method_optimized(rb_singleton_class(rb_block_param_proxy), idCall,
4598 OPTIMIZED_METHOD_TYPE_BLOCK_CALL, 0, METHOD_VISI_PUBLIC);
4599 rb_obj_freeze(rb_block_param_proxy);
4600 rb_vm_register_global_object(rb_block_param_proxy);
4601
4602 /* vm_backtrace.c */
4603 Init_vm_backtrace();
4604}
4605
4606void
4607rb_vm_set_progname(VALUE filename)
4608{
4609 rb_thread_t *th = GET_VM()->ractor.main_thread;
4610 rb_control_frame_t *cfp = (void *)(th->ec->vm_stack + th->ec->vm_stack_size);
4611 --cfp;
4612
4613 filename = rb_str_new_frozen(filename);
4614 rb_iseq_pathobj_set(CFP_ISEQ(cfp), filename, rb_iseq_realpath(CFP_ISEQ(cfp)));
4615}
4616
4617extern const struct st_hash_type rb_fstring_hash_type;
4618
4619static rb_vm_t _vm;
4620static rb_thread_t _main_thread = {
4621 .vm = &_vm,
4622 .main_thread = 1,
4623};
4624
4625void
4626Init_BareVM(void)
4627{
4628 /* VM bootstrap: phase 1 */
4629 rb_vm_t *vm = &_vm;
4630 rb_thread_t *th = &_main_thread;
4631
4632 // setup the VM
4633 vm_init2(vm);
4634
4635 ruby_current_vm_ptr = vm;
4636 rb_objspace_alloc();
4637 rb_id_table_init(&vm->negative_cme_table, 16);
4638 st_init_existing_numtable_with_size(&vm->overloaded_cme_table, 0);
4639 st_init_existing_strtable_with_size(&vm->static_ext_inits, 0);
4640 set_init_embedded_numtable_with_size(&vm->unused_block_warning_table, 0);
4641 vm->global_hooks.type = hook_list_type_global;
4642
4643 // setup main thread
4644 th->nt = ZALLOC(struct rb_native_thread);
4645 th->ractor = vm->ractor.main_ractor = rb_ractor_main_alloc();
4646 Init_native_thread(th);
4647 rb_jit_cont_init();
4648 th_init(th, 0, vm);
4649
4650 rb_ractor_set_current_ec(th->ractor, th->ec);
4651
4652 /* n.b. native_main_thread_stack_top is set by the INIT_STACK macro */
4653 ruby_thread_init_stack(th, native_main_thread_stack_top);
4654
4655 // setup ractor system
4656 rb_native_mutex_initialize(&vm->ractor.sync.lock);
4657 rb_native_cond_initialize(&vm->ractor.sync.terminate_cond);
4658
4659 vm_opt_method_def_table = st_init_numtable();
4660 vm_opt_mid_table = st_init_numtable();
4661
4662#ifdef RUBY_THREAD_WIN32_H
4663 rb_native_cond_initialize(&vm->ractor.sync.barrier_complete_cond);
4664 rb_native_cond_initialize(&vm->ractor.sync.barrier_release_cond);
4665#endif
4666}
4667
4668void
4669ruby_init_stack(void *addr)
4670{
4671 native_main_thread_stack_top = addr;
4672}
4673
4674#ifndef _WIN32
4675#include <unistd.h>
4676#include <sys/mman.h>
4677#endif
4678
4679
4680#ifndef MARK_OBJECT_ARY_BUCKET_SIZE
4681#define MARK_OBJECT_ARY_BUCKET_SIZE 1024
4682#endif
4683
4684struct pin_array_list {
4685 VALUE next;
4686 long len;
4687 VALUE *array;
4688};
4689
4690static void
4691pin_array_list_mark(void *data)
4692{
4693 struct pin_array_list *array = (struct pin_array_list *)data;
4694 rb_gc_mark_movable(array->next);
4695
4696 rb_gc_mark_vm_stack_values(array->len, array->array);
4697}
4698
4699static void
4700pin_array_list_free(void *data)
4701{
4702 struct pin_array_list *array = (struct pin_array_list *)data;
4703 xfree(array->array);
4704}
4705
4706static size_t
4707pin_array_list_memsize(const void *data)
4708{
4709 return sizeof(struct pin_array_list) + (MARK_OBJECT_ARY_BUCKET_SIZE * sizeof(VALUE));
4710}
4711
4712static void
4713pin_array_list_update_references(void *data)
4714{
4715 struct pin_array_list *array = (struct pin_array_list *)data;
4716 array->next = rb_gc_location(array->next);
4717}
4718
4719static const rb_data_type_t pin_array_list_type = {
4720 .wrap_struct_name = "VM/pin_array_list",
4721 .function = {
4722 .dmark = pin_array_list_mark,
4723 .dfree = pin_array_list_free,
4724 .dsize = pin_array_list_memsize,
4725 .dcompact = pin_array_list_update_references,
4726 },
4727 .flags = RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE,
4728};
4729
4730static VALUE
4731pin_array_list_new(VALUE next)
4732{
4733 struct pin_array_list *array_list;
4734 VALUE obj = TypedData_Make_Struct(0, struct pin_array_list, &pin_array_list_type, array_list);
4735 RB_OBJ_WRITE(obj, &array_list->next, next);
4736 array_list->array = ALLOC_N(VALUE, MARK_OBJECT_ARY_BUCKET_SIZE);
4737 return obj;
4738}
4739
4740static VALUE
4741pin_array_list_append(VALUE obj, VALUE item)
4742{
4743 struct pin_array_list *array_list;
4744 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4745
4746 if (array_list->len >= MARK_OBJECT_ARY_BUCKET_SIZE) {
4747 obj = pin_array_list_new(obj);
4748 TypedData_Get_Struct(obj, struct pin_array_list, &pin_array_list_type, array_list);
4749 }
4750
4751 RB_OBJ_WRITE(obj, &array_list->array[array_list->len], item);
4752 array_list->len++;
4753 return obj;
4754}
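/* Illustrative note (editorial, not in the original source): buckets form a
 * prepend-only linked list. When the head bucket reaches
 * MARK_OBJECT_ARY_BUCKET_SIZE entries, a fresh bucket whose `next` points at
 * the old head is allocated and returned, so callers must keep the returned
 * VALUE as the new head:
 *
 *   VALUE list = pin_array_list_new(Qnil);
 *   list = pin_array_list_append(list, obj);  // may return a new head
 */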
4755
4756void
4757rb_vm_register_global_object(VALUE obj)
4758{
4760 if (RB_SPECIAL_CONST_P(obj)) {
4761 return;
4762 }
4763
4764 switch (RB_BUILTIN_TYPE(obj)) {
4765 case T_CLASS:
4766 case T_MODULE:
4767 if (FL_TEST(obj, RCLASS_IS_ROOT)) {
4768 return;
4769 }
4770 FL_SET(obj, RCLASS_IS_ROOT);
4771 break;
4772 default:
4773 break;
4774 }
4775 RB_VM_LOCKING() {
4776 VALUE list = GET_VM()->mark_object_ary;
4777 VALUE head = pin_array_list_append(list, obj);
4778 if (head != list) {
4779 GET_VM()->mark_object_ary = head;
4780 }
4781 RB_GC_GUARD(obj);
4782 }
4783}
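/* Illustrative usage (editorial sketch, not in the original source):
 * registering a VALUE roots it for the remaining lifetime of the VM; the
 * pin_array_list keeps it both marked and unmovable by compaction:
 *
 *   VALUE cache = rb_str_new_cstr("cached");  // hypothetical object
 *   rb_vm_register_global_object(cache);
 *
 * Note that this file provides no corresponding unregister operation. */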
4784
4785VALUE rb_cc_refinement_set_create(void);
4786
4787void
4788Init_vm_objects(void)
4789{
4790 rb_vm_t *vm = GET_VM();
4791
4792 /* initialize mark object array, hash */
4793 vm->mark_object_ary = pin_array_list_new(Qnil);
4794 st_init_existing_table_with_size(&vm->ci_table, &vm_ci_hashtype, 0);
4795 vm->cc_refinement_set = rb_cc_refinement_set_create();
4796}
4797
4798// Whether JIT is enabled or not, we need to load/undef `#with_jit` for other builtins.
4799#include "jit_hook.rbinc"
4800#include "jit_undef.rbinc"
4801
4802// Stub for builtin function when not building YJIT units
4803#if !USE_YJIT
4804void Init_builtin_yjit(void) {}
4805#endif
4806
4807// Stub for builtin function when not building ZJIT units
4808#if !USE_ZJIT
4809void Init_builtin_zjit(void) {}
4810#endif
4811
4812/* top self */
4813
4814static VALUE
4815main_to_s(VALUE obj)
4816{
4817 return rb_str_new2("main");
4818}
4819
4820VALUE
4821rb_vm_top_self(void)
4822{
4823 const rb_box_t *box = rb_current_box();
4824 VM_ASSERT(box);
4825 VM_ASSERT(box->top_self);
4826 return box->top_self;
4827}
4828
4829void
4830Init_top_self(void)
4831{
4832 rb_vm_t *vm = GET_VM();
4833 vm->root_box = (rb_box_t *)rb_root_box();
4834 vm->root_box->top_self = rb_obj_alloc(rb_cObject);
4835 rb_define_singleton_method(vm->root_box->top_self, "to_s", main_to_s, 0);
4836 rb_define_alias(rb_singleton_class(vm->root_box->top_self), "inspect", "to_s");
4837}
4838
4839VALUE *
4840rb_ruby_verbose_ptr(void)
4841{
4842 rb_ractor_t *cr = GET_RACTOR();
4843 return &cr->verbose;
4844}
4845
4846VALUE *
4847rb_ruby_debug_ptr(void)
4848{
4849 rb_ractor_t *cr = GET_RACTOR();
4850 return &cr->debug;
4851}
4852
4853bool rb_free_at_exit = false;
4854
4855bool
4856ruby_free_at_exit_p(void)
4857{
4858 return rb_free_at_exit;
4859}
4860
4861/* iseq.c */
4862VALUE rb_insn_operand_intern(const rb_iseq_t *iseq,
4863 VALUE insn, int op_no, VALUE op,
4864 int len, size_t pos, VALUE *pnop, VALUE child);
4865
4866#if VM_COLLECT_USAGE_DETAILS
4867
4868#define HASH_ASET(h, k, v) rb_hash_aset((h), (st_data_t)(k), (st_data_t)(v))
4869
4870/* uh = {
4871 * insn(Fixnum) => ihash(Hash)
4872 * }
4873 * ihash = {
4874 * -1(Fixnum) => count, # insn usage
4875 * 0(Fixnum) => ophash, # operand usage
4876 * }
4877 * ophash = {
4878 * val(interned string) => count(Fixnum)
4879 * }
4880 */
4881static void
4882vm_analysis_insn(int insn)
4883{
4884 ID usage_hash;
4885 ID bigram_hash;
4886 static int prev_insn = -1;
4887
4888 VALUE uh;
4889 VALUE ihash;
4890 VALUE cv;
4891
4892 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4893 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
4894 uh = rb_const_get(rb_cRubyVM, usage_hash);
4895 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4896 ihash = rb_hash_new();
4897 HASH_ASET(uh, INT2FIX(insn), ihash);
4898 }
4899 if (NIL_P(cv = rb_hash_aref(ihash, INT2FIX(-1)))) {
4900 cv = INT2FIX(0);
4901 }
4902 HASH_ASET(ihash, INT2FIX(-1), INT2FIX(FIX2INT(cv) + 1));
4903
4904 /* calc bigram */
4905 if (prev_insn != -1) {
4906 VALUE bi;
4907 VALUE ary[2];
4908 VALUE cv;
4909
4910 ary[0] = INT2FIX(prev_insn);
4911 ary[1] = INT2FIX(insn);
4912 bi = rb_ary_new4(2, &ary[0]);
4913
4914 uh = rb_const_get(rb_cRubyVM, bigram_hash);
4915 if (NIL_P(cv = rb_hash_aref(uh, bi))) {
4916 cv = INT2FIX(0);
4917 }
4918 HASH_ASET(uh, bi, INT2FIX(FIX2INT(cv) + 1));
4919 }
4920 prev_insn = insn;
4921}
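/* Illustrative usage from Ruby (editorial sketch, not in the original
 * source), assuming a VM built with VM_COLLECT_USAGE_DETAILS:
 *
 *   RubyVM.USAGE_ANALYSIS_INSN_START
 *   # ... run the code to profile ...
 *   RubyVM.USAGE_ANALYSIS_INSN_STOP
 *   RubyVM::USAGE_ANALYSIS_INSN         # {insn => {-1 => count, ...}}
 *   RubyVM::USAGE_ANALYSIS_INSN_BIGRAM  # {[prev_insn, insn] => count}
 */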
4922
4923static void
4924vm_analysis_operand(int insn, int n, VALUE op)
4925{
4926 ID usage_hash;
4927
4928 VALUE uh;
4929 VALUE ihash;
4930 VALUE ophash;
4931 VALUE valstr;
4932 VALUE cv;
4933
4934 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
4935
4936 uh = rb_const_get(rb_cRubyVM, usage_hash);
4937 if (NIL_P(ihash = rb_hash_aref(uh, INT2FIX(insn)))) {
4938 ihash = rb_hash_new();
4939 HASH_ASET(uh, INT2FIX(insn), ihash);
4940 }
4941 if (NIL_P(ophash = rb_hash_aref(ihash, INT2FIX(n)))) {
4942 ophash = rb_hash_new();
4943 HASH_ASET(ihash, INT2FIX(n), ophash);
4944 }
4945 /* intern */
4946 valstr = rb_insn_operand_intern(CFP_ISEQ(GET_EC()->cfp), insn, n, op, 0, 0, 0, 0);
4947
4948 /* set count */
4949 if (NIL_P(cv = rb_hash_aref(ophash, valstr))) {
4950 cv = INT2FIX(0);
4951 }
4952 HASH_ASET(ophash, valstr, INT2FIX(FIX2INT(cv) + 1));
4953}
4954
4955static void
4956vm_analysis_register(int reg, int isset)
4957{
4958 ID usage_hash;
4959 VALUE uh;
4960 VALUE valstr;
4961 static const char regstrs[][5] = {
4962 "pc", /* 0 */
4963 "sp", /* 1 */
4964 "ep", /* 2 */
4965 "cfp", /* 3 */
4966 "self", /* 4 */
4967 "iseq", /* 5 */
4968 };
4969 static const char getsetstr[][4] = {
4970 "get",
4971 "set",
4972 };
4973 static VALUE syms[sizeof(regstrs) / sizeof(regstrs[0])][2];
4974
4975 VALUE cv;
4976
4977 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
4978 if (syms[0] == 0) {
4979 char buff[0x10];
4980 int i;
4981
4982 for (i = 0; i < (int)(sizeof(regstrs) / sizeof(regstrs[0])); i++) {
4983 int j;
4984 for (j = 0; j < 2; j++) {
4985 snprintf(buff, 0x10, "%d %s %-4s", i, getsetstr[j], regstrs[i]);
4986 syms[i][j] = ID2SYM(rb_intern(buff));
4987 }
4988 }
4989 }
4990 valstr = syms[reg][isset];
4991
4992 uh = rb_const_get(rb_cRubyVM, usage_hash);
4993 if (NIL_P(cv = rb_hash_aref(uh, valstr))) {
4994 cv = INT2FIX(0);
4995 }
4996 HASH_ASET(uh, valstr, INT2FIX(FIX2INT(cv) + 1));
4997}
4998
4999#undef HASH_ASET
5000
5001static void (*ruby_vm_collect_usage_func_insn)(int insn) = NULL;
5002static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op) = NULL;
5003static void (*ruby_vm_collect_usage_func_register)(int reg, int isset) = NULL;
5004
5005/* :nodoc: */
5006static VALUE
5007usage_analysis_insn_start(VALUE self)
5008{
5009 ruby_vm_collect_usage_func_insn = vm_analysis_insn;
5010 return Qnil;
5011}
5012
5013/* :nodoc: */
5014static VALUE
5015usage_analysis_operand_start(VALUE self)
5016{
5017 ruby_vm_collect_usage_func_operand = vm_analysis_operand;
5018 return Qnil;
5019}
5020
5021/* :nodoc: */
5022static VALUE
5023usage_analysis_register_start(VALUE self)
5024{
5025 ruby_vm_collect_usage_func_register = vm_analysis_register;
5026 return Qnil;
5027}
5028
5029/* :nodoc: */
5030static VALUE
5031usage_analysis_insn_stop(VALUE self)
5032{
5033 ruby_vm_collect_usage_func_insn = 0;
5034 return Qnil;
5035}
5036
5037/* :nodoc: */
5038static VALUE
5039usage_analysis_operand_stop(VALUE self)
5040{
5041 ruby_vm_collect_usage_func_operand = 0;
5042 return Qnil;
5043}
5044
5045/* :nodoc: */
5046static VALUE
5047usage_analysis_register_stop(VALUE self)
5048{
5049 ruby_vm_collect_usage_func_register = 0;
5050 return Qnil;
5051}
5052
5053/* :nodoc: */
5054static VALUE
5055usage_analysis_insn_running(VALUE self)
5056{
5057 return RBOOL(ruby_vm_collect_usage_func_insn != 0);
5058}
5059
5060/* :nodoc: */
5061static VALUE
5062usage_analysis_operand_running(VALUE self)
5063{
5064 return RBOOL(ruby_vm_collect_usage_func_operand != 0);
5065}
5066
5067/* :nodoc: */
5068static VALUE
5069usage_analysis_register_running(VALUE self)
5070{
5071 return RBOOL(ruby_vm_collect_usage_func_register != 0);
5072}
5073
5074static VALUE
5075usage_analysis_clear(VALUE self, ID usage_hash)
5076{
5077 VALUE uh;
5078 uh = rb_const_get(self, usage_hash);
5079 rb_hash_clear(uh);
5080
5081 return Qtrue;
5082}
5083
5084
5085/* :nodoc: */
5086static VALUE
5087usage_analysis_insn_clear(VALUE self)
5088{
5089 ID usage_hash;
5090 ID bigram_hash;
5091
5092 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
5093 CONST_ID(bigram_hash, "USAGE_ANALYSIS_INSN_BIGRAM");
5094 usage_analysis_clear(rb_cRubyVM, usage_hash);
5095 return usage_analysis_clear(rb_cRubyVM, bigram_hash);
5096}
5097
5098/* :nodoc: */
5099static VALUE
5100usage_analysis_operand_clear(VALUE self)
5101{
5102 ID usage_hash;
5103
5104 CONST_ID(usage_hash, "USAGE_ANALYSIS_INSN");
5105 return usage_analysis_clear(self, usage_hash);
5106}
5107
5108/* :nodoc: */
5109static VALUE
5110usage_analysis_register_clear(VALUE self)
5111{
5112 ID usage_hash;
5113
5114 CONST_ID(usage_hash, "USAGE_ANALYSIS_REGS");
5115 return usage_analysis_clear(self, usage_hash);
5116}
5117
5118#else
5119
5120MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_insn)(int insn)) = 0;
5121MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_operand)(int insn, int n, VALUE op)) = 0;
5122MAYBE_UNUSED(static void (*ruby_vm_collect_usage_func_register)(int reg, int isset)) = 0;
5123
5124#endif
5125
5126#if VM_COLLECT_USAGE_DETAILS
5127/* @param insn instruction number */
5128static void
5129vm_collect_usage_insn(int insn)
5130{
5131 if (RUBY_DTRACE_INSN_ENABLED()) {
5132 RUBY_DTRACE_INSN(rb_insns_name(insn));
5133 }
5134 if (ruby_vm_collect_usage_func_insn)
5135 (*ruby_vm_collect_usage_func_insn)(insn);
5136}
5137
5138/* @param insn instruction number
5139 * @param n n-th operand
5140 * @param op operand value
5141 */
5142static void
5143vm_collect_usage_operand(int insn, int n, VALUE op)
5144{
5145 if (RUBY_DTRACE_INSN_OPERAND_ENABLED()) {
5146 VALUE valstr;
5147
5148 valstr = rb_insn_operand_intern(CFP_ISEQ(GET_EC()->cfp), insn, n, op, 0, 0, 0, 0);
5149
5150 RUBY_DTRACE_INSN_OPERAND(RSTRING_PTR(valstr), rb_insns_name(insn));
5151 RB_GC_GUARD(valstr);
5152 }
5153 if (ruby_vm_collect_usage_func_operand)
5154 (*ruby_vm_collect_usage_func_operand)(insn, n, op);
5155}
5156
5157/* @param reg register id. see code of vm_analysis_register() */
5158/* @param isset 0: read, 1: write */
5159static void
5160vm_collect_usage_register(int reg, int isset)
5161{
5162 if (ruby_vm_collect_usage_func_register)
5163 (*ruby_vm_collect_usage_func_register)(reg, isset);
5164}
5165#endif
5166
5167const struct rb_callcache *
5168rb_vm_empty_cc(void)
5169{
5170 return &vm_empty_cc;
5171}
5172
5173const struct rb_callcache *
5174rb_vm_empty_cc_for_super(void)
5175{
5176 return &vm_empty_cc_for_super;
5177}
5178
5179#include "vm_call_iseq_optimized.inc" /* required from vm_insnhelper.c */