24#include "eval_intern.h"
26#include "internal/bits.h"
27#include "internal/class.h"
28#include "internal/gc.h"
29#include "internal/hash.h"
30#include "internal/symbol.h"
31#include "internal/thread.h"
42static VALUE sym_default;
47 rb_event_hook_flag_t hook_flags;
55 unsigned int target_line;
61#define MAX_EVENT_NUM 32
69 rb_gc_mark(hook->data);
77 if (!rb_gc_checking_shareable()) {
83 rb_gc_mark_and_move(&hook->data);
94 hooks->need_clean =
true;
96 if (hooks->running == 0) {
103void rb_clear_attr_ccs(
void);
104void rb_clear_bf_ccs(
void);
    rb_event_flag_t enabled_iseq_events = ruby_vm_event_enabled_global_flags & ISEQ_TRACE_EVENTS;
    bool first_time_iseq_events_p = new_iseq_events & ~enabled_iseq_events;
    if (first_time_iseq_events_p) {
        rb_iseq_trace_set_all(new_iseq_events | enabled_iseq_events);
    else if (enable_c_call || enable_c_return) {
    else if (enable_call || enable_return) {
    ruby_vm_event_flags = new_events;
    ruby_vm_event_enabled_global_flags |= new_events;
    rb_objspace_set_event_hook(new_events);
    if (first_time_iseq_events_p || enable_c_call || enable_c_return) {
        rb_yjit_tracing_invalidate_all();
        rb_zjit_tracing_invalidate_all();
        rb_raise(rb_eTypeError, "Can not specify normal event and internal event simultaneously.");
    hook->hook_flags = hook_flags;
    hook->events = events;
    hook->filter.th = NULL;
    hook->filter.target_line = 0;
    hook->next = list->hooks;
    list->events |= hook->events;
    update_global_event_hook(prev_events, list->events);
    hook_list_connect(Qundef, list, hook, TRUE);
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
    hook->filter.th = th;
    connect_event_hook(ec, hook);
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
    rb_add_event_hook2(func, events, data, RUBY_EVENT_HOOK_FLAG_SAFE);
    rb_threadptr_add_event_hook(GET_EC(), rb_thread_ptr(thval), func, events, data, hook_flags);
    rb_event_hook_t *hook = alloc_event_hook(func, events, data, hook_flags);
    connect_event_hook(GET_EC(), hook);
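/*
 * Hedged usage sketch (not part of vm_trace.c): the public entry points
 * rb_add_event_hook() and rb_thread_add_event_hook() funnel into
 * alloc_event_hook()/connect_event_hook() above. A minimal C-extension hook
 * could look like this; the function names and the counter are illustrative
 * only, and the sketch assumes <ruby/ruby.h> and <ruby/debug.h>.
 */
#include "ruby/ruby.h"
#include "ruby/debug.h"

static unsigned long example_call_count; /* hypothetical counter */

static void
example_call_hook(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
{
    (void)data; (void)self; (void)mid; (void)klass;
    if (evflag & RUBY_EVENT_CALL) example_call_count++;
}

static void
example_install_hook(void)
{
    /* data (Qnil here) is handed back to the hook as its second argument */
    rb_add_event_hook(example_call_hook, RUBY_EVENT_CALL | RUBY_EVENT_RETURN, Qnil);
}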
    VM_ASSERT(list->running == 0);
    VM_ASSERT(list->need_clean == true);
    list->need_clean = false;
    while ((hook = *nextp) != 0) {
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
            list->events |= hook->events;
    if (list->is_local) {
        if (list->events == 0) {
        update_global_event_hook(prev_events, list->events);
    if (UNLIKELY(list->need_clean)) {
        if (list->running == 0) {
#define MATCH_ANY_FILTER_TH ((rb_thread_t *)1)
        if (func == 0 || hook->func == func) {
            if (hook->filter.th == filter_th || filter_th == MATCH_ANY_FILTER_TH) {
                if (UNDEF_P(data) || hook->data == data) {
                    hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
                    list->need_clean = true;
    clean_hooks_check(list);
    return remove_event_hook(ec, filter_th, func, data);
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, Qundef);
    return rb_threadptr_remove_event_hook(GET_EC(), rb_thread_ptr(thval), func, data);
    return remove_event_hook(GET_EC(), NULL, func, Qundef);
    return remove_event_hook(GET_EC(), NULL, func, data);
    rb_threadptr_remove_event_hook(ec, rb_ec_thread_ptr(ec), 0, Qundef);
    rb_threadptr_remove_event_hook(ec, MATCH_ANY_FILTER_TH, 0, Qundef);
    for (hook = list->hooks; hook; hook = hook->next) {
        if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) &&
            (trace_arg->event & hook->events) &&
            (LIKELY(hook->filter.th == 0) || hook->filter.th == rb_ec_thread_ptr(ec)) &&
            (LIKELY(hook->filter.target_line == 0) || (hook->filter.target_line == (unsigned int)rb_vm_get_sourceline(ec->cfp)))) {
            if (!(hook->hook_flags & RUBY_EVENT_HOOK_FLAG_RAW_ARG)) {
                (*hook->func)(trace_arg->event, hook->data, trace_arg->self, trace_arg->id, trace_arg->klass);
                (*((rb_event_hook_raw_arg_func_t)hook->func))(hook->data, trace_arg);
    if (list->events & trace_arg->event) {
    clean_hooks_check(list);
    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return;
    exec_hooks_body(ec, list, trace_arg);
    exec_hooks_postcheck(ec, list);
    enum ruby_tag_type state;
    if (exec_hooks_precheck(ec, list, trace_arg) == 0) return 0;
    raised = rb_ec_reset_raised(ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        exec_hooks_body(ec, list, trace_arg);
    exec_hooks_postcheck(ec, list);
    rb_ec_set_raised(ec);
    ec->trace_arg = trace_arg;
    exec_hooks_unprotected(ec, rb_ec_ractor_hooks(ec), trace_arg);
    ec->trace_arg = prev_trace_arg;
    if (ec->trace_arg == NULL &&
        trace_arg->self != rb_mRubyVMFrozenCore) {
        const VALUE errinfo = ec->errinfo;
        const VALUE old_recursive = ec->local_storage_recursive_hash;
        enum ruby_tag_type state = 0;
        ec->local_storage_recursive_hash = ec->local_storage_recursive_hash_for_trace;
        ec->trace_arg = trace_arg;
        if ((state = exec_hooks_protected(ec, hooks, trace_arg)) == TAG_NONE) {
            ec->errinfo = errinfo;
        ec->trace_arg = NULL;
        ec->local_storage_recursive_hash_for_trace = ec->local_storage_recursive_hash;
        ec->local_storage_recursive_hash = old_recursive;
            if (VM_FRAME_FINISHED_P(ec->cfp)) {
                rb_vm_tag_jmpbuf_deinit(&ec->tag->buf);
                ec->tag = ec->tag->prev;
            EC_JUMP_TAG(ec, state);
    rb_vm_t *const vm = rb_ec_vm_ptr(ec);
    enum ruby_tag_type state;
    dummy_trace_arg.event = 0;
    if (!ec->trace_arg) {
        ec->trace_arg = &dummy_trace_arg;
    raised = rb_ec_reset_raised(ec);
    if (LIKELY((state = EC_EXEC_TAG()) == TAG_NONE)) {
        result = (*func)(arg);
    rb_ec_reset_raised(ec);
    if (ec->trace_arg == &dummy_trace_arg) {
        ec->trace_arg = NULL;
#if defined RUBY_USE_SETJMPEX && RUBY_USE_SETJMPEX
    EC_JUMP_TAG(ec, state);
    rb_threadptr_add_event_hook(ec, filter_th, call_trace_func, RUBY_EVENT_ALL, trace, RUBY_EVENT_HOOK_FLAG_SAFE);
thread_add_trace_func_m(VALUE obj, VALUE trace)
    thread_add_trace_func(GET_EC(), rb_thread_ptr(obj), trace);
thread_set_trace_func_m(VALUE target_thread, VALUE trace)
    rb_thread_t *target_th = rb_thread_ptr(target_thread);
    rb_threadptr_remove_event_hook(ec, target_th, call_trace_func, Qundef);
    thread_add_trace_func(ec, target_th, trace);
#define C(name, NAME) case RUBY_EVENT_##NAME: CONST_ID(id, #name); return id;
        C(c_return, C_RETURN);
        C(b_return, B_RETURN);
        C(thread_begin, THREAD_BEGIN);
        C(thread_end, THREAD_END);
        C(fiber_switch, FIBER_SWITCH);
        C(script_compiled, SCRIPT_COMPILED);
    cfp = rb_vm_get_ruby_level_next_cfp(ec, cfp);
        *pathp = rb_iseq_path(iseq);
            *linep = FIX2INT(rb_iseq_first_lineno(iseq));
            *linep = rb_vm_get_sourceline(cfp);
    get_path_and_lineno(ec, ec->cfp, event, &filename, &line);
        rb_ec_frame_method_id_and_class(ec, &id, 0, &klass);
            klass = RBASIC(klass)->klass;
        else if (RCLASS_SINGLETON_P(klass)) {
            klass = RCLASS_ATTACHED_OBJECT(klass);
    if (self && (filename != Qnil) &&
        (VM_FRAME_RUBYFRAME_P(ec->cfp) && imemo_type_p((VALUE)ec->cfp->iseq, imemo_iseq))) {
    argv[5] = klass ? klass : Qnil;
static VALUE rb_cTracePoint;
    VALUE local_target_set;
    void (*func)(VALUE tpval, void *data);
    rb_gc_mark(tp->proc);
    rb_gc_mark(tp->local_target_set);
    if (tp->target_th) rb_gc_mark(tp->target_th->self);
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
symbol2event_flag(VALUE v)
    VALUE sym = rb_to_symbol_type(v);
#define C(name, NAME) CONST_ID(id, #name); if (sym == ID2SYM(id)) return RUBY_EVENT_##NAME
    C(c_return, C_RETURN);
    C(b_return, B_RETURN);
    C(thread_begin, THREAD_BEGIN);
    C(thread_end, THREAD_END);
    C(fiber_switch, FIBER_SWITCH);
    C(script_compiled, SCRIPT_COMPILED);
    C(a_return, A_RETURN);
    rb_raise(rb_eArgError, "unknown event: %"PRIsVALUE, rb_sym2str(sym));
    if (trace_arg == 0) {
    return get_trace_arg();
    return trace_arg->event;
    return ID2SYM(get_event_id(trace_arg->event));
    if (UNDEF_P(trace_arg->path)) {
        get_path_and_lineno(trace_arg->ec, trace_arg->cfp, trace_arg->event, &trace_arg->path, &trace_arg->lineno);
    fill_path_and_lineno(trace_arg);
    return INT2FIX(trace_arg->lineno);
    fill_path_and_lineno(trace_arg);
    return trace_arg->path;
    if (!trace_arg->klass_solved) {
        if (!trace_arg->klass) {
            rb_vm_control_frame_id_and_class(trace_arg->cfp, &trace_arg->id, &trace_arg->called_id, &trace_arg->klass);
        if (trace_arg->klass) {
            trace_arg->klass = RBASIC(trace_arg->klass)->klass;
            trace_arg->klass = Qnil;
        trace_arg->klass_solved = 1;
    switch (trace_arg->event) {
        const rb_control_frame_t *cfp = rb_vm_get_ruby_level_next_cfp(trace_arg->ec, trace_arg->cfp);
            if (VM_FRAME_TYPE(cfp) == VM_FRAME_MAGIC_BLOCK && !VM_FRAME_LAMBDA_P(cfp)) {
            return rb_iseq_parameters(cfp->iseq, is_proc);
        fill_id_and_klass(trace_arg);
        if (trace_arg->klass && trace_arg->id) {
            me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->called_id, &iclass);
            me = rb_method_entry_without_refinements(trace_arg->klass, trace_arg->id, &iclass);
        return rb_unnamed_parameters(rb_method_entry_arity(me));
    fill_id_and_klass(trace_arg);
    return trace_arg->id ? ID2SYM(trace_arg->id) : Qnil;
    fill_id_and_klass(trace_arg);
    return trace_arg->called_id ? ID2SYM(trace_arg->called_id) : Qnil;
    fill_id_and_klass(trace_arg);
    return trace_arg->klass;
    switch (trace_arg->event) {
        cfp = rb_vm_get_binding_creatable_next_cfp(trace_arg->ec, trace_arg->cfp);
        if (cfp && imemo_type_p((VALUE)cfp->iseq, imemo_iseq)) {
            return rb_vm_make_binding(trace_arg->ec, cfp);
    return trace_arg->self;
    if (UNDEF_P(trace_arg->data)) {
        rb_bug("rb_tracearg_return_value: unreachable");
    return trace_arg->data;
    if (UNDEF_P(trace_arg->data)) {
        rb_bug("rb_tracearg_raised_exception: unreachable");
    return trace_arg->data;
    VALUE data = trace_arg->data;
    if (UNDEF_P(data)) {
        rb_bug("rb_tracearg_eval_script: unreachable");
    if (rb_obj_is_iseq(data)) {
    VALUE data = trace_arg->data;
    if (UNDEF_P(data)) {
        rb_bug("rb_tracearg_instruction_sequence: unreachable");
    if (rb_obj_is_iseq(data)) {
        return rb_iseqw_new((const rb_iseq_t *)data);
    if (UNDEF_P(trace_arg->data)) {
        rb_bug("rb_tracearg_object: unreachable");
    return trace_arg->data;
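/*
 * Hedged usage sketch (not part of vm_trace.c): inside a C-level TracePoint
 * callback, the rb_tracearg_* accessors above are reached through
 * rb_tracearg_from_tracepoint(). The callback name is illustrative only.
 */
static void
example_tp_callback(VALUE tpval, void *data)
{
    rb_trace_arg_t *targ = rb_tracearg_from_tracepoint(tpval);
    VALUE event  = rb_tracearg_event(targ);   /* event name as a Symbol */
    VALUE path   = rb_tracearg_path(targ);    /* source file, or nil */
    VALUE lineno = rb_tracearg_lineno(targ);  /* Integer line number */

    (void)data;
    if (!NIL_P(path)) {
        rb_warn("%"PRIsVALUE" at %"PRIsVALUE":%d", event, path, NUM2INT(lineno));
    }
}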
    (*tp->func)(tpval, tp->data);
    if (tp->ractor == NULL || tp->ractor == GET_RACTOR()) {
    if (tp->local_target_set != Qfalse) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    if (tp->target_th) {
        rb_thread_add_event_hook2(tp->target_th->self, (rb_event_hook_func_t)tp_call_trace, tp->events, tpval,
                                  RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
                           RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
iseq_of(VALUE target)
        rb_raise(rb_eArgError, "specified target is not supported");
    return rb_iseqw_to_iseq(iseqv);
rb_tracepoint_enable_for_target(VALUE tpval, VALUE target, VALUE target_line)
    const rb_iseq_t *iseq = iseq_of(target);
    unsigned int line = 0;
    bool target_bmethod = false;
    if (tp->tracing > 0) {
        rb_raise(rb_eArgError, "can't nest-enable a targeting TracePoint");
    if (!NIL_P(target_line)) {
        rb_raise(rb_eArgError, "target_line is specified, but line event is not specified");
    VM_ASSERT(tp->local_target_set == Qfalse);
        if (def->type == VM_METHOD_TYPE_BMETHOD &&
            if (def->body.bmethod.hooks == NULL) {
                def->body.bmethod.hooks->is_local = true;
            rb_hook_list_connect_tracepoint(target, def->body.bmethod.hooks, tpval, 0);
            rb_hash_aset(tp->local_target_set, target, Qfalse);
            target_bmethod = true;
    n += rb_iseq_add_local_tracepoint_recursively(iseq, tp->events, tpval, line, target_bmethod);
    rb_hash_aset(tp->local_target_set, (VALUE)iseq, Qtrue);
        iseq->body->builtin_attrs & BUILTIN_ATTR_SINGLE_NOARG_LEAF) {
        rb_raise(rb_eArgError, "can not enable any hooks");
    rb_yjit_tracing_invalidate_all();
    rb_zjit_tracing_invalidate_all();
    ruby_vm_event_local_num++;
        rb_iseq_remove_local_tracepoint_recursively((rb_iseq_t *)target, tpval);
        VM_ASSERT(hooks != NULL);
        rb_hook_list_remove_tracepoint(hooks, tpval);
        if (hooks->events == 0) {
            rb_hook_list_free(def->body.bmethod.hooks);
            def->body.bmethod.hooks = NULL;
    if (tp->local_target_set) {
        rb_hash_foreach(tp->local_target_set, disable_local_event_iseq_i, tpval);
        ruby_vm_event_local_num--;
    if (tp->target_th) {
    tp->target_th = NULL;
                                 RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
    hook->filter.target_line = target_line;
    hook_list_connect(target, list, hook, FALSE);
        if (hook->data == tpval) {
            hook->hook_flags |= RUBY_EVENT_HOOK_FLAG_DELETED;
            list->need_clean = true;
        else if ((hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) == 0) {
            events |= hook->events;
    list->events = events;
    int previous_tracing = tp->tracing;
    if (target_thread == sym_default) {
        target_thread = Qnil;
    if (RTEST(target_thread)) {
        if (tp->target_th) {
            rb_raise(rb_eArgError, "can not override target_thread filter");
        tp->target_th = rb_thread_ptr(target_thread);
        RUBY_ASSERT(tp->target_th->self == target_thread);
        tp->target_th = NULL;
    if (NIL_P(target)) {
        if (!NIL_P(target_line)) {
            rb_raise(rb_eArgError, "only target_line is specified");
        rb_tracepoint_enable_for_target(tpval, target, target_line);
    return RBOOL(previous_tracing);
    int previous_tracing = tp->tracing;
    if (tp->local_target_set != Qfalse) {
        rb_raise(rb_eArgError, "can't disable a targeting TracePoint in a block");
    return RBOOL(previous_tracing);
    return RBOOL(tp->tracing);
    VALUE tpval = tp_alloc(klass);
    tp->events = events;
    if (RTEST(target_thval)) {
        target_th = rb_thread_ptr(target_thval);
    return tracepoint_new(rb_cTracePoint, target_th, events, func, data, Qundef);
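/*
 * Hedged usage sketch (not part of vm_trace.c): rb_tracepoint_new() above is
 * the public constructor behind this path. Building and enabling a C-level
 * TracePoint might look like this; example_tp_callback is the hypothetical
 * callback from the previous sketch.
 */
static VALUE
example_install_tracepoint(void)
{
    /* per-thread targeting via the first argument is not supported yet; pass 0 */
    VALUE tpval = rb_tracepoint_new(0, RUBY_EVENT_LINE | RUBY_EVENT_CALL,
                                    example_tp_callback, NULL);
    rb_tracepoint_enable(tpval);            /* start tracing */
    /* ... later: rb_tracepoint_disable(tpval); */
    return tpval;
}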
    for (i=0; i<argc; i++) {
        events |= symbol2event_flag(RARRAY_AREF(args, i));
        rb_raise(rb_eArgError, "must be called with a block");
    return tracepoint_new(self, 0, events, 0, 0, rb_block_proc());
    VALUE trace = tracepoint_new_s(ec, self, args);
    switch (trace_arg->event) {
        return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d in '%"PRIsVALUE"'>",
        return rb_sprintf("#<TracePoint:%"PRIsVALUE" '%"PRIsVALUE"' %"PRIsVALUE":%d>",
        return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE">",
        return rb_sprintf("#<TracePoint:%"PRIsVALUE" %"PRIsVALUE":%d>",
    return rb_sprintf("#<TracePoint:%s>", tp->tracing ? "enabled" : "disabled");
    int active = 0, deleted = 0;
        if (hook->hook_flags & RUBY_EVENT_HOOK_FLAG_DELETED) {
    VALUE stat = rb_hash_new();
    tracepoint_stat_event_hooks(stat, vm->self, rb_ec_ractor_hooks(ec)->hooks);
disallow_reentry(VALUE val)
    if (ec->trace_arg != NULL) rb_bug("should be NULL, but %p", (void *)ec->trace_arg);
    ec->trace_arg = arg;
    if (arg == NULL) rb_raise(rb_eRuntimeError, "No need to allow reentrance.");
    ec->trace_arg = NULL;
#include "trace_point.rbinc"
    struct ccan_list_node jnode;
rb_vm_memsize_workqueue(struct ccan_list_head *workqueue)
    ccan_list_for_each(workqueue, work, jnode) {
    if (!wq_job) return FALSE;
    wq_job->func = func;
    wq_job->data = data;
    ccan_list_add_tail(&vm->workqueue, &wq_job->jnode);
    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(rb_vm_main_ractor_ec(vm));
#define PJOB_TABLE_SIZE (sizeof(rb_atomic_t) * CHAR_BIT)
    } table[PJOB_TABLE_SIZE];
rb_vm_postponed_job_queue_init(rb_vm_t *vm)
    pjq->triggered_bitset = 0;
    memset(pjq->table, 0, sizeof(pjq->table));
    vm->postponed_job_queue = pjq;
    if (ec == NULL) ec = rb_vm_main_ractor_ec(vm);
rb_vm_postponed_job_atfork(void)
    if (pjq->triggered_bitset) {
        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
rb_vm_postponed_job_free(void)
    ruby_xfree(vm->postponed_job_queue);
    vm->postponed_job_queue = NULL;
rb_vm_memsize_postponed_job_queue(void)
    for (unsigned int i = 0; i < PJOB_TABLE_SIZE; i++) {
        if (existing_func == NULL || existing_func == func) {
    return POSTPONED_JOB_HANDLE_INVALID;
    RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(get_valid_ec(vm));
    if (h == POSTPONED_JOB_HANDLE_INVALID) {
    return pjob_register_legacy_impl(flags, func, data);
    return pjob_register_legacy_impl(flags, func, data);
rb_postponed_job_flush(rb_vm_t *vm)
    const rb_atomic_t block_mask = POSTPONED_JOB_INTERRUPT_MASK | TRAP_INTERRUPT_MASK;
    volatile rb_atomic_t saved_mask = ec->interrupt_mask & block_mask;
    VALUE volatile saved_errno = ec->errinfo;
    struct ccan_list_head tmp;
    ccan_list_head_init(&tmp);
    ccan_list_append_list(&tmp, &vm->workqueue);
    ec->interrupt_mask |= block_mask;
    if (EC_EXEC_TAG() == TAG_NONE) {
        while (triggered_bits) {
            unsigned int i = bit_length(triggered_bits) - 1;
            triggered_bits ^= ((1UL) << i);
            void *data = pjq->table[i].data;
            void *data = wq_job->data;
    ec->interrupt_mask &= ~(saved_mask ^ block_mask);
    ec->errinfo = saved_errno;
    if (!ccan_list_empty(&tmp)) {
        ccan_list_prepend_list(&vm->workqueue, &tmp);
        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
    if (triggered_bits) {
        RUBY_VM_SET_POSTPONED_JOB_INTERRUPT(GET_EC());
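/*
 * Hedged usage sketch (not part of vm_trace.c): the postponed-job machinery
 * above is driven from the public API in ruby/debug.h. A typical pattern is to
 * pre-register a job once and trigger it from contexts where calling into the
 * VM is unsafe (e.g. a signal handler or GC callback); the names below are
 * illustrative only.
 */
static rb_postponed_job_handle_t example_job_handle;

static void
example_job(void *data)
{
    (void)data;
    /* runs later, at a safe point where the VM can be re-entered */
}

static void
example_setup_job(void)
{
    example_job_handle = rb_postponed_job_preregister(0, example_job, NULL);
}

static void
example_async_event(void)
{
    /* async-signal-safe: only sets a bit in the table and an interrupt flag */
    rb_postponed_job_trigger(example_job_handle);
}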