#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

#include "ruby/internal/config.h"

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"

#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/eval.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"

#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"

#include "ractor_core.h"

#include "ccan/list/list.h"

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;
static VALUE cThGroup;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

static uint32_t thread_default_quantum_ms = 100;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};
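/* Descriptive summary of the sleep flags above (added for clarity):
 * SLEEP_DEADLOCKABLE   - sleep as THREAD_STOPPED_FOREVER and participate in
 *                        the ractor's deadlock detection while asleep.
 * SLEEP_SPURIOUS_CHECK - keep looping and re-sleeping when a wakeup turns out
 *                        to be spurious (the wait condition is still unmet).
 * SLEEP_ALLOW_SPURIOUS - return on any wakeup, even a spurious one.
 * SLEEP_NO_CHECKINTS   - skip the initial RUBY_VM_CHECK_INTS_BLOCKING before
 *                        sleeping (the caller handles interrupts itself). */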
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);

static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));

static rb_internal_thread_specific_key_t specific_key_count;

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

    enum rb_thread_status prev_status;

static void unblock_function_clear(rb_thread_t *th);

#define THREAD_BLOCKING_BEGIN(th) do { \
    struct rb_thread_sched * const sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
    thread_sched_to_running((sched), (th)); \
    rb_ractor_thread_switch(th->ractor, th, false); \
  } while(0)
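/* THREAD_BLOCKING_BEGIN/END bracket a section in which the current thread
 * gives up its slot on the thread scheduler (the GVL on traditional builds):
 * BEGIN saves the machine context and parks the thread as "waiting", END
 * re-acquires a running slot and runs the ractor thread-switch bookkeeping.
 * The unbalanced braces are intentional - BEGIN opens a do { block that
 * END's "} while(0)" closes, so the pair must appear in the same scope. */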
#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif

#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)

#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
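/* BLOCKING_REGION is the work-horse behind rb_nogvl() and the IO paths below:
 * it registers the unblock function (ubf), releases the scheduler slot, runs
 * `exec` without the GVL, and re-acquires everything in blocking_region_end().
 * vm_check_ints_blocking() is the interrupt-check variant used around such
 * regions; it also dequeues pending interrupts deferred to "blocking" timing. */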
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }

    int result = rb_threadptr_execute_interrupts(th, 1);

    if (result || RUBY_VM_INTERRUPTED(ec)) {

        if (scheduler != Qnil) {

    return vm_check_ints_blocking(ec);

#if defined(HAVE_POLL)
# if defined(__linux__)

# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000

# define POLLERR_SET (POLLHUP | POLLERR)

timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));

#include THREAD_IMPL_SRC

#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {

            RUBY_VM_CHECK_INTS(th->ec);

    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;

    th->unblock.func = 0;

threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
{
    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);

        RUBY_VM_SET_INTERRUPT(th->ec);

    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);

    threadptr_set_interrupt_locked(th, trap);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    threadptr_set_interrupt(th, false);

    threadptr_set_interrupt(th, true);
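/* Interrupt delivery comes in two flavors: threadptr_set_interrupt(th, false)
 * sets the generic interrupt flag (Thread#raise/kill, timer), while trap=true
 * sets the trap-interrupt flag used for signal handlers. In both cases, if
 * the target is parked in a blocking region, its registered unblock function
 * (th->unblock.func) is invoked so the blocking syscall returns and the flag
 * is noticed promptly. */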
    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }

    while (thread->join_list) {

        thread->join_list = join_list->next;

        if (target_thread->scheduler != Qnil && join_list->fiber) {

            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {

              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;

rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        VM_ASSERT(mutex->fiber_serial);

        const char *error_message = rb_mutex_unlock_th(mutex, th, NULL);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);

    rb_threadptr_unlock_all_locking_mutexes(th);

    if (EC_EXEC_TAG() == TAG_NONE) {

        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;

            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);

thread_cleanup_func_before_exec(void *th_ptr)
{
    th->status = THREAD_KILLED;

    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);

thread_cleanup_func(void *th_ptr, int atfork)
{
    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    native_thread_destroy_atfork(th->nt);

rb_thread_free_native_thread(void *th_ptr)
{
    native_thread_destroy_atfork(th->nt);

ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    native_thread_init_stack(th, local_in_parent_frame);

rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

        return rb_vm_ep_local_ep(ep);
                            int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;

    VALUE procval = th->invoke_arg.proc.proc;

    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);

        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE

        th->invoke_arg.proc.args = Qnil;

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE

    native_set_thread_name(th);

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;

    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {

            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);

        r->r_stdin = rb_io_prep_stdin();
        r->r_stdout = rb_io_prep_stdout();
        r->r_stderr = rb_io_prep_stderr();

    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {

        result = thread_do_start(th);

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;

    if (state == TAG_NONE) {

        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);

            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);

            if (th->vm->thread_abort_on_exception ||

    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {

        rb_threadptr_raise(ractor_main_th, 1, &errinfo);

    rb_ec_clear_current_thread_trace_func(th->ec);

    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2) {
        rb_threadptr_interrupt(ractor_main_th);

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }
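/* The teardown ordering above differs on purpose: for a ractor's main thread
 * the scheduler slot is released (thread_sched_to_dead) before the thread is
 * removed from the ractor's living-thread list, because once the last thread
 * is removed the ractor itself may be reclaimed together with its scheduler;
 * for an ordinary thread the list removal happens first. */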
    enum thread_invoke_type type;

static void thread_specific_storage_alloc(rb_thread_t *th);

    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);

    thread_specific_storage_alloc(th);

                 "can't start a new thread (frozen ThreadGroup)");

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc, Qnil);

        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
    }

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {

    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",

        .type = thread_invoke_type_proc,

    return thread_create_core(rb_thread_alloc(klass), &params);

    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);

    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);

                 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,

        .type = thread_invoke_type_proc,

    return thread_create_core(thread, &params);

        .type = thread_invoke_type_func,

    return thread_create_core(rb_thread_alloc(rb_cThread), &params);

        .type = thread_invoke_type_ractor_proc,

    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
remove_from_join_list(VALUE arg)
{
    if (target_thread->status != THREAD_KILLED) {

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;

            join_list = &(*join_list)->next;

    return th->status == THREAD_KILLED || !UNDEF_P(th->value);

thread_join_sleep(VALUE arg)
{
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

        end = rb_hrtime_add(*limit, rb_hrtime_now());

    while (!thread_finished(target_th)) {

        if (scheduler != Qnil) {

            sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);

            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));

            if (scheduler != Qnil) {
                VALUE timeout = rb_float_new(hrtime2double(*limit));

                th->status = THREAD_STOPPED;
                native_sleep(th, limit);

        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");

    if (th->ractor->threads.main == target_th) {

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {

        waiter.next = target_th->join_list;

        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

            RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

            if (err == RUBY_FATAL_FIBER_KILLED) {

            rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));

        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");

    return target_th->self;

thread_join_m(int argc, VALUE *argv, VALUE self)
{
    rb_hrtime_t rel = 0, *limit = 0;

    if (NIL_P(timeout)) {

        rel = rb_sec2hrtime(NUM2TIMET(timeout));

        limit = double2hrtime(&rel, rb_num2dbl(timeout));

    return thread_join(rb_thread_ptr(self), timeout, limit);
thread_value(VALUE self)
{
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)

NOINLINE(rb_hrtime_t rb_hrtime_now(void));

    return rb_timespec2hrtime(&ts);

COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif

#define PRIu64 PRI_64_PREFIX "u"

hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
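/* hrtime_update_expire() is the single time-accounting helper for all timed
 * waits here: it returns 1 once the absolute deadline `end` has passed, and
 * otherwise stores the remaining time into *timeout so the caller can simply
 * re-issue its wait after a spurious or interrupt-driven wakeup. E.g. with a
 * 100ms deadline, a wakeup after 30ms leaves *timeout == 70ms. */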
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;

    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
    }
    th->status = prev_status;

sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;

    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
    }
    th->status = prev_status;

    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;

    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);

        native_sleep(th, 0);

        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);

        if (fl & SLEEP_ALLOW_SPURIOUS) {

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {

    th->status = prev_status;
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);

    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);

rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    if (scheduler != Qnil) {

        RUBY_DEBUG_LOG("...");

            sleep_hrtime_until(th, end, SLEEP_SPURIOUS_CHECK);

            sleep_forever(th, SLEEP_DEADLOCKABLE);

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);

    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    rb_ec_check_ints(GET_EC());

rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;

    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);

rb_thread_schedule_limits(uint32_t limits_us)
{
    RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

    if (th->running_time_us >= limits_us) {
        RUBY_DEBUG_LOG("switch %s", "start");

        RB_VM_SAVE_MACHINE_CONTEXT(th);
        thread_sched_yield(TH_SCHED(th), th);
        rb_ractor_thread_switch(th->ractor, th, true);

        RUBY_DEBUG_LOG("switch %s", "done");

    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);

    unblock_function_clear(th);

    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th, false);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");
    VM_ASSERT(th == GET_THREAD());

        *unblock_function = ubf_select;

rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    if (scheduler != Qnil) {

        if (!UNDEF_P(result)) {
            rb_errno_set(state.saved_errno);
            return state.result;

    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;

    rb_thread_resolve_unblock_function(&ubf, &data2, th);

    if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {

        vm->ubf_async_safe = 1;

    rb_vm_t *volatile saved_vm = vm;
    BLOCKING_REGION(th, {

        saved_errno = rb_errno();

    if (is_main_thread) vm->ubf_async_safe = 0;

    RUBY_VM_CHECK_INTS_BLOCKING(ec);

    rb_errno_set(saved_errno);

    return rb_nogvl(func, data1, ubf, data2, 0);
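/* Illustrative usage sketch (added for exposition; the example_* helper names
 * below are hypothetical and not part of this file). Extension code typically
 * wraps a blocking syscall in rb_nogvl() so other Ruby threads keep running,
 * and supplies an unblock function (ubf) so the VM can interrupt the call: */
#if 0
#include <unistd.h>

struct example_read_args { int fd; char *buf; size_t len; };

static void *
example_blocking_read(void *ptr)
{
    struct example_read_args *args = ptr;
    /* runs without the GVL: must not touch Ruby objects or call Ruby APIs */
    return (void *)(intptr_t)read(args->fd, args->buf, args->len);
}

static void
example_unblock(void *ptr)
{
    /* called asynchronously to make the blocked read(2) return */
    struct example_read_args *args = ptr;
    close(args->fd);
}

static ssize_t
example_read_nogvl(struct example_read_args *args)
{
    void *ret = rb_nogvl(example_blocking_read, args, example_unblock, args, 0);
    return (ssize_t)(intptr_t)ret;
}
#endif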
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;

static struct ccan_list_head *
rb_io_blocking_operations(struct rb_io *io)
{
    rb_serial_t fork_generation = GET_VM()->fork_gen;

    if (io->fork_generation != fork_generation) {

        io->fork_generation = fork_generation;

    ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);

    ccan_list_del(&blocking_operation->list);

io_blocking_operation_exit(VALUE _arguments)
{
    rb_io_blocking_operation_pop(arguments->io, blocking_operation);

    rb_fiber_t *fiber = io->closing_ec->fiber_ptr;

    if (thread->scheduler != Qnil) {

    VALUE wakeup_mutex = io->wakeup_mutex;

    blocking_operation->ec = NULL;

        .blocking_operation = blocking_operation

    rb_io_blocking_operation_pop(io, blocking_operation);

rb_thread_io_blocking_operation_ensure(VALUE _argument)
{
    rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);

    rb_io_blocking_operation_enter(io, &blocking_operation);

        .blocking_operation = &blocking_operation

#if defined(USE_MN_THREADS) && USE_MN_THREADS
    return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;

thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (thread_io_mn_schedulable(th, events, timeout)) {
        rb_hrtime_t rel, *prel;

            rel = rb_timeval2hrtime(timeout);

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
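/* Under M:N threading (USE_MN_THREADS), a thread without a dedicated native
 * thread does not park a native thread in poll/select. Instead
 * thread_sched_wait_events() hands the fd (and optional timeout) to the
 * scheduler's event loop (e.g. epoll/kqueue in the pthread MN implementation)
 * and lends the native thread to other ready Ruby threads until the fd is
 * ready. */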
blocking_call_retryable_p(int r, int eno)
{
    if (r != -1) return false;

#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN

rb_thread_mn_schedulable(VALUE thval)
{
    return th->mn_schedulable;

rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
{
    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);

    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    volatile bool prev_mn_schedulable = th->mn_schedulable;

    th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);

    rb_io_blocking_operation_enter(io, &blocking_operation);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        volatile enum ruby_tag_type saved_state = state;

        BLOCKING_REGION(th, {

            saved_errno = errno;
        }, ubf_select, th, FALSE);

            blocking_call_retryable_p((int)val, saved_errno) &&
            thread_io_wait_events(th, fd, events, NULL)) {
            RUBY_VM_CHECK_INTS_BLOCKING(ec);

        RUBY_VM_CHECK_INTS_BLOCKING(ec);

        state = saved_state;

    th = rb_ec_thread_ptr(ec);
    th->mn_schedulable = prev_mn_schedulable;

    rb_io_blocking_operation_exit(io, &blocking_operation);

        EC_JUMP_TAG(ec, state);

    if (saved_errno == ETIMEDOUT) {

    errno = saved_errno;
rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
{
    return rb_thread_io_blocking_call(io, func, data1, 0);

    fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");

    prev_unblock = th->unblock;

    return (*func)(data1);

    blocking_region_end(th, brb);

    int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);

    RB_VM_SAVE_MACHINE_CONTEXT(th);
    thread_sched_to_waiting(TH_SCHED(th), th);

ruby_thread_has_gvl_p(void)
{
    if (th && th->blocking_region_buffer == 0) {

thread_s_pass(VALUE klass)

rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
{
    th->pending_interrupt_queue_checked = 0;

threadptr_check_pending_interrupt_queue(rb_thread_t *th)
{
    if (!th->pending_interrupt_queue) {
enum handle_interrupt_timing {
    INTERRUPT_NONE,
    INTERRUPT_IMMEDIATE,
    INTERRUPT_ON_BLOCKING,
    INTERRUPT_NEVER
};

static enum handle_interrupt_timing

    if (sym == sym_immediate) {
        return INTERRUPT_IMMEDIATE;
    }
    else if (sym == sym_on_blocking) {
        return INTERRUPT_ON_BLOCKING;
    }
    else if (sym == sym_never) {
        return INTERRUPT_NEVER;
    }

static enum handle_interrupt_timing

    long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);

    for (i=0; i<mask_stack_len; i++) {
        mask = mask_stack[mask_stack_len-(i+1)];

            return rb_threadptr_pending_interrupt_from_symbol(th, mask);

            klass = RBASIC(mod)->klass;
        }
        else if (mod != RCLASS_ORIGIN(mod)) {

        if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
            return rb_threadptr_pending_interrupt_from_symbol(th, sym);

    return INTERRUPT_NONE;
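/* The mask check walks th->pending_interrupt_mask_stack from the innermost
 * Thread.handle_interrupt block outward. Each mask hash maps an exception
 * class (or module) to :immediate, :on_blocking or :never; the lookup also
 * climbs the ancestry of the pending exception's class, so a mask keyed on
 * Exception covers every exception. INTERRUPT_NONE means no mask matched and
 * the default (immediate) timing applies. */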
rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
{
    return RARRAY_LEN(th->pending_interrupt_queue) == 0;

    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {

rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
{
    for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {

        enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));

        switch (mask_timing) {
          case INTERRUPT_ON_BLOCKING:
            if (timing != INTERRUPT_ON_BLOCKING) {

          case INTERRUPT_NONE:
          case INTERRUPT_IMMEDIATE:

          case INTERRUPT_NEVER:

        th->pending_interrupt_queue_checked = 1;

    if (rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 1;

threadptr_pending_interrupt_active_p(rb_thread_t *th)
{
    if (th->pending_interrupt_queue_checked) {

    if (rb_threadptr_pending_interrupt_empty_p(th)) {

    if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
        rb_raise(rb_eArgError, "unknown mask signature");

    if (RTEST(*maskp)) {

        VALUE prev = *maskp;
        *maskp = rb_ident_hash_new();

    rb_hash_aset(*maskp, key, val);
rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
{
    enum ruby_tag_type state;

        rb_raise(rb_eArgError, "block is needed.");

    mask_arg = rb_to_hash_type(mask_arg);

    if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {

    if (UNDEF_P(mask)) {

    rb_ary_push(th->pending_interrupt_mask_stack, mask);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    EC_PUSH_TAG(th->ec);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {

    rb_ary_pop(th->pending_interrupt_mask_stack);
    if (!rb_threadptr_pending_interrupt_empty_p(th)) {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    RUBY_VM_CHECK_INTS(th->ec);

        EC_JUMP_TAG(th->ec, state);
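/* Ruby-level view of the push/pop above (illustrative):
 *
 *   Thread.handle_interrupt(RuntimeError => :never) do
 *     do_critical_work          # Thread#raise(RuntimeError) is deferred here
 *     Thread.handle_interrupt(RuntimeError => :immediate) do
 *       sleep 1                 # any deferred interrupt fires here
 *     end
 *   end
 *
 * Note the RUBY_VM_SET_INTERRUPT after both push and pop: if interrupts are
 * already queued, the flag is re-armed so the new mask is evaluated promptly.
 */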
rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
{
    rb_thread_t *target_th = rb_thread_ptr(target_thread);

    if (!target_th->pending_interrupt_queue) {

    if (rb_threadptr_pending_interrupt_empty_p(target_th)) {

        VALUE err = argv[0];

        rb_raise(rb_eTypeError, "class or module required for rescue clause");

    return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));

rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
{
    return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);

NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));

    VM_ASSERT(GET_THREAD() == th);
    rb_threadptr_pending_interrupt_clear(th);
    th->status = THREAD_RUNNABLE;

    th->ec->errinfo = INT2FIX(TAG_FATAL);
    EC_JUMP_TAG(th->ec, TAG_FATAL);

    old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);

        old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
    } while (old != interrupt);

    return interrupt & (rb_atomic_t)~ec->interrupt_mask;
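/* The CAS loop above consumes interrupt bits atomically: masked bits are left
 * set in ec->interrupt_flag (they stay pending for later), and only the
 * unmasked bits are returned to the caller, which then dispatches them one
 * category at a time in rb_threadptr_execute_interrupts(). */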
static void threadptr_interrupt_exec_exec(rb_thread_t *th);

rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
{
    int postponed_job_interrupt = 0;

    VM_ASSERT(GET_THREAD() == th);

    if (th->ec->raised_flag) return ret;

    while ((interrupt = threadptr_get_interrupts(th)) != 0) {

        int timer_interrupt;
        int pending_interrupt;

        int terminate_interrupt;

        timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
        pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
        postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
        trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
        terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK;

        if (interrupt & VM_BARRIER_INTERRUPT_MASK) {

        if (postponed_job_interrupt) {
            rb_postponed_job_flush(th->vm);

        if (trap_interrupt) {

            if (th == th->vm->ractor.main_thread) {
                enum rb_thread_status prev_status = th->status;

                th->status = THREAD_RUNNABLE;

                while ((sig = rb_get_next_signal()) != 0) {
                    ret |= rb_signal_exec(th, sig);
                }

                th->status = prev_status;

            if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
                enum rb_thread_status prev_status = th->status;

                th->status = THREAD_RUNNABLE;

                threadptr_interrupt_exec_exec(th);

                th->status = prev_status;

        if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
            VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
            RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);

            else if (err == RUBY_FATAL_THREAD_KILLED ||
                     err == RUBY_FATAL_THREAD_TERMINATED ||

                terminate_interrupt = 1;

            if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {

                err = ruby_vm_special_exception_copy(err);

            if (th->status == THREAD_STOPPED ||
                th->status == THREAD_STOPPED_FOREVER)
                th->status = THREAD_RUNNABLE;

        if (terminate_interrupt) {
            rb_threadptr_to_kill(th);

        if (timer_interrupt) {
            uint32_t limits_us = thread_default_quantum_ms * 1000;

            if (th->priority > 0)
                limits_us <<= th->priority;
            else
                limits_us >>= -th->priority;

            if (th->status == THREAD_RUNNABLE)
                th->running_time_us += 10 * 1000;

            VM_ASSERT(th->ec->cfp);

            rb_thread_schedule_limits(limits_us);
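/* Timer-interrupt arithmetic: the base quantum (thread_default_quantum_ms,
 * 100ms unless overridden) is scaled by Thread#priority as a bit shift, so
 * priority 0 keeps 100ms, priority 2 gives 400ms, and priority -2 gives 25ms.
 * running_time_us is advanced by the assumed 10ms timer tick, and
 * rb_thread_schedule_limits() yields once the quantum is exhausted. */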
rb_thread_execute_interrupts(VALUE thval)
{
    rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);

    rb_threadptr_interrupt(th);

    if (rb_threadptr_dead(target_th)) {

    VALUE exception = rb_exception_setup(argc, argv);

    if (rb_threadptr_dead(target_th)) {

    rb_threadptr_pending_interrupt_enque(target_th, exception);
    rb_threadptr_interrupt(target_th);

rb_threadptr_signal_raise(rb_thread_t *th, int sig)
{
    rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);

    rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);

    if (ec->raised_flag & RAISED_EXCEPTION) {

    ec->raised_flag |= RAISED_EXCEPTION;

    if (!(ec->raised_flag & RAISED_EXCEPTION)) {

    ec->raised_flag &= ~RAISED_EXCEPTION;

thread_io_close_notify_all(VALUE _io)
{
    rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
    VALUE error = vm->special_exceptions[ruby_error_stream_closed];

    ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {

        if (thread->scheduler != Qnil) {

            rb_threadptr_pending_interrupt_enque(thread, error);
            rb_threadptr_interrupt(thread);

    return (VALUE)count;

rb_thread_io_close_interrupt(struct rb_io *io)
{
    if (io->closing_ec) {

    if (ccan_list_empty(rb_io_blocking_operations(io))) {

    io->closing_ec = ec;

    rb_mutex_allow_trap(io->wakeup_mutex, 1);

    return (size_t)result;

rb_thread_io_close_wait(struct rb_io* io)
{
    VALUE wakeup_mutex = io->wakeup_mutex;

    while (!ccan_list_empty(rb_io_blocking_operations(io))) {

    io->wakeup_mutex = Qnil;
    io->closing_ec = NULL;

    rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
thread_raise_m(int argc, VALUE *argv, VALUE self)
{
    threadptr_check_pending_interrupt_queue(target_th);
    rb_threadptr_raise(target_th, argc, argv);

    if (current_th == target_th) {
        RUBY_VM_CHECK_INTS(target_th->ec);

    if (target_th->to_kill || target_th->status == THREAD_KILLED) {

    if (target_th == target_th->vm->ractor.main_thread) {

    RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));

    if (target_th == GET_THREAD()) {

        rb_threadptr_to_kill(target_th);

        threadptr_check_pending_interrupt_queue(target_th);
        rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
        rb_threadptr_interrupt(target_th);

rb_thread_to_be_killed(VALUE thread)
{
    if (target_th->to_kill || target_th->status == THREAD_KILLED) {

    if (target_th->status == THREAD_KILLED) return Qnil;

    rb_threadptr_ready(target_th);

    if (target_th->status == THREAD_STOPPED ||
        target_th->status == THREAD_STOPPED_FOREVER) {
        target_th->status = THREAD_RUNNABLE;

                 "stopping only thread\n\tnote: use sleep to stop forever");

    return rb_ractor_thread_list();

    return rb_thread_list();

    return GET_THREAD()->self;

thread_s_current(VALUE klass)

    return GET_RACTOR()->threads.main->self;

rb_thread_s_main(VALUE klass)
rb_thread_s_abort_exc(VALUE _)
{
    return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);

rb_thread_s_abort_exc_set(VALUE self, VALUE val)
{
    GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);

rb_thread_abort_exc(VALUE thread)
{
    return RBOOL(rb_thread_ptr(thread)->abort_on_exception);

rb_thread_abort_exc_set(VALUE thread, VALUE val)
{
    rb_thread_ptr(thread)->abort_on_exception = RTEST(val);

rb_thread_s_report_exc(VALUE _)
{
    return RBOOL(GET_THREAD()->vm->thread_report_on_exception);

rb_thread_s_report_exc_set(VALUE self, VALUE val)
{
    GET_THREAD()->vm->thread_report_on_exception = RTEST(val);

rb_thread_s_ignore_deadlock(VALUE _)
{
    return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);

rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
{
    GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);

rb_thread_report_exc(VALUE thread)
{
    return RBOOL(rb_thread_ptr(thread)->report_on_exception);

rb_thread_report_exc_set(VALUE thread, VALUE val)
{
    rb_thread_ptr(thread)->report_on_exception = RTEST(val);

rb_thread_group(VALUE thread)
{
    return rb_thread_ptr(thread)->thgroup;
    switch (th->status) {
      case THREAD_RUNNABLE:
        return th->to_kill ? "aborting" : "run";
      case THREAD_STOPPED_FOREVER:
        if (detail) return "sleep_forever";
      case THREAD_STOPPED:

    return th->status == THREAD_KILLED;

rb_thread_status(VALUE thread)
{
    if (rb_threadptr_dead(target_th)) {
        if (!NIL_P(target_th->ec->errinfo) &&
            !FIXNUM_P(target_th->ec->errinfo)) {

    return rb_str_new2(thread_status_name(target_th, FALSE));

rb_thread_alive_p(VALUE thread)
{
    return RBOOL(!thread_finished(rb_thread_ptr(thread)));

rb_thread_stop_p(VALUE thread)
{
    if (rb_threadptr_dead(th)) {

    return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);

rb_thread_getname(VALUE thread)
{
    return rb_thread_ptr(thread)->name;

    enc = rb_enc_get(name);
    if (!rb_enc_asciicompat(enc)) {
        rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",

    target_th->name = name;
    if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
        native_set_another_thread_name(target_th->nt->thread_id, name);
#if USE_NATIVE_THREAD_NATIVE_THREAD_ID

rb_thread_native_thread_id(VALUE thread)
{
    if (rb_threadptr_dead(target_th)) return Qnil;
    return native_thread_native_thread_id(target_th);
#else
# define rb_thread_native_thread_id rb_f_notimplement
#endif

rb_thread_to_s(VALUE thread)
{
    status = thread_status_name(target_th, TRUE);
    str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
    if (!NIL_P(target_th->name)) {
        rb_str_catf(str, "@%"PRIsVALUE, target_th->name);

    if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
        rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,

    rb_str_catf(str, " %s>", status);
#define recursive_key id__recursive_key__

    if (id == recursive_key) {
        return th->ec->local_storage_recursive_hash;

        struct rb_id_table *local_storage = th->ec->local_storage;

        if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {

    return threadptr_local_aref(rb_thread_ptr(thread), id);

    if (!id) return Qnil;

rb_thread_fetch(int argc, VALUE *argv, VALUE self)
{
    if (block_given && argc == 2) {
        rb_warn("block supersedes default value argument");

    if (id == recursive_key) {
        return target_th->ec->local_storage_recursive_hash;
    }
    else if (id && target_th->ec->local_storage &&
             rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
    }
    else if (block_given) {
    }
    else if (argc == 1) {
        rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);

    if (id == recursive_key) {
        th->ec->local_storage_recursive_hash = val;

        struct rb_id_table *local_storage = th->ec->local_storage;

            if (!local_storage) return Qnil;
            rb_id_table_delete(local_storage, id);

            if (local_storage == NULL) {
                th->ec->local_storage = local_storage = rb_id_table_create(0);
            }
            rb_id_table_insert(local_storage, id, val);

    return threadptr_local_aset(rb_thread_ptr(thread), id, val);
rb_thread_variable_get(VALUE thread, VALUE key)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {

    locals = rb_thread_local_storage(thread);
    return rb_hash_aref(locals, symbol);

    locals = rb_thread_local_storage(thread);

    struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;

    if (!id || local_storage == NULL) {

    return RBOOL(rb_id_table_lookup(local_storage, id, &val));

static enum rb_id_table_iterator_result
thread_keys_i(ID key, VALUE value, void *ary)
{
    return ID_TABLE_CONTINUE;

    return rb_ractor_living_thread_num(GET_RACTOR()) == 1;

rb_thread_keys(VALUE self)
{
    struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;

    if (local_storage) {
        rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);

rb_thread_variables(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {

    locals = rb_thread_local_storage(thread);

    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {

    locals = rb_thread_local_storage(thread);

    return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
rb_thread_priority(VALUE thread)
{
    return INT2NUM(rb_thread_ptr(thread)->priority);

rb_thread_priority_set(VALUE thread, VALUE prio)
{
#if USE_NATIVE_THREAD_PRIORITY
    target_th->priority = NUM2INT(prio);
    native_thread_apply_priority(th);
#else
    if (priority > RUBY_THREAD_PRIORITY_MAX) {
        priority = RUBY_THREAD_PRIORITY_MAX;
    }
    else if (priority < RUBY_THREAD_PRIORITY_MIN) {
        priority = RUBY_THREAD_PRIORITY_MIN;
    }
    target_th->priority = (int8_t)priority;
#endif

    return INT2NUM(target_th->priority);
#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)

    FD_ZERO(fds->fdset);

    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);

    size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
    size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);

    if (m < sizeof(fd_set)) m = sizeof(fd_set);
    if (o < sizeof(fd_set)) o = sizeof(fd_set);

    memset((char *)fds->fdset + o, 0, m - o);

    FD_SET(n, fds->fdset);

    if (n >= fds->maxfd) return;
    FD_CLR(n, fds->fdset);

    if (n >= fds->maxfd) return 0;
    return FD_ISSET(n, fds->fdset) != 0;

    size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set)) size = sizeof(fd_set);

    memcpy(dst->fdset, src, size);

    size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);

    if (size < sizeof(fd_set))
        size = sizeof(fd_set);

    fd_set *r = NULL, *w = NULL, *e = NULL;

    return select(n, r, w, e, timeout);

#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))
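/* On this branch rb_fdset_t wraps a heap-allocated fd_set that is grown in
 * fd_mask-sized chunks (howmany(n, NFDBITS) * sizeof(fd_mask)), so select()
 * is not limited to FD_SETSIZE descriptors. The FD_* macros are redefined to
 * route through the resizable implementation. */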
#elif defined(_WIN32)

    set->capa = FD_SETSIZE;

    FD_ZERO(set->fdset);

    SOCKET s = rb_w32_get_osfhandle(fd);

    for (i = 0; i < set->fdset->fd_count; i++) {
        if (set->fdset->fd_array[i] == s) {

    if (set->fdset->fd_count >= (unsigned)set->capa) {
        set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;

        rb_xrealloc_mul_add(
            set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));

    set->fdset->fd_array[set->fdset->fd_count++] = s;

#define FD_ZERO(f)      rb_fd_zero(f)
#define FD_SET(i, f)    rb_fd_set((i), (f))
#define FD_CLR(i, f)    rb_fd_clr((i), (f))
#define FD_ISSET(i, f)  rb_fd_isset((i), (f))

#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)

#ifndef rb_fd_no_init
#define rb_fd_no_init(fds) (void)(fds)
#endif

wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
{
        if (rel && hrtime_update_expire(rel, end)) {

        return !hrtime_update_expire(rel, end);
select_set_free(VALUE p)

    volatile int result = 0;

    rb_hrtime_t *to, rel, end = 0;

    timeout_prepare(&to, &rel, &end, set->timeout);
    volatile rb_hrtime_t endtime = end;
#define restore_fdset(dst, src) \
    ((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
    (restore_fdset(set->rset, &set->orig_rset), \
     restore_fdset(set->wset, &set->orig_wset), \
     restore_fdset(set->eset, &set->orig_eset), \

    do {
        BLOCKING_REGION(set->th, {

            if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
                result = native_fd_select(set->max,
                                          set->rset, set->wset, set->eset,
                                          rb_hrtime2timeval(&tv, to), set->th);
                if (result < 0) lerrno = errno;
            }
        }, ubf_select, set->th, TRUE);

        RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
    } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());

    RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);

    return (VALUE)result;

    set.th = GET_THREAD();
    RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);

    set.timeout = timeout;

    if (!set.rset && !set.wset && !set.eset) {

#define fd_init_copy(f) do { \
        rb_fd_resize(set.max - 1, set.f); \
        if (&set.orig_##f != set.f) { \
            rb_fd_init_copy(&set.orig_##f, set.f); \

            rb_fd_no_init(&set.orig_##f); \

#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
#define POLLEX_SET (POLLPRI)

#ifndef POLLERR_SET
# define POLLERR_SET (0)
#endif
wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
                                   rb_hrtime_t *const to, volatile int *lerrno)
{
    volatile int result = 0;

    BLOCKING_REGION(th, {
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
            if (result < 0) *lerrno = errno;
        }
    }, ubf_select, th, TRUE);

    struct pollfd fds[1] = {{
        .events = (short)events,

    volatile int result = 0;

    enum ruby_tag_type state;
    volatile int lerrno;

    blocking_operation.ec = ec;
    rb_io_blocking_operation_enter(io, &blocking_operation);

    if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {

        fds[0].revents = events;

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        rb_hrtime_t *to, rel, end = 0;
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
        timeout_prepare(&to, &rel, &end, timeout);
        do {
            nfds = numberof(fds);
            result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);

            RUBY_VM_CHECK_INTS_BLOCKING(ec);
        } while (wait_retryable(&result, lerrno, to, end));

        RUBY_VM_CHECK_INTS_BLOCKING(ec);

    rb_io_blocking_operation_exit(io, &blocking_operation);

        EC_JUMP_TAG(ec, state);

    if (fds[0].revents & POLLNVAL) {

    if (fds[0].revents & POLLIN_SET)
        result |= RB_WAITFD_IN;
    if (fds[0].revents & POLLOUT_SET)
        result |= RB_WAITFD_OUT;
    if (fds[0].revents & POLLEX_SET)
        result |= RB_WAITFD_PRI;

    if (fds[0].revents & POLLERR_SET)
select_single(VALUE ptr)

                               args->read, args->write, args->except, args->tv);

        args->as.error = errno;

    if (args->read && rb_fd_isset(args->as.fd, args->read))
    if (args->write && rb_fd_isset(args->as.fd, args->write))
    if (args->except && rb_fd_isset(args->as.fd, args->except))

select_single_cleanup(VALUE ptr)

    if (args->blocking_operation) {
        rb_io_blocking_operation_exit(args->io, args->blocking_operation);

        blocking_operation.ec = th->ec;
        rb_io_blocking_operation_enter(io, &blocking_operation);
        args.blocking_operation = &blocking_operation;

        blocking_operation.ec = NULL;
        args.blocking_operation = NULL;

    args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
    args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
    args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;

    int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);

    errno = args.as.error;

rb_thread_wait_for_single_fd(rb_thread_t *th, int fd, int events, struct timeval *timeout)
{
    return thread_io_wait(th, NULL, fd, events, timeout);

    return thread_io_wait(th, io, io->fd, events, timeout);
#ifdef USE_CONSERVATIVE_STACK_END

rb_gc_set_stack_end(VALUE **stack_end_p)
{
COMPILER_WARNING_PUSH
#if RBIMPL_COMPILER_IS(GCC)
COMPILER_WARNING_IGNORED(-Wdangling-pointer);
#endif
    *stack_end_p = &stack_end;

    if (rb_signal_buff_size() > 0) {

        threadptr_trap_interrupt(mth);

async_bug_fd(const char *mesg, int errno_arg, int fd)
{
    size_t n = strlcpy(buff, mesg, sizeof(buff));
    if (n < sizeof(buff)-3) {

    rb_async_bug_errno(buff, errno_arg);

consume_communication_pipe(int fd)
{
    static char buff[1024];

    result = read(fd, buff, sizeof(buff));

    RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);

    RUBY_DEBUG_LOG("result:%d", (int)result);

    if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {

    else if (result == 0) {

    else if (result < 0) {

#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN

        async_bug_fd("consume_communication_pipe: read", e, fd);
rb_thread_stop_timer_thread(void)
{
    if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
        native_reset_timer_thread();

rb_thread_reset_timer_thread(void)
{
    native_reset_timer_thread();

rb_thread_start_timer_thread(void)
{
    rb_thread_create_timer_thread();

clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
{
    if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {

rb_clear_coverages(void)
{
    VALUE coverages = rb_get_coverages();
    if (RTEST(coverages)) {

#if defined(HAVE_WORKING_FORK)

    vm->ractor.main_ractor = r;
    vm->ractor.main_thread = th;
    r->threads.main = th;
    r->status_ = ractor_created;

    thread_sched_atfork(TH_SCHED(th));

    ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
        if (r != vm->ractor.main_ractor) {
            rb_ractor_terminate_atfork(vm, r);
        }
        ccan_list_for_each(&r->threads.set, i, lt_node) {

    rb_vm_living_threads_init(vm);

    rb_ractor_atfork(vm, th);
    rb_vm_postponed_job_atfork();

    ccan_list_head_init(&th->interrupt_exec_tasks);

    rb_ractor_sleeper_threads_clear(th->ractor);
    rb_clear_coverages();

    rb_thread_reset_timer_thread();
    rb_thread_start_timer_thread();

    VM_ASSERT(vm->ractor.blocking_cnt == 0);
    VM_ASSERT(vm->ractor.cnt == 1);
    if (th != current_th) {
        th->scheduler = Qnil;

        rb_mutex_abandon_keeping_mutexes(th);
        rb_mutex_abandon_locking_mutex(th);
        thread_cleanup_func(th, TRUE);

    rb_threadptr_pending_interrupt_clear(th);
    rb_thread_atfork_internal(th, terminate_atfork_i);
    th->join_list = NULL;
    th->scheduler = Qnil;
    rb_fiber_atfork(th);

    if (th != current_th) {
        thread_cleanup_func_before_exec(th);

    rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);

    0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE

thgroup_s_alloc(VALUE klass)

thgroup_list(VALUE group)
{
    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th->thgroup == group) {

thgroup_enclose(VALUE group)

thgroup_enclosed_p(VALUE group)
{
    return RBOOL(data->enclosed);

    if (data->enclosed) {

    if (data->enclosed) {

                 "can't move from the enclosed thread group");

    target_th->thgroup = group;
thread_shield_mark(void *ptr)
{
    rb_gc_mark((VALUE)ptr);

    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY

thread_shield_alloc(VALUE klass)

#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
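/* A ThreadShield keeps its waiter count inline in the shield object's flag
 * word: THREAD_SHIELD_WAITING_MASK carves a bit range out of the FL_USER
 * flags, and the helpers below read and update the count with shift-and-mask
 * instead of a separate counter field, so no extra allocation is needed. */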
static inline unsigned int
rb_thread_shield_waiting(VALUE b)
{
    return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);

rb_thread_shield_waiting_inc(VALUE b)
{
    unsigned int w = rb_thread_shield_waiting(b);

    if (w > THREAD_SHIELD_WAITING_MAX)

    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);

rb_thread_shield_waiting_dec(VALUE b)
{
    unsigned int w = rb_thread_shield_waiting(b);

    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);

rb_thread_shield_new(void)
{
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);

    return thread_shield;

rb_thread_shield_owned(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);
    if (!mutex) return false;

    return m->fiber_serial == rb_fiber_serial(GET_EC()->fiber_ptr);

rb_thread_shield_wait(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);

    if (!mutex) return Qfalse;
    m = mutex_ptr(mutex);
    if (m->fiber_serial == rb_fiber_serial(GET_EC()->fiber_ptr)) return Qnil;
    rb_thread_shield_waiting_inc(self);

    rb_thread_shield_waiting_dec(self);

    return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
thread_shield_get_mutex(VALUE self)
{
    VALUE mutex = GetThreadShieldPtr(self);

        rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);

rb_thread_shield_release(VALUE self)
{
    VALUE mutex = thread_shield_get_mutex(self);

    return RBOOL(rb_thread_shield_waiting(self) > 0);

rb_thread_shield_destroy(VALUE self)
{
    VALUE mutex = thread_shield_get_mutex(self);

    return RBOOL(rb_thread_shield_waiting(self) > 0);

    return th->ec->local_storage_recursive_hash;

    th->ec->local_storage_recursive_hash = hash;

recursive_list_access(VALUE sym)
{
    VALUE hash = threadptr_recursive_hash(th);

        hash = rb_ident_hash_new();
        threadptr_recursive_hash_set(th, hash);

    list = rb_hash_aref(hash, sym);

        list = rb_ident_hash_new();
        rb_hash_aset(hash, sym, list);

#if SIZEOF_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
  #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
      rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
#endif

    VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);

    if (UNDEF_P(pair_list))

    if (paired_obj_id) {
        if (!OBJ_ID_EQL(paired_obj_id, pair_list))

        if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
        rb_hash_aset(list, obj, Qtrue);

    else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
        rb_hash_aset(list, obj, paired_obj);

            VALUE other_paired_obj = pair_list;
            pair_list = rb_hash_new();
            rb_hash_aset(pair_list, other_paired_obj, Qtrue);
            rb_hash_aset(list, obj, pair_list);

        rb_hash_aset(pair_list, paired_obj, Qtrue);

    VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);

    if (UNDEF_P(pair_list)) {

        rb_hash_delete_entry(pair_list, paired_obj);

    rb_hash_delete_entry(list, obj);

    return (*p->func)(p->obj, p->arg, FALSE);

    p.list = recursive_list_access(sym);

    outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);

    if (recursive_check(p.list, p.obj, pairid)) {
        if (outer && !outermost) {

        return (*func)(obj, arg, TRUE);

        enum ruby_tag_type state;

        recursive_push(p.list, ID2SYM(recursive_key), 0);
        recursive_push(p.list, p.obj, p.pairid);
        result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
        if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
        if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
        if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
        if (result == p.list) {
            result = (*func)(obj, arg, TRUE);

        recursive_push(p.list, p.obj, p.pairid);
        EC_PUSH_TAG(GET_EC());
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            ret = (*func)(obj, arg, FALSE);

        if (!recursive_pop(p.list, p.obj, p.pairid)) {

        if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);

             "for %+"PRIsVALUE" in %+"PRIsVALUE,

    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());

    return exec_recursive(func, obj, 0, arg, 1, mid);

    return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
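/* The three wrappers above drive the same exec_recursive() core: pairid == 0
 * detects recursion on obj alone (rb_exec_recursive), a nonzero pairid keyed
 * by rb_memory_id(paired_obj) detects recursion on the (obj, paired_obj)
 * pair, and outer == 1 selects the "outermost" variant, which unwinds to the
 * outermost recursive call via the rb_catch_protect throw/catch above. */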
rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace(argc, argv, thval);

rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
{
    return rb_vm_thread_backtrace_locations(argc, argv, thval);

Init_Thread_Mutex(void)

    rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
                                     "stream closed in another thread");

    const char *ptr = getenv("RUBY_THREAD_TIMESLICE");

        long quantum = strtol(ptr, NULL, 0);
        if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
            thread_default_quantum_ms = (uint32_t)quantum;

            fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5759 th->thgroup = th->ractor->thgroup_default =
rb_obj_alloc(cThGroup);
5760 rb_define_const(cThGroup,
"Default", th->thgroup);
5770#ifdef HAVE_PTHREAD_NP_H
5771 VM_ASSERT(TH_SCHED(th)->running == th);
5778 th->pending_interrupt_queue_checked = 0;
5783 rb_thread_create_timer_thread();
5799#ifdef NON_SCALAR_THREAD_ID
5800 #define thread_id_str(th) (NULL)
5802 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5811 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5812 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5813 (void *)GET_THREAD(), (void *)r->threads.main);
5815 ccan_list_for_each(&r->threads.set, th, lt_node) {
5816 rb_str_catf(msg, "* %+"PRIsVALUE "\n rb_thread_t:%p "
5818 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5820 if (th->locking_mutex) {
5821 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5822 rb_str_catf(msg, " mutex:%llu cond:%"PRIuSIZE,
5823 (unsigned long long)mutex->fiber_serial, rb_mutex_num_waiting(mutex));
5829 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
5833 rb_str_catf(msg, "\n ");
5834 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5835 rb_str_catf(msg, "\n");
5842 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5844#ifdef RUBY_THREAD_PTHREAD_H
5845 if (r->threads.sched.readyq_cnt > 0) return;
5848 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5849 int ltnum = rb_ractor_living_thread_num(r);
5851 if (ltnum > sleeper_num) return;
5852 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5857 ccan_list_for_each(&r->threads.set, th, lt_node) {
5858 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5861 else if (th->locking_mutex) {
5862 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5863 if (mutex->fiber_serial == rb_fiber_serial(th->ec->fiber_ptr) || (!mutex->fiber_serial && !ccan_list_empty(&mutex->waitq))) {
5874 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5875 debug_deadlock_check(r, argv[1]);
5876 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5877 rb_threadptr_raise(r->threads.main, 2, argv);
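The detector fires only when every living thread of the ractor is stopped forever, with two escape hatches checked above: a non-empty ready queue and the VM-wide thread_ignore_deadlock flag. The latter is what the Ruby-level Thread.ignore_deadlock accessor (available since Ruby 3.0) sets; a sketch of flipping it from C:

/* Disable deadlock detection, e.g. when a signal handler or another
 * process is expected to wake one of the sleeping threads. */
rb_funcall(rb_cThread, rb_intern("ignore_deadlock="), 1, Qtrue);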
5885 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5890 VM_ASSERT(line >= 0);
5894 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5895 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5916 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5920 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5935 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5937 if (!me->def) return NULL;
5940 switch (me->def->type) {
5941 case VM_METHOD_TYPE_ISEQ: {
5944 path = rb_iseq_path(iseq);
5945 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5946 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5947 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5948 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5951 case VM_METHOD_TYPE_BMETHOD: {
5952 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5955 rb_iseq_check(iseq);
5956 path = rb_iseq_path(iseq);
5957 loc = &ISEQ_BODY(iseq)->location;
5958 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5959 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5960 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5961 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5966 case VM_METHOD_TYPE_ALIAS:
5967 me = me->def->body.alias.original_me;
5969 case VM_METHOD_TYPE_REFINED:
5970 me = me->def->body.refined.orig_me;
5971 if (!me) return NULL;
5982 if (resolved_location) {
5983 resolved_location[0] = path;
5984 resolved_location[1] = beg_pos_lineno;
5985 resolved_location[2] = beg_pos_column;
5986 resolved_location[3] = end_pos_lineno;
5987 resolved_location[4] = end_pos_column;
6001 me = rb_resolve_me_location(me, 0);
6004 rcount = rb_hash_aref(me2counter, (VALUE) me);
6012rb_get_coverages(void)
6014 return GET_VM()->coverages;
6018rb_get_coverage_mode(void)
6020 return GET_VM()->coverage_mode;
6024rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
6026 GET_VM()->coverages = coverages;
6027 GET_VM()->me2counter = me2counter;
6028 GET_VM()->coverage_mode = mode;
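rb_get_coverages()/rb_set_coverages() are the internal backing of the coverage extension: the VM keeps one coverages hash, a mode bitmask, and the method-counter hash. A hedged sketch of enabling line coverage from C (internal API; the COVERAGE_TARGET_* bits are the ones tested below):

VALUE coverages = rb_hash_new();
rb_set_coverages(coverages, COVERAGE_TARGET_LINES, Qnil);  /* no me2counter needed for lines */
rb_resume_coverages();                                     /* installs the event hooks below */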
6032rb_resume_coverages(void)
6034 int mode = GET_VM()->coverage_mode;
6035 VALUE me2counter = GET_VM()->me2counter;
6036 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6037 if (mode & COVERAGE_TARGET_BRANCHES) {
6038 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6040 if (mode & COVERAGE_TARGET_METHODS) {
6046rb_suspend_coverages(void)
6049 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6052 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6059rb_reset_coverages(void)
6061 rb_clear_coverages();
6062 rb_iseq_remove_coverage_all();
6063 GET_VM()->coverages = Qfalse;
6067rb_default_coverage(int n)
6069 VALUE coverage = rb_ary_hidden_new_fill(3);
6071 int mode = GET_VM()->coverage_mode;
6073 if (mode & COVERAGE_TARGET_LINES) {
6076 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6078 if (mode & COVERAGE_TARGET_BRANCHES) {
6079 branches = rb_ary_hidden_new_fill(2);
6101 VALUE structure = rb_hash_new();
6107 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
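rb_default_coverage() builds the hidden per-iseq record that the update hooks above mutate. A sketch of reading it back, using the COVERAGE_INDEX_* slots assigned above (the line count of 100 is arbitrary):

VALUE cov      = rb_default_coverage(100);                  /* a file with 100 lines */
VALUE lines    = RARRAY_AREF(cov, COVERAGE_INDEX_LINES);    /* Qnil unless mode has LINES */
VALUE branches = RARRAY_AREF(cov, COVERAGE_INDEX_BRANCHES); /* [structure Hash, ...] or Qnil */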
6113uninterruptible_exit(VALUE v)
6116 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6118 cur_th->pending_interrupt_queue_checked = 0;
6119 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6120 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6128 VALUE interrupt_mask = rb_ident_hash_new();
6131 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6133 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6137 RUBY_VM_CHECK_INTS(cur_th->ec);
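This is the body of rb_uninterruptible() (declared in internal/thread.h): it pushes a mask that defers every interrupt (:never for Object and all its descendants) around b_proc, and uninterruptible_exit() above re-arms any interrupts that queued up in the meantime. A usage sketch with a hypothetical callback:

static VALUE
critical_body(VALUE arg)
{
    /* work that must not be cut short by Thread#raise or Thread#kill */
    return Qnil;
}

/* elsewhere: */
VALUE result = rb_uninterruptible(critical_body, Qnil);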
6144 VM_ASSERT(th->specific_storage == NULL);
6146 if (UNLIKELY(specific_key_count > 0)) {
6147 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6151rb_internal_thread_specific_key_t
6156 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6157 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6159 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6160 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6163 rb_internal_thread_specific_key_t key = specific_key_count++;
6170 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6171 thread_specific_storage_alloc(th);
6184 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6185 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6186 VM_ASSERT(th->specific_storage);
6188 return th->specific_storage[key];
6197 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6198 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6199 VM_ASSERT(th->specific_storage);
6201 th->specific_storage[key] = data;
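These getters and setters back the tool-facing API declared in ruby/thread.h (Ruby 3.3+): a profiler creates at most RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX keys up front, then attaches opaque per-thread pointers. A minimal sketch:

static rb_internal_thread_specific_key_t tool_key;

void
tool_init(void)  /* call once, before additional ractors exist */
{
    tool_key = rb_internal_thread_specific_key_create();
}

void
tool_tag(VALUE thread, void *data)
{
    rb_internal_thread_specific_set(thread, tool_key, data);
    /* read back later with rb_internal_thread_specific_get(thread, tool_key) */
}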
6207 struct ccan_list_node node;
6209 rb_interrupt_exec_func_t *func;
6211 enum rb_interrupt_exec_flag flags;
6215rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6219 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6220 if (task->flags & rb_interrupt_exec_flag_value_data) {
6221 rb_gc_mark((VALUE)task->data);
6229rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6241 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6242 threadptr_set_interrupt_locked(th, true);
6259 RUBY_DEBUG_LOG("task:%p", task);
6262 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6266 (*task->func)(task->data);
6294 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6296 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6299 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
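rb_threadptr_interrupt_exec() appends a task to the target thread's queue and flags it as interrupted, so the function runs at the target's next interrupt check; the main-ractor variant above forces rb_interrupt_exec_flag_new_thread so the task gets a dedicated thread. A hedged sketch of a task (internal API; the "no flags" enumerator name is an assumption):

static void
log_task(void *data)
{
    fprintf(stderr, "executed inside the target thread: %s\n", (const char *)data);
}

/* rb_threadptr_interrupt_exec(target_th, log_task, (void *)"hello",
 *                             rb_interrupt_exec_flag_none);  -- flag name assumed */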
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
uint32_t rb_event_flag_t
Represents event(s).
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
int rb_block_given_p(void)
Determines if the current method is given a block.
#define rb_str_new2
Old name of rb_str_new_cstr.
#define ALLOC
Old name of RB_ALLOC.
#define T_STRING
Old name of RUBY_T_STRING.
#define xfree
Old name of ruby_xfree.
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
#define xrealloc
Old name of ruby_xrealloc.
#define ID2SYM
Old name of RB_ID2SYM.
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
#define CLASS_OF
Old name of rb_class_of.
#define xmalloc
Old name of ruby_xmalloc.
#define LONG2FIX
Old name of RB_INT2FIX.
#define FIX2INT
Old name of RB_FIX2INT.
#define ZALLOC_N
Old name of RB_ZALLOC_N.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define T_HASH
Old name of RUBY_T_HASH.
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
#define INT2NUM
Old name of RB_INT2NUM.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
#define T_OBJECT
Old name of RUBY_T_OBJECT.
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
VALUE rb_eSystemExit
SystemExit exception.
VALUE rb_eIOError
IOError exception.
VALUE rb_eStandardError
StandardError exception.
VALUE rb_eTypeError
TypeError exception.
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
VALUE rb_eFatal
fatal exception.
VALUE rb_eRuntimeError
RuntimeError exception.
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
VALUE rb_eException
Mother of all exceptions.
VALUE rb_eThreadError
ThreadError exception.
void rb_exit(int status)
Terminates the current execution context.
VALUE rb_eSignal
SignalException exception.
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
VALUE rb_cInteger
Integer class.
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
VALUE rb_cThread
Thread class.
VALUE rb_cModule
Module class.
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_delete_at(VALUE ary, long pos)
Destructively removes an element which resides at the specific index of the passed array.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_hidden_new(long capa)
Allocates a hidden (no class) empty array.
VALUE rb_ary_clear(VALUE ary)
Destructively removes everything from an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
VALUE rb_ary_join(VALUE ary, VALUE sep)
Recursively stringises the elements of the passed array, flattens that result, then joins the sequence with the passed separator.
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from a Fiber local storage.
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
VALUE rb_thread_main(void)
Obtains the "main" thread.
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
void rb_thread_fd_close(int fd)
This function is now a no-op.
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Obtains the lock, runs the passed function, and releases the lock when it completes.
VALUE rb_thread_stop(void)
Stops the current thread.
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g, p }.
void rb_unblock_function_t(void *)
This is the type of UBFs.
void rb_thread_atfork_before_exec(void)
:FIXME: situation of this function is unclear.
void rb_thread_check_ints(void)
Checks for interrupts.
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g, p }.
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" ...
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
VALUE rb_thread_current(void)
Obtains the "current" thread.
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to a Fiber local storage.
void rb_thread_schedule(void)
Tries to switch to another thread.
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
void rb_thread_sleep(int sec)
Blocks for the given period of time.
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
#define RB_IO_POINTER(obj, fp)
Queries the underlying IO pointer.
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Get thread and tool specific data.
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Set thread and tool specific data.
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Create a key to store thread specific data.
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behaviour.
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
#define RB_NOGVL_OFFLOAD_SAFE
Passing this flag to rb_nogvl() indicates that the passed function is safe to offload to a background...
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads (see the usage sketch at the end of this index).
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
VALUE rb_yield(VALUE val)
Yields the block.
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
#define ALLOCA_N(type, n)
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
#define rb_fd_select
Waits for multiple file descriptors at once.
#define rb_fd_init
Initialises the given rb_fdset_t.
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
#define rb_fd_zero
Clears the given rb_fdset_t.
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
#define RARRAY_LEN
Just another name of rb_array_len.
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
#define RARRAY_AREF(a, i)
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
#define RBASIC(obj)
Convenient casting macro.
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
#define DATA_PTR(obj)
Convenient getter macro.
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C string.
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of taking an existing one.
#define errno
Ractor-aware version of errno.
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a ruby's thread.
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void *(*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
Defer the execution of the passed function to the scheduler.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
VALUE rb_fiber_scheduler_fiber_interrupt(VALUE scheduler, VALUE fiber, VALUE exception)
Interrupt a fiber by raising an exception.
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex#lock.
VALUE rb_fiber_scheduler_yield(VALUE scheduler)
Yield to the scheduler, to be resumed on the next scheduling cycle.
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
VALUE rb_fiber_scheduler_current_for_threadptr(struct rb_thread_struct *thread)
Identical to rb_fiber_scheduler_current_for_thread(), except it expects a threadptr instead of a thread.
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
RUBY_Qundef
Represents so-called undef.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
This is the struct that holds necessary info for a struct.
The data structure which wraps the fd_set bitmap used by select(2).
int maxfd
Maximum allowed number of FDs.
fd_set * fdset
File descriptors buffer.
int capa
Maximum allowed number of FDs.
Ruby's IO, metadata and buffers.
VALUE self
The IO's Ruby level counterpart.
struct ccan_list_head blocking_operations
Threads that are performing a blocking operation without the GVL using this IO.
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.
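A usage sketch for rb_thread_call_without_gvl(), referenced from its index entry above: run a blocking read(2) outside the GVL so other Ruby threads keep running, with RUBY_UBF_IO as the unblocking function. The wrapper and struct names are hypothetical.

#include "ruby/ruby.h"
#include "ruby/thread.h"
#include <string.h>
#include <unistd.h>

struct read_args { int fd; char buf[256]; ssize_t n; };

static void *
blocking_read(void *p)
{
    struct read_args *a = p;
    a->n = read(a->fd, a->buf, sizeof(a->buf));  /* may block; the GVL is released */
    return NULL;
}

static ssize_t
read_without_gvl(int fd, char *out, size_t cap)
{
    struct read_args a = { .fd = fd };
    rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
    if (a.n > 0) memcpy(out, a.buf, (size_t)a.n < cap ? (size_t)a.n : cap);
    return a.n;
}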