#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0
#include "ruby/internal/config.h"
#define TH_SCHED(th) (&(th)->ractor->threads.sched)
#include "eval_intern.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "ractor_core.h"
#include "ccan/list/list.h"
#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
static VALUE rb_cThreadShield;
static VALUE cThGroup;
static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;
static uint32_t thread_default_quantum_ms = 100;
#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
rb_thread_local_storage(VALUE thread)
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));
static rb_internal_thread_specific_key_t specific_key_count;
#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
    enum rb_thread_status prev_status;
static void unblock_function_clear(rb_thread_t *th);
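/* The macros below bracket code that may block in native calls.  They save the
 * machine context and hand the current thread to the scheduler so other Ruby
 * threads can run while this one waits; BLOCKING_REGION additionally registers
 * an unblock function (ubf) that another thread can invoke to interrupt the
 * blocking call. */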
#define THREAD_BLOCKING_BEGIN(th) do { \
    struct rb_thread_sched * const sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting((sched), (th));
#define THREAD_BLOCKING_END(th) \
    thread_sched_to_running((sched), (th)); \
    rb_ractor_thread_switch(th->ractor, th, false); \
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#define only_if_constant(expr, notconst) notconst
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        blocking_region_end(th, &__region); \
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    th->pending_interrupt_queue_checked = 0;
    RUBY_VM_SET_INTERRUPT(ec);
    return rb_threadptr_execute_interrupts(th, 1);
    return vm_check_ints_blocking(ec);
#if defined(HAVE_POLL)
# if defined(__linux__)
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
# define POLLERR_SET (POLLHUP | POLLERR)
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
#include THREAD_IMPL_SRC
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
# define USE_EVENTFD (0)
#include "thread_sync.c"
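/* unblock_function_set/clear install and remove the "unblock function" (ubf)
 * for the calling thread under its interrupt lock.  While a ubf is installed,
 * a thread that wants to interrupt this one calls it to knock the thread out
 * of a blocking system call (see threadptr_set_interrupt_locked below). */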
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
            RUBY_VM_CHECK_INTS(th->ec);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
    VM_ASSERT(th->unblock.func == NULL);
    th->unblock.func = func;
    th->unblock.arg = arg;
    th->unblock.func = 0;
threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
        RUBY_VM_SET_INTERRUPT(th->ec);
    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    threadptr_set_interrupt_locked(th, trap);
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    threadptr_set_interrupt(th, false);
    threadptr_set_interrupt(th, true);
    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);
            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
    while (thread->join_list) {
        thread->join_list = join_list->next;
        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_threadptr_interrupt(target_thread);
            switch (target_thread->status) {
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
    while (th->keeping_mutexes) {
        th->keeping_mutexes = mutex->next_mutex;
        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    volatile int sleeping = 0;
    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    rb_threadptr_unlock_all_locking_mutexes(th);
    if (EC_EXEC_TAG() == TAG_NONE) {
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
        terminate_all(cr, th);
        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
thread_cleanup_func_before_exec(void *th_ptr)
    th->status = THREAD_KILLED;
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);
thread_cleanup_func(void *th_ptr, int atfork)
    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);
ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
    native_thread_init_stack(th, local_in_parent_frame);
rb_vm_proc_local_ep(VALUE proc)
    const VALUE *ep = vm_proc_ep(proc);
    return rb_vm_ep_local_ep(ep);
    int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    VALUE procval = th->invoke_arg.proc.proc;
    GetProcPtr(procval, proc);
    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;
    vm_check_ints_blocking(th->ec);
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);
        return rb_vm_invoke_proc_with_self(
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        th->invoke_arg.proc.args = Qnil;
        vm_check_ints_blocking(th->ec);
        return rb_vm_invoke_proc(
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
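/* thread_do_start dispatches on th->invoke_type: a Ruby Proc (Thread.new),
 * a Ractor's isolated Proc, or a C function (rb_thread_create). */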
    native_set_thread_name(th);
    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
      case thread_invoke_type_none:
        rb_bug("unreachable");
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);
    enum ruby_tag_type state;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
        r->r_stdin = rb_io_prep_stdin();
        r->r_stdout = rb_io_prep_stdout();
        r->r_stderr = rb_io_prep_stderr();
    VM_ASSERT(UNDEF_P(th->value));
    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        result = thread_do_start(th);
    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
    if (state == TAG_NONE) {
        errinfo = th->ec->errinfo;
        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;
        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
        if (th->report_on_exception) {
            VALUE mesg = rb_thread_to_s(th->self);
            rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
            rb_write_error_str(mesg);
            rb_ec_error_print(th->ec, errinfo);
        if (th->invoke_type == thread_invoke_type_ractor_proc) {
            rb_ractor_atexit_exception(th->ec);
        if (th->vm->thread_abort_on_exception ||
    VM_ASSERT(!UNDEF_P(th->value));
    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
    if (th->vm->ractor.main_thread == th) {
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    rb_ec_clear_current_thread_trace_func(th->ec);
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2) {
        rb_threadptr_interrupt(ractor_main_th);
    rb_check_deadlock(th->ractor);
    rb_fiber_close(th->ec->fiber_ptr);
    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);
    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
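/* thread_create_core fills in the new rb_thread_t (invoke type, arguments,
 * priority, thread group, pending-interrupt state) and then asks the native
 * layer to actually spawn it via native_thread_create. */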
    enum thread_invoke_type type;
static void thread_specific_storage_alloc(rb_thread_t *th);
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    thread_specific_storage_alloc(th);
                 "can't start a new thread (frozen ThreadGroup)");
    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        rb_ractor_send_parameters(ec, params->g, params->args);
      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        rb_bug("unreachable");
    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
    rb_ractor_living_threads_insert(th->ractor, th);
    err = native_thread_create(th);
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
thread_s_new(int argc, VALUE *argv, VALUE klass)
    VALUE thread = rb_thread_alloc(klass);
    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
        .type = thread_invoke_type_proc,
    return thread_create_core(rb_thread_alloc(klass), &params);
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
                 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
        .type = thread_invoke_type_proc,
    return thread_create_core(thread, &params);
        .type = thread_invoke_type_func,
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
        .type = thread_invoke_type_ractor_proc,
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1018remove_from_join_list(
VALUE arg)
1023 if (target_thread->status != THREAD_KILLED) {
1026 while (*join_list) {
1027 if (*join_list == p->waiter) {
1028 *join_list = (*join_list)->next;
1032 join_list = &(*join_list)->next;
1042 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1046thread_join_sleep(
VALUE arg)
1049 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1050 rb_hrtime_t end = 0, *limit = p->limit;
1053 end = rb_hrtime_add(*limit, rb_hrtime_now());
1056 while (!thread_finished(target_th)) {
1060 if (scheduler !=
Qnil) {
1064 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1068 if (hrtime_update_expire(limit, end)) {
1069 RUBY_DEBUG_LOG(
"timeout target_th:%u", rb_th_serial(target_th));
1073 if (scheduler !=
Qnil) {
1074 VALUE timeout = rb_float_new(hrtime2double(*limit));
1078 th->status = THREAD_STOPPED;
1079 native_sleep(th, limit);
1082 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1083 th->status = THREAD_RUNNABLE;
1085 RUBY_DEBUG_LOG(
"interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1098 if (th == target_th) {
1099 rb_raise(
rb_eThreadError,
"Target thread must not be current thread");
1102 if (th->ractor->threads.main == target_th) {
1106 RUBY_DEBUG_LOG(
"target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1108 if (target_th->status != THREAD_KILLED) {
1110 waiter.next = target_th->join_list;
1112 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1113 target_th->join_list = &waiter;
1116 arg.waiter = &waiter;
1117 arg.target = target_th;
1118 arg.timeout = timeout;
1126 RUBY_DEBUG_LOG(
"success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1128 if (target_th->ec->errinfo !=
Qnil) {
1129 VALUE err = target_th->ec->errinfo;
1134 RUBY_DEBUG_LOG(
"terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1139 if (err == RUBY_FATAL_FIBER_KILLED) {
1143 rb_bug(
"thread_join: Fixnum (%d) should not reach here.",
FIX2INT(err));
1146 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1147 rb_bug(
"thread_join: THROW_DATA should not reach here.");
1154 return target_th->self;
1197thread_join_m(
int argc,
VALUE *argv,
VALUE self)
1200 rb_hrtime_t rel = 0, *limit = 0;
1211 if (
NIL_P(timeout)) {
1215 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1219 limit = double2hrtime(&rel,
rb_num2dbl(timeout));
1222 return thread_join(rb_thread_ptr(self), timeout, limit);
1240thread_value(
VALUE self)
1243 thread_join(th,
Qnil, 0);
1244 if (UNDEF_P(th->value)) {
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
    return rb_timespec2hrtime(&ts);
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#define PRIu64 PRI_64_PREFIX "u"
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
    rb_hrtime_t now = rb_hrtime_now();
    if (now > end) return 1;
    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
    *timeout = end - now;
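/* hrtime_update_expire() above returns 1 once `end` has passed and otherwise
 * stores the remaining time in *timeout; the sleep_* helpers below loop on it
 * so that a spurious wakeup goes back to sleep for the remaining interval. */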
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
    enum rb_thread_status prev_status = th->status;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
        if (hrtime_update_expire(&rel, end))
    th->status = prev_status;
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
    enum rb_thread_status prev_status = th->status;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
        if (hrtime_update_expire(&rel, end))
    th->status = prev_status;
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;
    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        native_sleep(th, 0);
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        if (fl & SLEEP_ALLOW_SPURIOUS) {
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
    th->status = prev_status;
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
    if (scheduler != Qnil) {
    RUBY_DEBUG_LOG("...");
        sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
    rb_ec_check_ints(GET_EC());
rb_thread_check_trap_pending(void)
    return rb_signal_buff_size() != 0;
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
rb_thread_schedule_limits(uint32_t limits_us)
    RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
    if (th->running_time_us >= limits_us) {
        RUBY_DEBUG_LOG("switch %s", "start");
        RB_VM_SAVE_MACHINE_CONTEXT(th);
        thread_sched_yield(TH_SCHED(th), th);
        rb_ractor_thread_switch(th->ractor, th, true);
        RUBY_DEBUG_LOG("switch %s", "done");
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
    VM_ASSERT(th == GET_THREAD());
    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
    unblock_function_clear(th);
    unregister_ubf_list(th);
    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th, false);
    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    RUBY_DEBUG_LOG("end");
    VM_ASSERT(th == GET_THREAD());
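/*
 * rb_nogvl() runs func(data1) with the GVL released, optionally registering
 * ubf(data2) so the call can be interrupted; flags such as
 * RB_NOGVL_UBF_ASYNC_SAFE mark the ubf as async-signal-safe.  Illustrative
 * sketch of a typical call from a C extension (the names do_blocking_read and
 * req are hypothetical, not from this file):
 *
 *   static void *do_blocking_read(void *arg) { ... }  // runs without the GVL
 *   ...
 *   rb_nogvl(do_blocking_read, &req, RUBY_UBF_IO, NULL, 0);
 */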
rb_nogvl(void *(*func)(void *), void *data1,
    if (scheduler != Qnil) {
        if (!UNDEF_P(result)) {
            rb_errno_set(state.saved_errno);
            return state.result;
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;
    else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        vm->ubf_async_safe = 1;
    rb_vm_t *volatile saved_vm = vm;
    BLOCKING_REGION(th, {
        saved_errno = rb_errno();
    if (is_main_thread) vm->ubf_async_safe = 0;
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
    rb_errno_set(saved_errno);
    return rb_nogvl(func, data1, ubf, data2, 0);
1701waitfd_to_waiting_flag(
int wfd_event)
1703 return wfd_event << 1;
1706static struct ccan_list_head *
1707rb_io_blocking_operations(
struct rb_io *io)
1709 rb_serial_t fork_generation = GET_VM()->fork_gen;
1713 if (io->fork_generation != fork_generation) {
1715 io->fork_generation = fork_generation;
1734 ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
1740 ccan_list_del(&blocking_operation->list);
1749io_blocking_operation_exit(
VALUE _arguments)
1754 rb_io_blocking_operation_pop(arguments->io, blocking_operation);
1758 rb_fiber_t *fiber = io->closing_ec->fiber_ptr;
1760 if (thread->scheduler !=
Qnil) {
1784 VALUE wakeup_mutex = io->wakeup_mutex;
1787 blocking_operation->ec = NULL;
1792 .blocking_operation = blocking_operation
1799 rb_io_blocking_operation_pop(io, blocking_operation);
1804rb_thread_io_blocking_operation_ensure(
VALUE _argument)
1808 rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);
1836 rb_io_blocking_operation_enter(io, &blocking_operation);
1840 .blocking_operation = &blocking_operation
1849#if defined(USE_MN_THREADS) && USE_MN_THREADS
1850 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1858thread_io_wait_events(
rb_thread_t *th,
int fd,
int events,
const struct timeval *timeout)
1860#if defined(USE_MN_THREADS) && USE_MN_THREADS
1861 if (thread_io_mn_schedulable(th, events, timeout)) {
1862 rb_hrtime_t rel, *prel;
1865 rel = rb_timeval2hrtime(timeout);
1872 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1874 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1888blocking_call_retryable_p(
int r,
int eno)
1890 if (r != -1)
return false;
1894#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1904rb_thread_mn_schedulable(
VALUE thval)
1907 return th->mn_schedulable;
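/* rb_thread_io_blocking_call() wraps an IO operation: it registers the call as
 * a blocking operation on the rb_io, releases the GVL around `func`, and when
 * the call would block (EAGAIN/EWOULDBLOCK) waits for the fd to become ready
 * and retries. */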
rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    volatile bool prev_mn_schedulable = th->mn_schedulable;
    th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
    rb_io_blocking_operation_enter(io, &blocking_operation);
    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        volatile enum ruby_tag_type saved_state = state;
        BLOCKING_REGION(th, {
            saved_errno = errno;
        }, ubf_select, th, FALSE);
            blocking_call_retryable_p((int)val, saved_errno) &&
            thread_io_wait_events(th, fd, events, NULL)) {
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
        state = saved_state;
    th = rb_ec_thread_ptr(ec);
    th->mn_schedulable = prev_mn_schedulable;
    rb_io_blocking_operation_exit(io, &blocking_operation);
        EC_JUMP_TAG(ec, state);
    if (saved_errno == ETIMEDOUT) {
    errno = saved_errno;
rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
    return rb_thread_io_blocking_call(io, func, data1, 0);
2029 fprintf(stderr,
"[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2034 prev_unblock = th->unblock;
2037 rb_bug(
"rb_thread_call_with_gvl: called by a thread which has GVL.");
2040 blocking_region_end(th, brb);
2044 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2046 RB_VM_SAVE_MACHINE_CONTEXT(th);
2047 thread_sched_to_waiting(TH_SCHED(th), th);
2056ruby_thread_has_gvl_p(
void)
2060 if (th && th->blocking_region_buffer == 0) {
2077thread_s_pass(
VALUE klass)
2102rb_threadptr_pending_interrupt_clear(
rb_thread_t *th)
2111 th->pending_interrupt_queue_checked = 0;
2115threadptr_check_pending_interrupt_queue(
rb_thread_t *th)
2117 if (!th->pending_interrupt_queue) {
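/* Thread.handle_interrupt timing symbols: :immediate delivers a pending
 * interrupt right away, :on_blocking only at the next blocking operation,
 * and :never defers it until the mask is popped. */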
2122enum handle_interrupt_timing {
2124 INTERRUPT_IMMEDIATE,
2125 INTERRUPT_ON_BLOCKING,
2129static enum handle_interrupt_timing
2132 if (sym == sym_immediate) {
2133 return INTERRUPT_IMMEDIATE;
2135 else if (sym == sym_on_blocking) {
2136 return INTERRUPT_ON_BLOCKING;
2138 else if (sym == sym_never) {
2139 return INTERRUPT_NEVER;
2146static enum handle_interrupt_timing
2150 long mask_stack_len =
RARRAY_LEN(th->pending_interrupt_mask_stack);
2155 for (i=0; i<mask_stack_len; i++) {
2156 mask = mask_stack[mask_stack_len-(i+1)];
2161 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2173 klass =
RBASIC(mod)->klass;
2175 else if (mod != RCLASS_ORIGIN(mod)) {
2179 if ((sym = rb_hash_aref(mask, klass)) !=
Qnil) {
2180 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2185 return INTERRUPT_NONE;
2189rb_threadptr_pending_interrupt_empty_p(
const rb_thread_t *th)
2191 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2198 for (i=0; i<
RARRAY_LEN(th->pending_interrupt_queue); i++) {
2208rb_threadptr_pending_interrupt_deque(
rb_thread_t *th,
enum handle_interrupt_timing timing)
2213 for (i=0; i<
RARRAY_LEN(th->pending_interrupt_queue); i++) {
2216 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th,
CLASS_OF(err));
2218 switch (mask_timing) {
2219 case INTERRUPT_ON_BLOCKING:
2220 if (timing != INTERRUPT_ON_BLOCKING) {
2224 case INTERRUPT_NONE:
2225 case INTERRUPT_IMMEDIATE:
2228 case INTERRUPT_NEVER:
2233 th->pending_interrupt_queue_checked = 1;
2237 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2238 th->pending_interrupt_queue_checked = 1;
2245threadptr_pending_interrupt_active_p(
rb_thread_t *th)
2252 if (th->pending_interrupt_queue_checked) {
2256 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2268 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2269 rb_raise(rb_eArgError,
"unknown mask signature");
2277 if (
RTEST(*maskp)) {
2279 VALUE prev = *maskp;
2280 *maskp = rb_ident_hash_new();
2285 rb_hash_aset(*maskp, key, val);
2379rb_thread_s_handle_interrupt(
VALUE self,
VALUE mask_arg)
2385 enum ruby_tag_type state;
2388 rb_raise(rb_eArgError,
"block is needed.");
2391 mask_arg = rb_to_hash_type(mask_arg);
2393 if (
OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2399 if (UNDEF_P(mask)) {
2410 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2411 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2412 th->pending_interrupt_queue_checked = 0;
2413 RUBY_VM_SET_INTERRUPT(th->ec);
2416 EC_PUSH_TAG(th->ec);
2417 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2422 rb_ary_pop(th->pending_interrupt_mask_stack);
2423 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2424 th->pending_interrupt_queue_checked = 0;
2425 RUBY_VM_SET_INTERRUPT(th->ec);
2428 RUBY_VM_CHECK_INTS(th->ec);
2431 EC_JUMP_TAG(th->ec, state);
2448rb_thread_pending_interrupt_p(
int argc,
VALUE *argv,
VALUE target_thread)
2450 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2452 if (!target_th->pending_interrupt_queue) {
2455 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2459 VALUE err = argv[0];
2461 rb_raise(
rb_eTypeError,
"class or module required for rescue clause");
2463 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2528rb_thread_s_pending_interrupt_p(
int argc,
VALUE *argv,
VALUE self)
2530 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2533NORETURN(
static void rb_threadptr_to_kill(
rb_thread_t *th));
2538 VM_ASSERT(GET_THREAD() == th);
2539 rb_threadptr_pending_interrupt_clear(th);
2540 th->status = THREAD_RUNNABLE;
2542 th->ec->errinfo =
INT2FIX(TAG_FATAL);
2543 EC_JUMP_TAG(th->ec, TAG_FATAL);
2553 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2556 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2557 }
while (old != interrupt);
2558 return interrupt & (
rb_atomic_t)~ec->interrupt_mask;
2561static void threadptr_interrupt_exec_exec(
rb_thread_t *th);
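/* rb_threadptr_execute_interrupts() drains th->ec->interrupt_flag and handles
 * each class of interrupt in turn: postponed jobs, trap handlers, pending
 * Thread#raise/#kill style interrupts, terminate requests, and finally the
 * timer interrupt, which may yield the thread after its time slice. */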
2569rb_threadptr_execute_interrupts(
rb_thread_t *th,
int blocking_timing)
2572 int postponed_job_interrupt = 0;
2575 VM_ASSERT(GET_THREAD() == th);
2577 if (th->ec->raised_flag)
return ret;
2579 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2581 int timer_interrupt;
2582 int pending_interrupt;
2584 int terminate_interrupt;
2586 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2587 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2588 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2589 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2590 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK;
2592 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2596 if (postponed_job_interrupt) {
2597 rb_postponed_job_flush(th->vm);
2600 if (trap_interrupt) {
2602 if (th == th->vm->ractor.main_thread) {
2603 enum rb_thread_status prev_status = th->status;
2605 th->status = THREAD_RUNNABLE;
2607 while ((sig = rb_get_next_signal()) != 0) {
2608 ret |= rb_signal_exec(th, sig);
2611 th->status = prev_status;
2614 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2615 enum rb_thread_status prev_status = th->status;
2617 th->status = THREAD_RUNNABLE;
2619 threadptr_interrupt_exec_exec(th);
2621 th->status = prev_status;
2626 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2627 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2628 RUBY_DEBUG_LOG(
"err:%"PRIdVALUE, err);
2634 else if (err == RUBY_FATAL_THREAD_KILLED ||
2635 err == RUBY_FATAL_THREAD_TERMINATED ||
2637 terminate_interrupt = 1;
2640 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2642 err = ruby_vm_special_exception_copy(err);
2645 if (th->status == THREAD_STOPPED ||
2646 th->status == THREAD_STOPPED_FOREVER)
2647 th->status = THREAD_RUNNABLE;
2652 if (terminate_interrupt) {
2653 rb_threadptr_to_kill(th);
2656 if (timer_interrupt) {
2657 uint32_t limits_us = thread_default_quantum_ms * 1000;
2659 if (th->priority > 0)
2660 limits_us <<= th->priority;
2662 limits_us >>= -th->priority;
2664 if (th->status == THREAD_RUNNABLE)
2665 th->running_time_us += 10 * 1000;
2667 VM_ASSERT(th->ec->cfp);
2671 rb_thread_schedule_limits(limits_us);
2678rb_thread_execute_interrupts(
VALUE thval)
2680 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2686 rb_threadptr_interrupt(th);
2694 if (rb_threadptr_dead(target_th)) {
2702 exc = rb_make_exception(argc, argv);
2707 if (rb_threadptr_dead(target_th)) {
2711 rb_ec_setup_exception(GET_EC(), exc,
Qundef);
2712 rb_threadptr_pending_interrupt_enque(target_th, exc);
2713 rb_threadptr_interrupt(target_th);
2718rb_threadptr_signal_raise(
rb_thread_t *th,
int sig)
2724 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2736 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2742 if (ec->raised_flag & RAISED_EXCEPTION) {
2745 ec->raised_flag |= RAISED_EXCEPTION;
2752 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2755 ec->raised_flag &= ~RAISED_EXCEPTION;
2780thread_io_close_notify_all(
VALUE _io)
2785 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2786 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2789 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2797 if (thread->scheduler !=
Qnil) {
2803 rb_threadptr_pending_interrupt_enque(thread, error);
2804 rb_threadptr_interrupt(thread);
2811 return (
VALUE)count;
2815rb_thread_io_close_interrupt(
struct rb_io *io)
2818 if (io->closing_ec) {
2823 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2829 io->closing_ec = ec;
2837 return (
size_t)result;
2841rb_thread_io_close_wait(
struct rb_io* io)
2843 VALUE wakeup_mutex = io->wakeup_mutex;
2851 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2857 io->wakeup_mutex =
Qnil;
2858 io->closing_ec = NULL;
2864 rb_warn(
"rb_thread_fd_close is deprecated (and is now a no-op).");
2889thread_raise_m(
int argc,
VALUE *argv,
VALUE self)
2894 threadptr_check_pending_interrupt_queue(target_th);
2895 rb_threadptr_raise(target_th, argc, argv);
2898 if (current_th == target_th) {
2899 RUBY_VM_CHECK_INTS(target_th->ec);
2921 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2924 if (target_th == target_th->vm->ractor.main_thread) {
2928 RUBY_DEBUG_LOG(
"target_th:%u", rb_th_serial(target_th));
2930 if (target_th == GET_THREAD()) {
2932 rb_threadptr_to_kill(target_th);
2935 threadptr_check_pending_interrupt_queue(target_th);
2936 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2937 rb_threadptr_interrupt(target_th);
2944rb_thread_to_be_killed(
VALUE thread)
2948 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
3024 if (target_th->status == THREAD_KILLED)
return Qnil;
3026 rb_threadptr_ready(target_th);
3028 if (target_th->status == THREAD_STOPPED ||
3029 target_th->status == THREAD_STOPPED_FOREVER) {
3030 target_th->status = THREAD_RUNNABLE;
3072 "stopping only thread\n\tnote: use sleep to stop forever");
3105 return rb_ractor_thread_list();
3131 return rb_thread_list();
3137 return GET_THREAD()->self;
3150thread_s_current(
VALUE klass)
3158 return GET_RACTOR()->threads.main->self;
3169rb_thread_s_main(
VALUE klass)
3196rb_thread_s_abort_exc(
VALUE _)
3198 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3233rb_thread_s_abort_exc_set(
VALUE self,
VALUE val)
3235 GET_THREAD()->vm->thread_abort_on_exception =
RTEST(val);
3256rb_thread_abort_exc(
VALUE thread)
3258 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3276rb_thread_abort_exc_set(
VALUE thread,
VALUE val)
3278 rb_thread_ptr(thread)->abort_on_exception =
RTEST(val);
3326rb_thread_s_report_exc(
VALUE _)
3328 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3363rb_thread_s_report_exc_set(
VALUE self,
VALUE val)
3365 GET_THREAD()->vm->thread_report_on_exception =
RTEST(val);
3382rb_thread_s_ignore_deadlock(
VALUE _)
3384 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3409rb_thread_s_ignore_deadlock_set(
VALUE self,
VALUE val)
3411 GET_THREAD()->vm->thread_ignore_deadlock =
RTEST(val);
3433rb_thread_report_exc(
VALUE thread)
3435 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3453rb_thread_report_exc_set(
VALUE thread,
VALUE val)
3455 rb_thread_ptr(thread)->report_on_exception =
RTEST(val);
3470rb_thread_group(
VALUE thread)
3472 return rb_thread_ptr(thread)->thgroup;
3478 switch (th->status) {
3479 case THREAD_RUNNABLE:
3480 return th->to_kill ?
"aborting" :
"run";
3481 case THREAD_STOPPED_FOREVER:
3482 if (detail)
return "sleep_forever";
3483 case THREAD_STOPPED:
3495 return th->status == THREAD_KILLED;
3531rb_thread_status(
VALUE thread)
3535 if (rb_threadptr_dead(target_th)) {
3536 if (!
NIL_P(target_th->ec->errinfo) &&
3537 !
FIXNUM_P(target_th->ec->errinfo)) {
3545 return rb_str_new2(thread_status_name(target_th, FALSE));
3565rb_thread_alive_p(
VALUE thread)
3567 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3585rb_thread_stop_p(
VALUE thread)
3589 if (rb_threadptr_dead(th)) {
3592 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3603rb_thread_getname(
VALUE thread)
3605 return rb_thread_ptr(thread)->name;
3624 enc = rb_enc_get(name);
3625 if (!rb_enc_asciicompat(enc)) {
3626 rb_raise(rb_eArgError,
"ASCII incompatible encoding (%s)",
3631 target_th->name = name;
3632 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3633 native_set_another_thread_name(target_th->nt->thread_id, name);
3638#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3662rb_thread_native_thread_id(
VALUE thread)
3665 if (rb_threadptr_dead(target_th))
return Qnil;
3666 return native_thread_native_thread_id(target_th);
3669# define rb_thread_native_thread_id rb_f_notimplement
3680rb_thread_to_s(
VALUE thread)
3687 status = thread_status_name(target_th, TRUE);
3688 str = rb_sprintf(
"#<%"PRIsVALUE
":%p", cname, (
void *)thread);
3689 if (!
NIL_P(target_th->name)) {
3690 rb_str_catf(str,
"@%"PRIsVALUE, target_th->name);
3692 if ((loc = threadptr_invoke_proc_location(target_th)) !=
Qnil) {
3693 rb_str_catf(str,
" %"PRIsVALUE
":%"PRIsVALUE,
3696 rb_str_catf(str,
" %s>", status);
3702#define recursive_key id__recursive_key__
3707 if (
id == recursive_key) {
3708 return th->ec->local_storage_recursive_hash;
3712 struct rb_id_table *local_storage = th->ec->local_storage;
3714 if (local_storage != NULL && rb_id_table_lookup(local_storage,
id, &val)) {
3726 return threadptr_local_aref(rb_thread_ptr(thread),
id);
3793 if (!
id)
return Qnil;
3811rb_thread_fetch(
int argc,
VALUE *argv,
VALUE self)
3822 if (block_given && argc == 2) {
3823 rb_warn(
"block supersedes default value argument");
3828 if (
id == recursive_key) {
3829 return target_th->ec->local_storage_recursive_hash;
3831 else if (
id && target_th->ec->local_storage &&
3832 rb_id_table_lookup(target_th->ec->local_storage,
id, &val)) {
3835 else if (block_given) {
3838 else if (argc == 1) {
3839 rb_key_err_raise(rb_sprintf(
"key not found: %+"PRIsVALUE, key), self, key);
3849 if (
id == recursive_key) {
3850 th->ec->local_storage_recursive_hash = val;
3854 struct rb_id_table *local_storage = th->ec->local_storage;
3857 if (!local_storage)
return Qnil;
3858 rb_id_table_delete(local_storage,
id);
3862 if (local_storage == NULL) {
3863 th->ec->local_storage = local_storage = rb_id_table_create(0);
3865 rb_id_table_insert(local_storage,
id, val);
3878 return threadptr_local_aset(rb_thread_ptr(thread),
id, val);
3929rb_thread_variable_get(
VALUE thread,
VALUE key)
3934 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3937 locals = rb_thread_local_storage(thread);
3938 return rb_hash_aref(locals, symbol);
3959 locals = rb_thread_local_storage(thread);
3981 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3983 if (!
id || local_storage == NULL) {
3986 return RBOOL(rb_id_table_lookup(local_storage,
id, &val));
3989static enum rb_id_table_iterator_result
3990thread_keys_i(
ID key,
VALUE value,
void *ary)
3993 return ID_TABLE_CONTINUE;
4000 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4018rb_thread_keys(
VALUE self)
4020 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4023 if (local_storage) {
4024 rb_id_table_foreach(local_storage, thread_keys_i, (
void *)ary);
4054rb_thread_variables(
VALUE thread)
4060 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4063 locals = rb_thread_local_storage(thread);
4091 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4094 locals = rb_thread_local_storage(thread);
4096 return RBOOL(rb_hash_lookup(locals, symbol) !=
Qnil);
4115rb_thread_priority(
VALUE thread)
4117 return INT2NUM(rb_thread_ptr(thread)->priority);
4148rb_thread_priority_set(
VALUE thread,
VALUE prio)
4153#if USE_NATIVE_THREAD_PRIORITY
4154 target_th->priority =
NUM2INT(prio);
4155 native_thread_apply_priority(th);
4158 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4159 priority = RUBY_THREAD_PRIORITY_MAX;
4161 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4162 priority = RUBY_THREAD_PRIORITY_MIN;
4164 target_th->priority = (int8_t)priority;
4166 return INT2NUM(target_th->priority);
4171#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4207 FD_ZERO(fds->
fdset);
4213 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
4215 if (size <
sizeof(fd_set))
4216 size =
sizeof(fd_set);
4240 size_t m = howmany(n + 1, NFDBITS) *
sizeof(fd_mask);
4241 size_t o = howmany(fds->
maxfd, NFDBITS) *
sizeof(fd_mask);
4243 if (m <
sizeof(fd_set)) m =
sizeof(fd_set);
4244 if (o <
sizeof(fd_set)) o =
sizeof(fd_set);
4248 memset((
char *)fds->
fdset + o, 0, m - o);
4257 FD_SET(n, fds->
fdset);
4263 if (n >= fds->
maxfd)
return;
4264 FD_CLR(n, fds->
fdset);
4270 if (n >= fds->
maxfd)
return 0;
4271 return FD_ISSET(n, fds->
fdset) != 0;
4277 size_t size = howmany(max, NFDBITS) *
sizeof(fd_mask);
4279 if (size <
sizeof(fd_set)) size =
sizeof(fd_set);
4282 memcpy(dst->
fdset, src, size);
4288 size_t size = howmany(
rb_fd_max(src), NFDBITS) *
sizeof(fd_mask);
4290 if (size <
sizeof(fd_set))
4291 size =
sizeof(fd_set);
4300 fd_set *r = NULL, *w = NULL, *e = NULL;
4313 return select(n, r, w, e, timeout);
4316#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4323#define FD_ZERO(f) rb_fd_zero(f)
4324#define FD_SET(i, f) rb_fd_set((i), (f))
4325#define FD_CLR(i, f) rb_fd_clr((i), (f))
4326#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4328#elif defined(_WIN32)
4333 set->
capa = FD_SETSIZE;
4335 FD_ZERO(set->
fdset);
4357 SOCKET s = rb_w32_get_osfhandle(fd);
4359 for (i = 0; i < set->
fdset->fd_count; i++) {
4360 if (set->
fdset->fd_array[i] == s) {
4364 if (set->
fdset->fd_count >= (
unsigned)set->
capa) {
4365 set->
capa = (set->
fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4367 rb_xrealloc_mul_add(
4368 set->
fdset, set->
capa,
sizeof(SOCKET),
sizeof(
unsigned int));
4370 set->
fdset->fd_array[set->
fdset->fd_count++] = s;
4378#define FD_ZERO(f) rb_fd_zero(f)
4379#define FD_SET(i, f) rb_fd_set((i), (f))
4380#define FD_CLR(i, f) rb_fd_clr((i), (f))
4381#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4383#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4387#ifndef rb_fd_no_init
4388#define rb_fd_no_init(fds) (void)(fds)
4392wait_retryable(
volatile int *result,
int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4402 if (rel && hrtime_update_expire(rel, end)) {
4412 return !hrtime_update_expire(rel, end);
4432select_set_free(
VALUE p)
4447 volatile int result = 0;
4449 rb_hrtime_t *to, rel, end = 0;
4451 timeout_prepare(&to, &rel, &end, set->timeout);
4452 volatile rb_hrtime_t endtime = end;
4453#define restore_fdset(dst, src) \
4454 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4455#define do_select_update() \
4456 (restore_fdset(set->rset, &set->orig_rset), \
4457 restore_fdset(set->wset, &set->orig_wset), \
4458 restore_fdset(set->eset, &set->orig_eset), \
4464 BLOCKING_REGION(set->th, {
4467 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4468 result = native_fd_select(set->max,
4469 set->rset, set->wset, set->eset,
4470 rb_hrtime2timeval(&tv, to), set->th);
4471 if (result < 0) lerrno = errno;
4473 }, ubf_select, set->th, TRUE);
4475 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4476 }
while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4478 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4484 return (
VALUE)result;
4493 set.th = GET_THREAD();
4494 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4499 set.timeout = timeout;
4501 if (!set.rset && !set.wset && !set.eset) {
4510#define fd_init_copy(f) do { \
4512 rb_fd_resize(set.max - 1, set.f); \
4513 if (&set.orig_##f != set.f) { \
4514 rb_fd_init_copy(&set.orig_##f, set.f); \
4518 rb_fd_no_init(&set.orig_##f); \
4532#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4533#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4534#define POLLEX_SET (POLLPRI)
4537# define POLLERR_SET (0)
4541wait_for_single_fd_blocking_region(
rb_thread_t *th,
struct pollfd *fds, nfds_t nfds,
4542 rb_hrtime_t *
const to,
volatile int *lerrno)
4545 volatile int result = 0;
4548 BLOCKING_REGION(th, {
4549 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4550 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4551 if (result < 0) *lerrno = errno;
4553 }, ubf_select, th, TRUE);
4561thread_io_wait(
struct rb_io *io,
int fd,
int events,
struct timeval *timeout)
4563 struct pollfd fds[1] = {{
4565 .events = (short)events,
4568 volatile int result = 0;
4571 enum ruby_tag_type state;
4572 volatile int lerrno;
4578 blocking_operation.ec = ec;
4579 rb_io_blocking_operation_enter(io, &blocking_operation);
4582 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4585 fds[0].revents = events;
4590 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4591 rb_hrtime_t *to, rel, end = 0;
4592 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4593 timeout_prepare(&to, &rel, &end, timeout);
4595 nfds = numberof(fds);
4596 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4598 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4599 }
while (wait_retryable(&result, lerrno, to, end));
4601 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4608 rb_io_blocking_operation_exit(io, &blocking_operation);
4612 EC_JUMP_TAG(ec, state);
4620 if (fds[0].revents & POLLNVAL) {
4630 if (fds[0].revents & POLLIN_SET)
4631 result |= RB_WAITFD_IN;
4632 if (fds[0].revents & POLLOUT_SET)
4633 result |= RB_WAITFD_OUT;
4634 if (fds[0].revents & POLLEX_SET)
4635 result |= RB_WAITFD_PRI;
4638 if (fds[0].revents & POLLERR_SET)
4659select_single(
VALUE ptr)
4665 args->read, args->write, args->except, args->tv);
4667 args->as.error =
errno;
4670 if (args->read &&
rb_fd_isset(args->as.fd, args->read))
4672 if (args->write &&
rb_fd_isset(args->as.fd, args->write))
4674 if (args->except &&
rb_fd_isset(args->as.fd, args->except))
4681select_single_cleanup(
VALUE ptr)
4685 if (args->blocking_operation) {
4686 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4709thread_io_wait(
struct rb_io *io,
int fd,
int events,
struct timeval *timeout)
4718 blocking_operation.ec = GET_EC();
4719 rb_io_blocking_operation_enter(io, &blocking_operation);
4720 args.blocking_operation = &blocking_operation;
4724 blocking_operation.ec = NULL;
4725 args.blocking_operation = NULL;
4729 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4730 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4731 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4734 int result = (int)
rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4736 errno = args.as.error;
4743rb_thread_wait_for_single_fd(
int fd,
int events,
struct timeval *timeout)
4745 return thread_io_wait(NULL, fd, events, timeout);
4749rb_thread_io_wait(
struct rb_io *io,
int events,
struct timeval * timeout)
4751 return thread_io_wait(io, io->
fd, events, timeout);
4758#ifdef USE_CONSERVATIVE_STACK_END
4760rb_gc_set_stack_end(
VALUE **stack_end_p)
4763COMPILER_WARNING_PUSH
4765COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4767 *stack_end_p = &stack_end;
4780 if (rb_signal_buff_size() > 0) {
4782 threadptr_trap_interrupt(mth);
4787async_bug_fd(
const char *mesg,
int errno_arg,
int fd)
4790 size_t n = strlcpy(buff, mesg,
sizeof(buff));
4791 if (n <
sizeof(buff)-3) {
4794 rb_async_bug_errno(buff, errno_arg);
4799consume_communication_pipe(
int fd)
4805 static char buff[1024];
4811 result = read(fd, buff,
sizeof(buff));
4813 RUBY_DEBUG_LOG(
"resultf:%d buff:%lu", (
int)result, (
unsigned long)buff[0]);
4815 RUBY_DEBUG_LOG(
"result:%d", (
int)result);
4819 if (USE_EVENTFD || result < (ssize_t)
sizeof(buff)) {
4823 else if (result == 0) {
4826 else if (result < 0) {
4832#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4837 async_bug_fd(
"consume_communication_pipe: read", e, fd);
4844rb_thread_stop_timer_thread(
void)
4846 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4847 native_reset_timer_thread();
4852rb_thread_reset_timer_thread(
void)
4854 native_reset_timer_thread();
4858rb_thread_start_timer_thread(
void)
4861 rb_thread_create_timer_thread();
4865clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4873 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4895rb_clear_coverages(
void)
4897 VALUE coverages = rb_get_coverages();
4898 if (
RTEST(coverages)) {
4903#if defined(HAVE_WORKING_FORK)
4911 vm->ractor.main_ractor = r;
4912 vm->ractor.main_thread = th;
4913 r->threads.main = th;
4914 r->status_ = ractor_created;
4916 thread_sched_atfork(TH_SCHED(th));
4920 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4921 if (r != vm->ractor.main_ractor) {
4922 rb_ractor_terminate_atfork(vm, r);
4924 ccan_list_for_each(&r->threads.set, i, lt_node) {
4928 rb_vm_living_threads_init(vm);
4930 rb_ractor_atfork(vm, th);
4931 rb_vm_postponed_job_atfork();
4935 ccan_list_head_init(&th->interrupt_exec_tasks);
4938 rb_ractor_sleeper_threads_clear(th->ractor);
4939 rb_clear_coverages();
4942 rb_thread_reset_timer_thread();
4943 rb_thread_start_timer_thread();
4945 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4946 VM_ASSERT(vm->ractor.cnt == 1);
4952 if (th != current_th) {
4954 rb_mutex_abandon_keeping_mutexes(th);
4955 rb_mutex_abandon_locking_mutex(th);
4956 thread_cleanup_func(th, TRUE);
4965 rb_threadptr_pending_interrupt_clear(th);
4966 rb_thread_atfork_internal(th, terminate_atfork_i);
4967 th->join_list = NULL;
4968 rb_fiber_atfork(th);
4977 if (th != current_th) {
4978 thread_cleanup_func_before_exec(th);
4986 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
5011 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5034thgroup_s_alloc(
VALUE klass)
5055thgroup_list(
VALUE group)
5061 ccan_list_for_each(&r->threads.set, th, lt_node) {
5062 if (th->thgroup == group) {
5087thgroup_enclose(
VALUE group)
5106thgroup_enclosed_p(
VALUE group)
5111 return RBOOL(data->enclosed);
5151 if (data->enclosed) {
5159 if (data->enclosed) {
5161 "can't move from the enclosed thread group");
5164 target_th->thgroup = group;
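/* ThreadShield: an internal synchronization object (used, for example, by
 * require) that keeps the count of waiting threads in the FL_USER flag bits
 * of the shield object, as the THREAD_SHIELD_WAITING_* macros below show. */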
thread_shield_mark(void *ptr)
    rb_gc_mark((VALUE)ptr);
    {thread_shield_mark, 0, 0,},
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY
thread_shield_alloc(VALUE klass)
#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
static inline unsigned int
rb_thread_shield_waiting(VALUE b)
    return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
rb_thread_shield_waiting_inc(VALUE b)
    unsigned int w = rb_thread_shield_waiting(b);
    if (w > THREAD_SHIELD_WAITING_MAX)
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
rb_thread_shield_waiting_dec(VALUE b)
    unsigned int w = rb_thread_shield_waiting(b);
    RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
    RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
rb_thread_shield_new(void)
    VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
    return thread_shield;
5230rb_thread_shield_owned(
VALUE self)
5232 VALUE mutex = GetThreadShieldPtr(self);
5233 if (!mutex)
return false;
5237 return m->fiber == GET_EC()->fiber_ptr;
5249rb_thread_shield_wait(
VALUE self)
5251 VALUE mutex = GetThreadShieldPtr(self);
5254 if (!mutex)
return Qfalse;
5255 m = mutex_ptr(mutex);
5256 if (m->fiber == GET_EC()->fiber_ptr)
return Qnil;
5257 rb_thread_shield_waiting_inc(self);
5259 rb_thread_shield_waiting_dec(self);
5262 return rb_thread_shield_waiting(self) > 0 ?
Qnil :
Qfalse;
5266thread_shield_get_mutex(
VALUE self)
5268 VALUE mutex = GetThreadShieldPtr(self);
5270 rb_raise(
rb_eThreadError,
"destroyed thread shield - %p", (
void *)self);
5278rb_thread_shield_release(
VALUE self)
5280 VALUE mutex = thread_shield_get_mutex(self);
5282 return RBOOL(rb_thread_shield_waiting(self) > 0);
5289rb_thread_shield_destroy(
VALUE self)
5291 VALUE mutex = thread_shield_get_mutex(self);
5294 return RBOOL(rb_thread_shield_waiting(self) > 0);
5300 return th->ec->local_storage_recursive_hash;
5306 th->ec->local_storage_recursive_hash = hash;
5318recursive_list_access(
VALUE sym)
5321 VALUE hash = threadptr_recursive_hash(th);
5324 hash = rb_ident_hash_new();
5325 threadptr_recursive_hash_set(th, hash);
5329 list = rb_hash_aref(hash, sym);
5332 list = rb_ident_hash_new();
5333 rb_hash_aset(hash, sym, list);
5347#if SIZEOF_LONG == SIZEOF_VOIDP
5348 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5349#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5350 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5351 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5354 VALUE pair_list = rb_hash_lookup2(list, obj,
Qundef);
5355 if (UNDEF_P(pair_list))
5357 if (paired_obj_id) {
5359 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5363 if (
NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5385 rb_hash_aset(list, obj,
Qtrue);
5387 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj,
Qundef))) {
5388 rb_hash_aset(list, obj, paired_obj);
5392 VALUE other_paired_obj = pair_list;
5393 pair_list = rb_hash_new();
5394 rb_hash_aset(pair_list, other_paired_obj,
Qtrue);
5395 rb_hash_aset(list, obj, pair_list);
5397 rb_hash_aset(pair_list, paired_obj,
Qtrue);
5413 VALUE pair_list = rb_hash_lookup2(list, obj,
Qundef);
5414 if (UNDEF_P(pair_list)) {
5418 rb_hash_delete_entry(pair_list, paired_obj);
5424 rb_hash_delete_entry(list, obj);
5440 return (*p->func)(p->obj, p->arg, FALSE);
5461 p.list = recursive_list_access(sym);
5465 outermost = outer && !recursive_check(p.list,
ID2SYM(recursive_key), 0);
5467 if (recursive_check(p.list, p.obj, pairid)) {
5468 if (outer && !outermost) {
5471 return (*func)(obj, arg, TRUE);
5474 enum ruby_tag_type state;
5479 recursive_push(p.list,
ID2SYM(recursive_key), 0);
5480 recursive_push(p.list, p.obj, p.pairid);
5481 result = rb_catch_protect(p.list, exec_recursive_i, (
VALUE)&p, &state);
5482 if (!recursive_pop(p.list, p.obj, p.pairid))
goto invalid;
5483 if (!recursive_pop(p.list,
ID2SYM(recursive_key), 0))
goto invalid;
5484 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5485 if (result == p.list) {
5486 result = (*func)(obj, arg, TRUE);
5491 recursive_push(p.list, p.obj, p.pairid);
5492 EC_PUSH_TAG(GET_EC());
5493 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5494 ret = (*func)(obj, arg, FALSE);
5497 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5500 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5509 "for %+"PRIsVALUE
" in %+"PRIsVALUE,
5533 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0,
rb_frame_last_func());
5551 return exec_recursive(func, obj, 0, arg, 1, mid);
5563 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1,
rb_frame_last_func());
5575rb_thread_backtrace_m(
int argc,
VALUE *argv,
VALUE thval)
5577 return rb_vm_thread_backtrace(argc, argv, thval);
5592rb_thread_backtrace_locations_m(
int argc,
VALUE *argv,
VALUE thval)
5594 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5598Init_Thread_Mutex(
void)
5687 rb_vm_register_special_exception(ruby_error_stream_closed,
rb_eIOError,
5688 "stream closed in another thread");
5697 const char * ptr = getenv(
"RUBY_THREAD_TIMESLICE");
5700 long quantum = strtol(ptr, NULL, 0);
5701 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5702 thread_default_quantum_ms = (uint32_t)quantum;
5705 fprintf(stderr,
"Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5710 th->thgroup = th->ractor->thgroup_default =
rb_obj_alloc(cThGroup);
5711 rb_define_const(cThGroup,
"Default", th->thgroup);
5721#ifdef HAVE_PTHREAD_NP_H
5722 VM_ASSERT(TH_SCHED(th)->running == th);
5729 th->pending_interrupt_queue_checked = 0;
5734 rb_thread_create_timer_thread();
5750#ifdef NON_SCALAR_THREAD_ID
5751 #define thread_id_str(th) (NULL)
5753 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5762 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5763 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5764 (void *)GET_THREAD(), (void *)r->threads.main);
5766 ccan_list_for_each(&r->threads.set, th, lt_node) {
5767 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5769 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5771 if (th->locking_mutex) {
5772 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5773 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5774 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5780 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
5784 rb_str_catf(msg, "\n ");
5785 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5786 rb_str_catf(msg, "\n");
5793 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5795#ifdef RUBY_THREAD_PTHREAD_H
5796 if (r->threads.sched.readyq_cnt > 0) return;
5799 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5800 int ltnum = rb_ractor_living_thread_num(r);
5802 if (ltnum > sleeper_num) return;
5803 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5808 ccan_list_for_each(&r->threads.set, th, lt_node) {
5809 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5812 else if (th->locking_mutex) {
5813 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5814 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5825 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5826 debug_deadlock_check(r, argv[1]);
5827 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5828 rb_threadptr_raise(r->threads.main, 2, argv);
5836 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5841 VM_ASSERT(line >= 0);
5845 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5846 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5867 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5871 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5886 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5888 if (!me->def) return NULL;
5891 switch (me->def->type) {
5892 case VM_METHOD_TYPE_ISEQ: {
5895 path = rb_iseq_path(iseq);
5896 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5897 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5898 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5899 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5902 case VM_METHOD_TYPE_BMETHOD: {
5903 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5906 rb_iseq_check(iseq);
5907 path = rb_iseq_path(iseq);
5908 loc = &ISEQ_BODY(iseq)->location;
5909 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5910 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5911 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5912 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5917 case VM_METHOD_TYPE_ALIAS:
5918 me = me->def->body.alias.original_me;
5920 case VM_METHOD_TYPE_REFINED:
5921 me = me->def->body.refined.orig_me;
5922 if (!me) return NULL;
5933 if (resolved_location) {
5934 resolved_location[0] = path;
5935 resolved_location[1] = beg_pos_lineno;
5936 resolved_location[2] = beg_pos_column;
5937 resolved_location[3] = end_pos_lineno;
5938 resolved_location[4] = end_pos_column;
5952 me = rb_resolve_me_location(me, 0);
5955 rcount = rb_hash_aref(me2counter, (VALUE) me);
5963rb_get_coverages(void)
5965 return GET_VM()->coverages;
5969rb_get_coverage_mode(void)
5971 return GET_VM()->coverage_mode;
5975rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5977 GET_VM()->coverages = coverages;
5978 GET_VM()->me2counter = me2counter;
5979 GET_VM()->coverage_mode = mode;
5983rb_resume_coverages(void)
5985 int mode = GET_VM()->coverage_mode;
5986 VALUE me2counter = GET_VM()->me2counter;
5987 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5988 if (mode & COVERAGE_TARGET_BRANCHES) {
5989 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5991 if (mode & COVERAGE_TARGET_METHODS) {
5997rb_suspend_coverages(void)
6000 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6003 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6010rb_reset_coverages(void)
6012 rb_clear_coverages();
6013 rb_iseq_remove_coverage_all();
6014 GET_VM()->coverages = Qfalse;
6018rb_default_coverage(int n)
6020 VALUE coverage = rb_ary_hidden_new_fill(3);
6022 int mode = GET_VM()->coverage_mode;
6024 if (mode & COVERAGE_TARGET_LINES) {
6027 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6029 if (mode & COVERAGE_TARGET_BRANCHES) {
6030 branches = rb_ary_hidden_new_fill(2);
6052 VALUE structure = rb_hash_new();
6058 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6064uninterruptible_exit(VALUE v)
6067 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6069 cur_th->pending_interrupt_queue_checked = 0;
6070 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6071 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6079 VALUE interrupt_mask = rb_ident_hash_new();
6082 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6084 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6088 RUBY_VM_CHECK_INTS(cur_th->ec);
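The mask push/pop above defers asynchronous interrupts (Thread#raise, Thread#kill, signal handlers) while a callback runs, then re-checks them once the mask is popped. A hedged sketch of how a caller might use this, assuming the internal entry point is named rb_uninterruptible with the signature VALUE rb_uninterruptible(VALUE (*)(VALUE), VALUE) (the helper names below are mine):

/* Body that must not be interrupted asynchronously. */
static VALUE
critical_body(VALUE arg)
{
    /* ... work that must run to completion ... */
    return Qnil;
}

static VALUE
run_critical(void)
{
    /* Pending interrupts are delivered as soon as the mask is popped. */
    return rb_uninterruptible(critical_body, Qnil);
}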
6095 VM_ASSERT(th->specific_storage == NULL);
6097 if (UNLIKELY(specific_key_count > 0)) {
6098 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6102rb_internal_thread_specific_key_t
6107 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6108 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6110 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6111 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6114 rb_internal_thread_specific_key_t key = specific_key_count++;
6121 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6122 thread_specific_storage_alloc(th);
6135 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6136 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6137 VM_ASSERT(th->specific_storage);
6139 return th->specific_storage[key];
6148 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6149 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6150 VM_ASSERT(th->specific_storage);
6152 th->specific_storage[key] = data;
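These functions back the public rb_internal_thread_specific_* API. A hedged sketch of how a profiling tool written as a C extension might attach per-thread data with it, assuming the declarations live in ruby/thread.h as in recent CRuby; the tool and variable names are hypothetical:

#include <stdlib.h>
#include "ruby/thread.h"

static rb_internal_thread_specific_key_t my_tool_key;

/* Call once at extension init time, before any additional Ractor exists
 * (the check above raises otherwise for the first key). */
static void
my_tool_init(void)
{
    my_tool_key = rb_internal_thread_specific_key_create();
}

/* Lazily attach a native counter to the given Ruby Thread object. */
static unsigned long *
my_tool_counter_for(VALUE thread)
{
    unsigned long *p = rb_internal_thread_specific_get(thread, my_tool_key);
    if (p == NULL) {
        p = calloc(1, sizeof(*p));
        rb_internal_thread_specific_set(thread, my_tool_key, p);
    }
    return p;
}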
6158 struct ccan_list_node node;
6160 rb_interrupt_exec_func_t *func;
6162 enum rb_interrupt_exec_flag flags;
6166rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6170 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6171 if (task->flags & rb_interrupt_exec_flag_value_data) {
6172 rb_gc_mark((VALUE)task->data);
6180rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6192 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6193 threadptr_set_interrupt_locked(th, true);
6210 RUBY_DEBUG_LOG("task:%p", task);
6213 (*task->func)(task->data);
6237 rb_interrupt_exec_func_t *func;
6242interrupt_ractor_new_thread_func(void *data)
6252interrupt_ractor_func(void *data)
6262 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6266 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6271 rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
#define RUBY_ASSERT_ALWAYS(expr,...)
A variant of RUBY_ASSERT that does not interface with RUBY_DEBUG.
#define RUBY_ASSERT(...)
Asserts that the given expression is truthy if and only if RUBY_DEBUG is truthy.
std::atomic< unsigned > rb_atomic_t
Type that is eligible for atomic operations.
#define rb_define_method(klass, mid, func, arity)
Defines klass#mid.
#define rb_define_singleton_method(klass, mid, func, arity)
Defines klass.mid.
#define RUBY_INTERNAL_EVENT_SWITCH
Thread switched.
int rb_remove_event_hook(rb_event_hook_func_t func)
Removes the passed function from the list of event hooks.
#define RUBY_EVENT_THREAD_BEGIN
Encountered a new thread.
void(* rb_event_hook_func_t)(rb_event_flag_t evflag, VALUE data, VALUE self, ID mid, VALUE klass)
Type of event hooks.
uint32_t rb_event_flag_t
Represents event(s).
#define RUBY_EVENT_CALL
A method, written in Ruby, is called.
#define RUBY_EVENT_THREAD_END
Encountered an end of a thread.
static void RB_FL_SET_RAW(VALUE obj, VALUE flags)
This is an implementation detail of RB_FL_SET().
VALUE rb_define_class(const char *name, VALUE super)
Defines a top-level class.
void rb_define_alias(VALUE klass, const char *name1, const char *name2)
Defines an alias of a method.
ID rb_frame_last_func(void)
Returns the ID of the last method in the call stack.
int rb_keyword_given_p(void)
Determines if the current method is given a keyword argument.
int rb_block_given_p(void)
Determines if the current method is given a block.
#define rb_str_new2
Old name of rb_str_new_cstr.
#define ALLOC
Old name of RB_ALLOC.
#define T_STRING
Old name of RUBY_T_STRING.
#define xfree
Old name of ruby_xfree.
#define Qundef
Old name of RUBY_Qundef.
#define INT2FIX
Old name of RB_INT2FIX.
#define OBJ_FROZEN
Old name of RB_OBJ_FROZEN.
#define xrealloc
Old name of ruby_xrealloc.
#define ID2SYM
Old name of RB_ID2SYM.
#define OBJ_FREEZE
Old name of RB_OBJ_FREEZE.
#define UNREACHABLE_RETURN
Old name of RBIMPL_UNREACHABLE_RETURN.
#define CLASS_OF
Old name of rb_class_of.
#define xmalloc
Old name of ruby_xmalloc.
#define LONG2FIX
Old name of RB_INT2FIX.
#define FIX2INT
Old name of RB_FIX2INT.
#define ZALLOC_N
Old name of RB_ZALLOC_N.
#define T_ICLASS
Old name of RUBY_T_ICLASS.
#define T_HASH
Old name of RUBY_T_HASH.
#define Qtrue
Old name of RUBY_Qtrue.
#define NUM2INT
Old name of RB_NUM2INT.
#define INT2NUM
Old name of RB_INT2NUM.
#define Qnil
Old name of RUBY_Qnil.
#define Qfalse
Old name of RUBY_Qfalse.
#define FIX2LONG
Old name of RB_FIX2LONG.
#define T_ARRAY
Old name of RUBY_T_ARRAY.
#define T_OBJECT
Old name of RUBY_T_OBJECT.
#define NIL_P
Old name of RB_NIL_P.
#define POSFIXABLE
Old name of RB_POSFIXABLE.
#define BUILTIN_TYPE
Old name of RB_BUILTIN_TYPE.
#define FIXNUM_P
Old name of RB_FIXNUM_P.
#define SYMBOL_P
Old name of RB_SYMBOL_P.
void ruby_stop(int ex)
Calls ruby_cleanup() and exits the process.
#define ruby_debug
This variable controls whether the interpreter is in debug mode.
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
VALUE rb_eSystemExit
SystemExit exception.
VALUE rb_eIOError
IOError exception.
VALUE rb_eStandardError
StandardError exception.
VALUE rb_eTypeError
TypeError exception.
void rb_frozen_error_raise(VALUE frozen_obj, const char *fmt,...)
Raises an instance of rb_eFrozenError.
VALUE rb_eFatal
fatal exception.
VALUE rb_eRuntimeError
RuntimeError exception.
void rb_warn(const char *fmt,...)
Identical to rb_warning(), except it reports unless $VERBOSE is nil.
VALUE rb_exc_new(VALUE etype, const char *ptr, long len)
Creates an instance of the passed exception class.
VALUE rb_eException
Mother of all exceptions.
VALUE rb_eThreadError
ThreadError exception.
void rb_exit(int status)
Terminates the current execution context.
VALUE rb_eSignal
SignalException exception.
VALUE rb_obj_alloc(VALUE klass)
Allocates an instance of the given class.
VALUE rb_cInteger
Integer class.
VALUE rb_obj_hide(VALUE obj)
Make the object invisible from Ruby code.
VALUE rb_obj_class(VALUE obj)
Queries the class of an object.
VALUE rb_cThread
Thread class.
VALUE rb_cModule
Module class.
double rb_num2dbl(VALUE num)
Converts an instance of rb_cNumeric into C's double.
VALUE rb_obj_is_kind_of(VALUE obj, VALUE klass)
Queries if the given object is an instance (of possibly descendants) of the given class.
VALUE rb_ary_shift(VALUE ary)
Destructively deletes an element from the beginning of the passed array and returns what was deleted.
VALUE rb_ary_dup(VALUE ary)
Duplicates an array.
VALUE rb_ary_delete_at(VALUE ary, long pos)
Destructively removes an element which resides at the specific index of the passed array.
VALUE rb_ary_new(void)
Allocates a new, empty array.
VALUE rb_ary_pop(VALUE ary)
Destructively deletes an element from the end of the passed array and returns what was deleted.
VALUE rb_ary_hidden_new(long capa)
Allocates a hidden (no class) empty array.
VALUE rb_ary_clear(VALUE ary)
Destructively removes everything form an array.
VALUE rb_ary_push(VALUE ary, VALUE elem)
Special case of rb_ary_cat() that it adds only one element.
VALUE rb_ary_entry(VALUE ary, long off)
Queries an element of an array.
VALUE rb_ary_join(VALUE ary, VALUE sep)
Recursively stringises the elements of the passed array, flattens that result, then joins the sequenc...
static int rb_check_arity(int argc, int min, int max)
Ensures that the passed integer is in the passed range.
VALUE rb_block_proc(void)
Constructs a Proc object from implicitly passed components.
void rb_reset_random_seed(void)
Resets the RNG behind rb_genrand_int32()/rb_genrand_real().
VALUE rb_str_new_frozen(VALUE str)
Creates a frozen copy of the string, if necessary.
VALUE rb_str_concat(VALUE dst, VALUE src)
Identical to rb_str_append(), except it also accepts an integer as a codepoint.
#define rb_str_cat_cstr(buf, str)
Identical to rb_str_cat(), except it assumes the passed pointer is a pointer to a C string.
#define rb_str_new_cstr(str)
Identical to rb_str_new, except it assumes the passed pointer is a pointer to a C string.
int rb_thread_interrupted(VALUE thval)
Checks if the thread's execution was recently interrupted.
VALUE rb_thread_local_aref(VALUE thread, ID key)
This badly named function reads from a Fiber local storage.
VALUE rb_mutex_new(void)
Creates a mutex.
VALUE rb_thread_kill(VALUE thread)
Terminates the given thread.
#define RUBY_UBF_IO
A special UBF for blocking IO operations.
VALUE rb_thread_main(void)
Obtains the "main" thread.
VALUE rb_exec_recursive(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
"Recursion" API entry point.
void rb_thread_sleep_forever(void)
Blocks indefinitely.
void rb_thread_fd_close(int fd)
This function is now a no-op.
void rb_thread_wait_for(struct timeval time)
Identical to rb_thread_sleep(), except it takes struct timeval instead.
VALUE rb_mutex_synchronize(VALUE mutex, VALUE(*func)(VALUE arg), VALUE arg)
Obtains the lock, runs the passed function, and releases the lock when it completes.
VALUE rb_thread_stop(void)
Stops the current thread.
VALUE rb_mutex_sleep(VALUE self, VALUE timeout)
Releases the lock held in the mutex and waits for the period of time; reacquires the lock on wakeup.
VALUE rb_exec_recursive_paired(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive(), except it checks for the recursion on the ordered pair of { g,...
void rb_unblock_function_t(void *)
This is the type of UBFs.
void rb_thread_atfork_before_exec(void)
:FIXME: situation of this function is unclear.
void rb_thread_check_ints(void)
Checks for interrupts.
VALUE rb_thread_run(VALUE thread)
This is a rb_thread_wakeup() + rb_thread_schedule() combo.
VALUE rb_thread_wakeup(VALUE thread)
Marks a given thread as eligible for scheduling.
VALUE rb_mutex_unlock(VALUE mutex)
Releases the mutex.
VALUE rb_exec_recursive_paired_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE p, VALUE h)
Identical to rb_exec_recursive_outer(), except it checks for the recursion on the ordered pair of { g...
void rb_thread_sleep_deadly(void)
Identical to rb_thread_sleep_forever(), except the thread calling this function is considered "dead" ...
void rb_thread_atfork(void)
A pthread_atfork(3posix)-like API.
VALUE rb_thread_current(void)
Obtains the "current" thread.
int rb_thread_alone(void)
Checks if the thread this function is running is the only thread that is currently alive.
VALUE rb_thread_local_aset(VALUE thread, ID key, VALUE val)
This badly named function writes to a Fiber local storage.
void rb_thread_schedule(void)
Tries to switch to another thread.
#define RUBY_UBF_PROCESS
A special UBF for blocking process operations.
VALUE rb_exec_recursive_outer(VALUE(*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h)
Identical to rb_exec_recursive(), except it calls f for outermost recursion only.
VALUE rb_thread_wakeup_alive(VALUE thread)
Identical to rb_thread_wakeup(), except it doesn't raise on an already killed thread.
VALUE rb_mutex_lock(VALUE mutex)
Attempts to lock the mutex.
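A hedged sketch of the C-level Mutex API listed above, protecting a shared array; the variable and function names are illustrative, and shared_log/log_mutex are assumed to be created (e.g. with rb_ary_new() and rb_mutex_new()) and GC-registered at init time:

static VALUE shared_log;   /* assumed initialised and marked elsewhere */
static VALUE log_mutex;    /* e.g. log_mutex = rb_mutex_new(); at init */

static VALUE
append_entry(VALUE entry)
{
    return rb_ary_push(shared_log, entry);
}

static VALUE
log_entry(VALUE entry)
{
    /* rb_mutex_synchronize releases the lock even if append_entry raises,
     * which a bare rb_mutex_lock()/rb_mutex_unlock() pair would not. */
    return rb_mutex_synchronize(log_mutex, append_entry, entry);
}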
void rb_thread_sleep(int sec)
Blocks for the given period of time.
void rb_timespec_now(struct timespec *ts)
Fills the current time into the given struct.
struct timeval rb_time_timeval(VALUE time)
Converts an instance of rb_cTime to a struct timeval that represents the identical point of time.
VALUE rb_ivar_set(VALUE obj, ID name, VALUE val)
Identical to rb_iv_set(), except it accepts the name as an ID instead of a C string.
VALUE rb_ivar_get(VALUE obj, ID name)
Identical to rb_iv_get(), except it accepts the name as an ID instead of a C string.
VALUE rb_class_path(VALUE mod)
Identical to rb_mod_name(), except it returns #<Class: ...> style inspection for anonymous modules.
void rb_define_alloc_func(VALUE klass, rb_alloc_func_t func)
Sets the allocator function of a class.
int rb_sourceline(void)
Resembles __LINE__.
static ID rb_intern_const(const char *str)
This is a "tiny optimisation" over rb_intern().
ID rb_check_id(volatile VALUE *namep)
Detects if the given name is already interned or not.
VALUE rb_to_symbol(VALUE name)
Identical to rb_intern_str(), except it generates a dynamic symbol if necessary.
#define RB_IO_POINTER(obj, fp)
Queries the underlying IO pointer.
VALUE rb_eIOTimeoutError
Indicates that a timeout has occurred while performing an IO operation.
#define RB_NOGVL_UBF_ASYNC_SAFE
Passing this flag to rb_nogvl() indicates that the passed UBF is async-signal-safe.
void * rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
Get thread and tool specific data.
#define RB_NOGVL_INTR_FAIL
Passing this flag to rb_nogvl() prevents it from checking interrupts.
void rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
Set thread and tool specific data.
rb_internal_thread_specific_key_t rb_internal_thread_specific_key_create(void)
Create a key to store thread specific data.
void * rb_nogvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2, int flags)
Identical to rb_thread_call_without_gvl(), except it additionally takes "flags" that change the behav...
void * rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
(Re-)acquires the GVL.
#define RB_NOGVL_OFFLOAD_SAFE
Passing this flag to rb_nogvl() indicates that the passed function is safe to offload to a background...
void * rb_thread_call_without_gvl2(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Identical to rb_thread_call_without_gvl(), except it does not interface with signals etc.
void * rb_thread_call_without_gvl(void *(*func)(void *), void *data1, rb_unblock_function_t *ubf, void *data2)
Allows the passed function to run in parallel with other Ruby threads.
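A hedged sketch of releasing the GVL around a blocking system call with rb_thread_call_without_gvl; the struct and function names are mine, not from this file:

#include <unistd.h>
#include "ruby/thread.h"

struct read_args { int fd; void *buf; size_t len; ssize_t result; };

/* Runs without the GVL, so other Ruby threads keep running meanwhile. */
static void *
blocking_read(void *p)
{
    struct read_args *a = p;
    a->result = read(a->fd, a->buf, a->len);
    return NULL;
}

static ssize_t
read_releasing_gvl(int fd, void *buf, size_t len)
{
    struct read_args a = { fd, buf, len, -1 };
    /* RUBY_UBF_IO asks Ruby to interrupt the blocked read() when the
     * thread is killed or receives an interrupt. */
    rb_thread_call_without_gvl(blocking_read, &a, RUBY_UBF_IO, NULL);
    return a.result;
}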
#define RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)
Shim for block function parameters.
VALUE rb_yield(VALUE val)
Yields the block.
void rb_throw_obj(VALUE tag, VALUE val)
Identical to rb_throw(), except it allows arbitrary Ruby object to become a tag.
static int rb_fd_max(const rb_fdset_t *f)
It seems this function has no use.
void rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
Destructively overwrites an fdset with another.
void rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
Identical to rb_fd_copy(), except it copies unlimited number of file descriptors.
void rb_fd_term(rb_fdset_t *f)
Destroys the rb_fdset_t, releasing any memory and resources it used.
static fd_set * rb_fd_ptr(const rb_fdset_t *f)
Raw pointer to fd_set.
#define MEMCPY(p1, p2, type, n)
Handy macro to call memcpy.
#define ALLOCA_N(type, n)
#define MEMZERO(p, type, n)
Handy macro to erase a region of memory.
VALUE rb_thread_create(type *q, void *w)
Creates a rb_cThread instance.
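A hedged sketch of spawning a Ruby thread from C with rb_thread_create; the worker and wrapper names are illustrative:

/* Worker body; runs in the new Ruby thread with the GVL held, like any
 * Ruby-level thread.  The pointed-to int must stay valid until it finishes. */
static VALUE
worker_body(void *arg)
{
    int n = *(int *)arg;
    return INT2NUM(n * 2);
}

static VALUE
spawn_and_wait(int *np)
{
    VALUE th = rb_thread_create(worker_body, np);
    /* Thread#value joins the thread and returns worker_body's result. */
    return rb_funcall(th, rb_intern("value"), 0);
}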
void rb_hash_foreach(VALUE q, int_type *w, VALUE e)
Iteration over the given hash.
VALUE rb_ensure(type *q, VALUE w, type *e, VALUE r)
An equivalent of ensure clause.
#define rb_fd_isset
Queries if the given fd is in the rb_fdset_t.
#define rb_fd_select
Waits for multiple file descriptors at once.
#define rb_fd_init
Initialises the given rb_fdset_t.
#define rb_fd_set
Sets the given fd to the rb_fdset_t.
#define rb_fd_zero
Clears the given rb_fdset_t.
#define rb_fd_clr
Unsets the given fd from the rb_fdset_t.
#define RARRAY_LEN
Just another name of rb_array_len.
static int RARRAY_LENINT(VALUE ary)
Identical to rb_array_len(), except it differs for the return type.
static void RARRAY_ASET(VALUE ary, long i, VALUE v)
Assigns an object in an array.
#define RARRAY_AREF(a, i)
#define RARRAY_CONST_PTR
Just another name of rb_array_const_ptr.
static VALUE RBASIC_CLASS(VALUE obj)
Queries the class of an object.
#define RBASIC(obj)
Convenient casting macro.
#define RCLASS_SUPER
Just another name of rb_class_get_superclass.
#define DATA_PTR(obj)
Convenient getter macro.
#define RHASH_EMPTY_P(h)
Checks if the hash is empty.
#define StringValueCStr(v)
Identical to StringValuePtr, except it additionally checks for the contents for viability as a C stri...
#define RUBY_TYPED_DEFAULT_FREE
This is a value you can set to rb_data_type_struct::dfree.
#define TypedData_Get_Struct(obj, type, data_type, sval)
Obtains a C struct from inside of a wrapper Ruby object.
#define TypedData_Wrap_Struct(klass, data_type, sval)
Converts sval, a pointer to your struct, into a Ruby object.
#define TypedData_Make_Struct(klass, type, data_type, sval)
Identical to TypedData_Wrap_Struct, except it allocates a new data region internally instead of takin...
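A hedged sketch of the TypedData pattern referenced by the three entries above; the struct and type names are illustrative, not from thread.c. The allocator would typically be registered with rb_define_alloc_func:

struct my_counter { long value; };

static const rb_data_type_t my_counter_type = {
    "my_counter",
    { NULL, RUBY_TYPED_DEFAULT_FREE, NULL },   /* dmark, dfree, dsize */
    0, 0, RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE
my_counter_alloc(VALUE klass)
{
    struct my_counter *c;
    VALUE obj = TypedData_Make_Struct(klass, struct my_counter, &my_counter_type, c);
    c->value = 0;
    return obj;
}

static VALUE
my_counter_increment(VALUE self)
{
    struct my_counter *c;
    TypedData_Get_Struct(self, struct my_counter, &my_counter_type, c);
    return LONG2NUM(++c->value);
}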
#define errno
Ractor-aware version of errno.
int ruby_native_thread_p(void)
Queries if the thread which calls this function is a Ruby thread.
int ruby_snprintf(char *str, size_t n, char const *fmt,...)
Our own locale-insensitive version of snprintf(3).
#define RB_PASS_CALLED_KEYWORDS
Pass keywords if current method is called with keywords, useful for argument delegation.
VALUE rb_fiber_scheduler_blocking_operation_wait(VALUE scheduler, void *(*function)(void *), void *data, rb_unblock_function_t *unblock_function, void *data2, int flags, struct rb_fiber_scheduler_blocking_operation_state *state)
Defer the execution of the passed function to the scheduler.
VALUE rb_fiber_scheduler_current(void)
Identical to rb_fiber_scheduler_get(), except it also returns RUBY_Qnil in case of a blocking fiber.
VALUE rb_fiber_scheduler_fiber_interrupt(VALUE scheduler, VALUE fiber, VALUE exception)
Interrupt a fiber by raising an exception.
VALUE rb_fiber_scheduler_block(VALUE scheduler, VALUE blocker, VALUE timeout)
Non-blocking wait for the passed "blocker", which is for instance Thread.join or Mutex....
VALUE rb_fiber_scheduler_set(VALUE scheduler)
Destructively assigns the passed scheduler to that of the current thread that is calling this functio...
VALUE rb_fiber_scheduler_unblock(VALUE scheduler, VALUE blocker, VALUE fiber)
Wakes up a fiber previously blocked using rb_fiber_scheduler_block().
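A hedged sketch of how a blocking primitive can defer to an installed fiber scheduler, in the spirit of the entries above; wait_on and its fallback are illustrative, and timeout is assumed to convert cleanly to a struct timeval:

#include "ruby/fiber/scheduler.h"

static void
wait_on(VALUE blocker, VALUE timeout)
{
    VALUE scheduler = rb_fiber_scheduler_current();

    if (!NIL_P(scheduler)) {
        /* Non-blocking path: the scheduler suspends the current fiber until
         * rb_fiber_scheduler_unblock() is called or the timeout fires. */
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        /* Blocking path: fall back to an ordinary timed wait. */
        rb_thread_wait_for(rb_time_timeval(timeout));
    }
}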
int rb_thread_fd_select(int nfds, rb_fdset_t *rfds, rb_fdset_t *wfds, rb_fdset_t *efds, struct timeval *timeout)
Waits for multiple file descriptors at once.
#define rb_fd_resize(n, f)
Does nothing (defined for compatibility).
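A hedged sketch of waiting for readability on a single descriptor with the rb_fdset_t wrappers and rb_thread_fd_select listed above; the function name is illustrative:

#include <sys/time.h>

static int
wait_readable(int fd, struct timeval *timeout)
{
    rb_fdset_t set;
    int ready, readable;

    rb_fd_init(&set);
    rb_fd_set(fd, &set);
    /* rb_thread_fd_select releases the GVL while waiting and returns the
     * number of ready descriptors, 0 on timeout, or -1 on error. */
    ready = rb_thread_fd_select(fd + 1, &set, NULL, NULL, timeout);
    readable = (ready > 0) && rb_fd_isset(fd, &set);
    rb_fd_term(&set);
    return readable;
}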
static bool RB_TEST(VALUE obj)
Emulates Ruby's "if" statement.
@ RUBY_Qundef
Represents so-called undef.
#define RTEST
This is an old name of RB_TEST.
#define _(args)
This was a transition path from K&R to ANSI.
This is the struct that holds necessary info for a struct.
The data structure which wraps the fd_set bitmap used by select(2).
int maxfd
Maximum allowed number of FDs.
fd_set * fdset
File descriptors buffer.
int capa
Maximum allowed number of FDs.
Ruby's IO, metadata and buffers.
VALUE self
The IO's Ruby level counterpart.
struct ccan_list_head blocking_operations
Threads that are performing a blocking operation without the GVL using this IO.
const rb_iseq_t * iseqptr
iseq pointer, should be separated from iseqval
void rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
Blocks until the current thread obtains a lock.
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_lock.
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_initialize.
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_unlock.
void rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
Releases a lock.
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
Just another name of rb_nativethread_lock_destroy.
void rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
Fills the passed lock with an initial value.
void rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
Destroys the passed mutex.
uintptr_t ID
Type that represents a Ruby identifier such as a variable name.
uintptr_t VALUE
Type that represents a Ruby object.
static bool RB_TYPE_P(VALUE obj, enum ruby_value_type t)
Queries if the given object is of given type.