#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#include "internal/gc.h"
#include "internal/sanitizers.h"

#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#ifdef HAVE_THR_STKSEGMENT
#if defined(HAVE_FCNTL_H)
#elif defined(HAVE_SYS_FCNTL_H)
#ifdef HAVE_SYS_PRCTL_H
#if defined(HAVE_SYS_TIME_H)
#include <sys/syscall.h>
# include <AvailabilityMacros.h>

#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
# define USE_EVENTFD (0)
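/* Editorial note (summarizing the block below): when pthread_condattr_setclock(),
 * clock_gettime() and CLOCK_MONOTONIC are available, condition variables are
 * created with condattr_monotonic (it is passed to pthread_cond_init() further
 * down); otherwise the pointer stays NULL and the default clock is used. */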
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static pthread_condattr_t condattr_mono;
static pthread_condattr_t *condattr_monotonic = &condattr_mono;
static const void *const condattr_monotonic = NULL;

#ifndef HAVE_SYS_EVENT_H
#define HAVE_SYS_EVENT_H 0
#ifndef HAVE_SYS_EPOLL_H
#define HAVE_SYS_EPOLL_H 0
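/* Editorial note: M:N thread scheduling (USE_MN_THREADS) needs an event
 * multiplexing backend used by the timer thread, either epoll (<sys/epoll.h>)
 * or kqueue (<sys/event.h>); it is disabled under Emscripten and the
 * pthread-based coroutine implementation. */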
  #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
    #define USE_MN_THREADS 0
  #elif HAVE_SYS_EPOLL_H
    #include <sys/epoll.h>
    #define USE_MN_THREADS 1
  #elif HAVE_SYS_EVENT_H
    #include <sys/event.h>
    #define USE_MN_THREADS 1
    #define USE_MN_THREADS 0

#define NATIVE_MUTEX_LOCK_DEBUG 0

mutex_debug(const char *msg, void *lock)
    if (NATIVE_MUTEX_LOCK_DEBUG) {
        static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
        if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
        fprintf(stdout, "%s: %p\n", msg, lock);
        if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}

    mutex_debug("lock", lock);
    if ((r = pthread_mutex_lock(lock)) != 0) {

    mutex_debug("unlock", lock);
    if ((r = pthread_mutex_unlock(lock)) != 0) {

    mutex_debug("trylock", lock);
    if ((r = pthread_mutex_trylock(lock)) != 0) {

    int r = pthread_mutex_init(lock, 0);
    mutex_debug("init", lock);

    int r = pthread_mutex_destroy(lock);
    mutex_debug("destroy", lock);

    int r = pthread_cond_init(cond, condattr_monotonic);

    int r = pthread_cond_destroy(cond);

        r = pthread_cond_signal(cond);
    } while (r == EAGAIN);

        r = pthread_cond_broadcast(cond);
    } while (r == EAGAIN);

    int r = pthread_cond_wait(cond, mutex);

native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
        rb_hrtime2timespec(&ts, abs);
        r = pthread_cond_timedwait(cond, mutex, &ts);
    } while (r == EINTR);
    if (r != 0 && r != ETIMEDOUT) {

native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
    if (condattr_monotonic) {
        return rb_hrtime_add(rb_hrtime_now(), rel);
    return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);

    rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
    native_cond_timedwait(cond, mutex, &hrmsec);

static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
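/* Editorial note: RB_INTERNAL_THREAD_HOOK() runs the registered internal
 * thread event hooks, if any. Two definitions follow: one that additionally
 * logs each event and call site to stderr for debugging, and the plain one
 * that only executes the hooks. */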
#define RB_INTERNAL_THREAD_HOOK(event, th) \
    if (UNLIKELY(rb_internal_thread_event_hooks)) { \
        fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
        rb_thread_execute_hooks(event, th); \
#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }

static rb_serial_t current_fork_gen = 1;

#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
#  define USE_UBF_LIST 1

static void threadptr_trap_interrupt(rb_thread_t *);

#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#define native_thread_yield() ((void)0)

static void timer_thread_wakeup(void);
static void timer_thread_wakeup_locked(rb_vm_t *vm);
static void timer_thread_wakeup_force(void);

#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)

    return th->nt->dedicated > 0;

thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
    fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
    ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
        i++; if (i>10) rb_bug("too many");
        fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
                th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
                th->nt ? (int)th->nt->serial : -1);

#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)

ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
    fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        if (i>10) rb_bug("!!");
        fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));

#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)

    RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
    VM_ASSERT(sched->lock_owner == NULL);
    sched->lock_owner = th;
    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));

    RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
    VM_ASSERT(sched->lock_owner == th);
    sched->lock_owner = NULL;

        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
        sched->lock_owner = th;

    VM_ASSERT(sched->lock_owner == th);
    VM_ASSERT(sched->lock_owner != NULL);

#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)

    return rb_ractor_id(r);
    VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
    VM_ASSERT(vm->ractor.sched.locked == false);

    vm->ractor.sched.lock_owner = cr;
    vm->ractor.sched.locked = true;

    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(vm->ractor.sched.lock_owner == cr);

    vm->ractor.sched.locked = false;
    vm->ractor.sched.lock_owner = NULL;

    RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
    ractor_sched_set_locked(vm, cr);

    RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
    ractor_sched_set_unlocked(vm, cr);

    VM_ASSERT(vm->ractor.sched.locked);
    VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);

    ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
        if (rth == th) return true;

ractor_sched_running_threads_size(rb_vm_t *vm)
    ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {

ractor_sched_timeslice_threads_size(rb_vm_t *vm)
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {

    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
        if (rth == th) return true;

static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);

#if USE_RUBY_DEBUG_LOG
    unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;

    if (del_th && sched->is_running_timeslice) {
        del_timeslice_th = del_th;
        sched->is_running_timeslice = false;
        del_timeslice_th = NULL;

    RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
                   rb_th_serial(add_th), rb_th_serial(del_th),
                   rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));

    ractor_sched_lock(vm, cr);
            VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
            VM_ASSERT(del_timeslice_th != NULL ||
                      !ractor_sched_timeslice_threads_contain_p(vm, del_th));

            ccan_list_del_init(&del_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt--;

            if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                ractor_sched_barrier_join_signal_locked(vm);
            sched->is_running = false;

            while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
                RUBY_DEBUG_LOG("barrier-wait");
                ractor_sched_barrier_join_signal_locked(vm);
                ractor_sched_barrier_join_wait_locked(vm, add_th);

            VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));

            ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
            vm->ractor.sched.running_cnt++;
            sched->is_running = true;
            VM_ASSERT(!vm->ractor.sched.barrier_waiting);

        if (add_timeslice_th) {
            int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
            VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
            ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
            sched->is_running_timeslice = true;
                timer_thread_wakeup_locked(vm);

        if (del_timeslice_th) {
            VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
            ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);

        VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
        VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
    ractor_sched_unlock(vm, cr);

    if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
        lock_owner = sched->lock_owner;
        thread_sched_unlock(sched, lock_owner);
        thread_sched_lock(sched, lock_owner);

    RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(sched->running == th);
    thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);

    ASSERT_thread_sched_locked(sched, th);
    thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);

    thread_sched_lock(sched, th);
        thread_sched_add_running_thread(sched, th);
    thread_sched_unlock(sched, th);

    thread_sched_lock(sched, th);
        thread_sched_del_running_thread(sched, th);
    thread_sched_unlock(sched, th);

    RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
    VM_ASSERT(sched->running != th);

    ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
        if (rth == th) return true;

    ASSERT_thread_sched_locked(sched, NULL);
    VM_ASSERT(sched->running != NULL);

    if (ccan_list_empty(&sched->readyq)) {
        next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
        VM_ASSERT(sched->readyq_cnt > 0);
        ccan_list_node_init(&next_th->sched.node.readyq);

    RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);

    ASSERT_thread_sched_locked(sched, NULL);
    RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));

    if (sched->is_running) {
        if (ccan_list_empty(&sched->readyq)) {
            thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
        VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));

    ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);

    ASSERT_thread_sched_locked(sched, NULL);
    VM_ASSERT(sched->running == next_th);

        if (th_has_dedicated_nt(next_th)) {
            RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
                RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
                    RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
                    RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
                    ractor_sched_enq(next_th->vm, next_th->ractor);
        RUBY_DEBUG_LOG("no waiting threads%s", "");
    RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
    VM_ASSERT(sched->running != th);
    VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));

    if (sched->running == NULL) {
        thread_sched_set_running(sched, th);
        if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
        thread_sched_enq(sched, th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
        thread_sched_to_ready_common(sched, th, true, false);
    thread_sched_unlock(sched, th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));

    if (th != sched->running) {
        while((next_th = sched->running) != th) {
            if (th_has_dedicated_nt(th)) {
                RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));

                thread_sched_set_lock_owner(sched, NULL);
                RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
                thread_sched_set_lock_owner(sched, th);

                RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
                if (th == sched->running) {
                    rb_ractor_thread_switch(th->ractor, th);

                if (can_direct_transfer &&
                    (next_th = sched->running) != NULL &&
                    RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    rb_ractor_set_current_ec(th->ractor, NULL);
                    thread_sched_switch(th, next_th);
                    thread_sched_set_lock_owner(sched, th);

                    native_thread_assign(NULL, th);
                    RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));

                    thread_sched_set_lock_owner(sched, NULL);
                    rb_ractor_set_current_ec(th->ractor, NULL);
                    coroutine_transfer0(th->sched.context, nt->nt_context, false);
                    thread_sched_set_lock_owner(sched, th);

                VM_ASSERT(rb_current_ec_noinline() == th->ec);

    VM_ASSERT(th->nt != NULL);
    VM_ASSERT(rb_current_ec_noinline() == th->ec);
    VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);

    thread_sched_add_running_thread(sched, th);

    RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
    VM_ASSERT(sched->running != th);
    VM_ASSERT(th_has_dedicated_nt(th));
    VM_ASSERT(GET_THREAD() == th);

    native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
    thread_sched_to_ready_common(sched, th, false, false);

    if (sched->running == th) {
        thread_sched_add_running_thread(sched, th);
    thread_sched_wait_running_turn(sched, th, false);

    thread_sched_lock(sched, th);
        thread_sched_to_running_common(sched, th);
    thread_sched_unlock(sched, th);
    ASSERT_thread_sched_locked(sched, th);
    VM_ASSERT(sched->running == th);
    VM_ASSERT(sched->running->nt != NULL);

        RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
        VM_ASSERT(th != next_th);

        thread_sched_set_running(sched, next_th);
        VM_ASSERT(next_th == sched->running);
        thread_sched_wakeup_running_thread(sched, next_th, will_switch);

            thread_sched_del_running_thread(sched, th);

    if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);

    RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));

    bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
    thread_sched_wakeup_next_thread(sched, th, can_switch);

    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, true);

    thread_sched_lock(sched, th);
        thread_sched_to_dead_common(sched, th);
    thread_sched_unlock(sched, th);

    RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
    thread_sched_to_waiting_common0(sched, th, false);

    thread_sched_lock(sched, th);
        thread_sched_to_waiting_common(sched, th);
    thread_sched_unlock(sched, th);

    th->unblock.func = func;
    th->unblock.arg = arg;

ubf_waiting(void *ptr)
    th->unblock.func = NULL;
    th->unblock.arg = NULL;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
        if (sched->running == th) {
            thread_sched_to_ready_common(sched, th, true, false);
    thread_sched_unlock(sched, th);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    RB_VM_SAVE_MACHINE_CONTEXT(th);
    setup_ubf(th, ubf_waiting, (void *)th);

    thread_sched_lock(sched, th);
        if (!RUBY_VM_INTERRUPTED(th->ec)) {
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
            RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);

    RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);

    thread_sched_lock(sched, th);
        if (!ccan_list_empty(&sched->readyq)) {
            thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
            bool can_direct_transfer = !th_has_dedicated_nt(th);
            thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
            thread_sched_wait_running_turn(sched, th, can_direct_transfer);
            VM_ASSERT(sched->readyq_cnt == 0);
    thread_sched_unlock(sched, th);

    sched->lock_owner = NULL;
    ccan_list_head_init(&sched->readyq);
    sched->readyq_cnt = 0;
    if (!atfork) sched->enable_mn_threads = true;

#ifdef RUBY_ASAN_ENABLED
    void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
    __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);

    struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);

    VM_ASSERT(!to_dead);
#ifdef RUBY_ASAN_ENABLED
    __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
                                    (const void**)&returning_from->stack_base, &returning_from->stack_size);

    VM_ASSERT(!nt->dedicated);
    VM_ASSERT(next_th->nt == NULL);

    RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));

    ruby_thread_set_native(next_th);
    native_thread_assign(nt, next_th);
    coroutine_transfer0(current_cont, next_th->sched.context, to_dead);

    native_thread_assign(NULL, cth);
    RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
    thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);

#if VM_CHECK_MODE > 0
    ASSERT_ractor_sched_locked(vm, cr);
    ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
        VM_ASSERT(r != prev_r);

    VM_ASSERT(sched->running != NULL);
    VM_ASSERT(sched->running->nt == NULL);

    ractor_sched_lock(vm, cr);
#if VM_CHECK_MODE > 0
        ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {

        ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
        vm->ractor.sched.grq_cnt++;
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
    ractor_sched_unlock(vm, cr);
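/* Editorial note: SNT_KEEP_SECONDS controls how long an idle shared native
 * thread keeps waiting on the global ready queue (grq) for another ractor to
 * run; with a positive value the timed wait below can expire and the counters
 * are adjusted, with 0 that timeout path is compiled out. */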
#ifndef SNT_KEEP_SECONDS
#define SNT_KEEP_SECONDS 0
#define MINIMUM_SNT 0

    ractor_sched_lock(vm, cr);
        RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
        VM_ASSERT(rb_current_execution_context(false) == NULL);
        VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);

        while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
            RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);

#if SNT_KEEP_SECONDS > 0
            rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
            if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
                RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
                VM_ASSERT(r == NULL);
                vm->ractor.sched.snt_cnt--;
                vm->ractor.sched.running_cnt--;
                RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);

            ractor_sched_set_unlocked(vm, cr);
            ractor_sched_set_locked(vm, cr);
            RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);

        VM_ASSERT(rb_current_execution_context(false) == NULL);

            VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
            vm->ractor.sched.grq_cnt--;
            RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);

            VM_ASSERT(SNT_KEEP_SECONDS > 0);
    ractor_sched_unlock(vm, cr);

    cr->sync.wait.waiting_thread = th;
    setup_ubf(th, ubf, (void *)cr);

    thread_sched_lock(sched, th);
        rb_ractor_unlock_self(cr);
            if (RUBY_VM_INTERRUPTED(th->ec)) {
                RUBY_DEBUG_LOG("interrupted");
            else if (cr->sync.wait.wakeup_status != wakeup_none) {
                RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);

                RB_VM_SAVE_MACHINE_CONTEXT(th);
                th->status = THREAD_STOPPED_FOREVER;

                bool can_direct_transfer = !th_has_dedicated_nt(th);
                thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
                thread_sched_wait_running_turn(sched, th, can_direct_transfer);
                th->status = THREAD_RUNNABLE;
    thread_sched_unlock(sched, th);

    setup_ubf(th, NULL, NULL);

    rb_ractor_lock_self(cr);
    cr->sync.wait.waiting_thread = NULL;

    VM_ASSERT(r->sync.wait.wakeup_status != 0);

    thread_sched_lock(sched, r_th);
        if (r_th->status == THREAD_STOPPED_FOREVER) {
            thread_sched_to_ready_common(sched, r_th, true, false);
    thread_sched_unlock(sched, r_th);
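/* Editorial note: the ractor scheduler barrier is complete when every running
 * thread except the caller has joined, i.e. when
 * running_cnt - barrier_waiting_cnt == 1 (see
 * ractor_sched_barrier_completed_p() below). */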
ractor_sched_barrier_completed_p(rb_vm_t *vm)
    RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
    VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
    return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;

    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == cr);
    VM_ASSERT(!vm->ractor.sched.barrier_waiting);
    VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);

    RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);

    unsigned int lock_rec;

    ractor_sched_lock(vm, cr);
        vm->ractor.sched.barrier_waiting = true;

        lock_rec = vm->ractor.sync.lock_rec;
        vm->ractor.sync.lock_rec = 0;
        vm->ractor.sync.lock_owner = NULL;

        ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
            if (ith->ractor != cr) {
                RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
                RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);

        while (!ractor_sched_barrier_completed_p(vm)) {
            ractor_sched_set_unlocked(vm, cr);
            ractor_sched_set_locked(vm, cr);
    ractor_sched_unlock(vm, cr);

    vm->ractor.sync.lock_rec = lock_rec;
    vm->ractor.sync.lock_owner = cr;
    RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);
    ractor_sched_lock(vm, cr);
        vm->ractor.sched.barrier_waiting = false;
        vm->ractor.sched.barrier_serial++;
        vm->ractor.sched.barrier_waiting_cnt = 0;
    ractor_sched_unlock(vm, cr);

ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
    if (ractor_sched_barrier_completed_p(vm)) {

    VM_ASSERT(vm->ractor.sched.barrier_waiting);

    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;

    while (vm->ractor.sched.barrier_serial == barrier_serial) {
        RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
        RB_VM_SAVE_MACHINE_CONTEXT(th);

        ractor_sched_set_unlocked(vm, cr);
        ractor_sched_set_locked(vm, cr);

        RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);

    VM_ASSERT(cr->threads.sched.running != NULL);
    VM_ASSERT(cr == GET_RACTOR());
    VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
    VM_ASSERT(vm->ractor.sched.barrier_waiting);

#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
    unsigned int barrier_serial = vm->ractor.sched.barrier_serial;

    RUBY_DEBUG_LOG("join");

        VM_ASSERT(vm->ractor.sched.barrier_waiting);
        VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);

        ractor_sched_lock(vm, cr);
            vm->ractor.sched.barrier_waiting_cnt++;
            RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);

            ractor_sched_barrier_join_signal_locked(vm);
            ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
        ractor_sched_unlock(vm, cr);

static void clear_thread_cache_altstack(void);

    clear_thread_cache_altstack();

#ifdef RB_THREAD_T_HAS_NATIVE_ID
get_native_thread_id(void)
    return (int)syscall(SYS_gettid);
#elif defined(__FreeBSD__)
    return pthread_getthreadid_np();

#if defined(HAVE_WORKING_FORK)
    rb_thread_sched_init(sched, true);

    if (th_has_dedicated_nt(th)) {
        vm->ractor.sched.snt_cnt = 0;
        vm->ractor.sched.snt_cnt = 1;
    vm->ractor.sched.running_cnt = 0;

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    VM_ASSERT(sched->is_running);
    sched->is_running_timeslice = false;

    if (sched->running != th) {
        thread_sched_to_running(sched, th);
        thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);

#ifdef RB_THREAD_T_HAS_NATIVE_ID
        th->nt->tid = get_native_thread_id();

#ifdef RB_THREAD_LOCAL_SPECIFIER
static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
static pthread_key_t ruby_native_thread_key;

ruby_thread_from_native(void)
#ifdef RB_THREAD_LOCAL_SPECIFIER
    return ruby_native_thread;
    return pthread_getspecific(ruby_native_thread_key);

    ccan_list_node_init(&th->sched.node.ubf);

        rb_ractor_set_current_ec(th->ractor, th->ec);

#ifdef RB_THREAD_LOCAL_SPECIFIER
    ruby_native_thread = th;
    return pthread_setspecific(ruby_native_thread_key, th) == 0;

#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        int r = pthread_condattr_init(condattr_monotonic);
            r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
        if (r) condattr_monotonic = NULL;

#ifndef RB_THREAD_LOCAL_SPECIFIER
    if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_native_thread_key)");
    if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
        rb_bug("pthread_key_create failed (ruby_current_ec_key)");

    ruby_posix_signal(SIGVTALRM, null_func);

    ccan_list_head_init(&vm->ractor.sched.grq);
    ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
    ccan_list_head_init(&vm->ractor.sched.running_threads);

    main_th->nt->thread_id = pthread_self();
    main_th->nt->serial = 1;
#ifdef RUBY_NT_SERIAL
    ruby_thread_set_native(main_th);
    native_thread_setup(main_th->nt);
    native_thread_setup_on_thread(main_th->nt);

    TH_SCHED(main_th)->running = main_th;
    main_th->has_dedicated_nt = 1;

    thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);

    main_th->nt->dedicated = 1;
    main_th->nt->vm = vm;

    vm->ractor.sched.dnt_cnt = 1;
extern int ruby_mn_threads_enabled;

ruby_mn_threads_params(void)
    const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
    bool enable_mn_threads = false;

    if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
        ruby_mn_threads_enabled = 1;
    main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;

    const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
    const int default_max_cpu = 8;
    int max_cpu = default_max_cpu;

    if (USE_MN_THREADS && max_cpu_cstr) {
        int given_max_cpu = atoi(max_cpu_cstr);
        if (given_max_cpu > 0) {
            max_cpu = given_max_cpu;

    vm->ractor.sched.max_cpu = max_cpu;

    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
            vm->ractor.sched.snt_cnt--;
            vm->ractor.sched.dnt_cnt++;
        ractor_sched_unlock(vm, cr);

    RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
    VM_ASSERT(nt->dedicated > 0);

    if (nt->dedicated == 0) {
        ractor_sched_lock(vm, cr);
            nt->vm->ractor.sched.snt_cnt++;
            nt->vm->ractor.sched.dnt_cnt--;
        ractor_sched_unlock(vm, cr);

#if USE_RUBY_DEBUG_LOG
            RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
            RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
            RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
            RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);

    if (&nt->cond.readyq != &nt->cond.intr) {

    RB_ALTSTACK_FREE(nt->altstack);
    ruby_xfree(nt->nt_context);

#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1

#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
#   define MAINSTACKADDR_AVAILABLE 0
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
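/* Editorial note: get_stack() below obtains the current thread's stack base
 * address and size through whichever platform API is available
 * (pthread_getattr_np, pthread_attr_get_np, pthread_get_stackaddr_np/
 * pthread_get_stacksize_np, thr_stksegment/pthread_stackseg_np,
 * pthread_getthrds_np, or Haiku's get_thread_info). */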
#ifdef STACKADDR_AVAILABLE
get_stack(void **addr, size_t *size)
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
    pthread_attr_t attr;
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
    CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
    guard = getpagesize();
    pthread_attr_destroy(&attr);
#elif defined HAVE_PTHREAD_ATTR_GET_NP
    pthread_attr_t attr;
    CHECK_ERR(pthread_attr_init(&attr));
    CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
    CHECK_ERR(pthread_attr_getstacksize(&attr, size));
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
    pthread_attr_destroy(&attr);
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    pthread_t th = pthread_self();
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
    CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
    *size = stk.ss_size;
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    pthread_t th = pthread_self();
    struct __pthrdsinfo thinfo;
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#elif defined __HAIKU__
    STACK_GROW_DIR_DETECTION;
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
    *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
    STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
#error STACKADDR_AVAILABLE is defined but not implemented.

    rb_nativethread_id_t id;
    size_t stack_maxsize;
} native_main_thread;
#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;

    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5

space_size(size_t stack_size)
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;

native_thread_init_main_thread_stack(void *addr)
    native_main_thread.id = pthread_self();
#ifdef RUBY_ASAN_ENABLED
    addr = asan_get_real_stack_addr((void *)addr);

#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
        if (get_main_stack(&stackaddr, &size) == 0) {
            native_main_thread.stack_maxsize = size;
            native_main_thread.stack_start = stackaddr;

#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
    if (!native_main_thread.stack_start ||
        STACK_UPPER((VALUE *)(void *)&addr,
                    native_main_thread.stack_start > (VALUE *)addr,
                    native_main_thread.stack_start < (VALUE *)addr)) {
        native_main_thread.stack_start = (VALUE *)addr;

#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
    size_t size = PTHREAD_STACK_DEFAULT;
    size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
    int pagesize = getpagesize();

    STACK_GROW_DIR_DETECTION;
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
        size = (size_t)rlim.rlim_cur;
    addr = native_main_thread.stack_start;
    if (IS_STACK_DIR_UPPER()) {
        space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
        space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
    native_main_thread.stack_maxsize = space;

#if MAINSTACKADDR_AVAILABLE
        STACK_GROW_DIR_DETECTION;

        if (IS_STACK_DIR_UPPER()) {
            start = native_main_thread.stack_start;
            end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
            start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
            end = native_main_thread.stack_start;

        if ((void *)addr < start || (void *)addr > end) {
            native_main_thread.stack_start = (VALUE *)addr;
            native_main_thread.stack_maxsize = 0;

#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}

native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
    rb_nativethread_id_t curr = pthread_self();
#ifdef RUBY_ASAN_ENABLED
    local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
    th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();

    if (!native_main_thread.id) {
        native_thread_init_main_thread_stack(local_in_parent_frame);

    if (pthread_equal(curr, native_main_thread.id)) {
        th->ec->machine.stack_start = native_main_thread.stack_start;
        th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
#ifdef STACKADDR_AVAILABLE
        if (th_has_dedicated_nt(th)) {
            if (get_stack(&start, &size) == 0) {
                uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
                th->ec->machine.stack_start = local_in_parent_frame;
                th->ec->machine.stack_maxsize = size - diff;
        rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
    pthread_attr_t attr;
    const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
    const size_t space = space_size(stack_size);

    nt->machine_stack_maxsize = stack_size - space;

#ifdef USE_SIGALTSTACK
    nt->altstack = rb_allocate_sigaltstack();

    CHECK_ERR(pthread_attr_init(&attr));

# ifdef PTHREAD_STACK_MIN
    RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
    CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));

# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
    CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
    CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));

    err = pthread_create(&nt->thread_id, &attr, nt_start, nt);

    RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);

    CHECK_ERR(pthread_attr_destroy(&attr));

    if (&nt->cond.readyq != &nt->cond.intr) {

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();

    RB_ALTSTACK_INIT(nt->altstack, nt->altstack);

native_thread_alloc(void)
    native_thread_setup(nt);
#if USE_RUBY_DEBUG_LOG

    th->nt = native_thread_alloc();
    th->nt->vm = th->vm;
    th->nt->running_thread = th;
    th->nt->dedicated = 1;

    size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
    void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
    th->sched.malloc_stack = true;
    rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
    th->sched.context_stack = vm_stack;

    int err = native_thread_create0(th->nt);
        thread_sched_to_ready(TH_SCHED(th), th);

    VALUE stack_start = 0;
    VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);

    native_thread_init_stack(th, stack_start_addr);
    thread_start_func_2(th, th->ec->machine.stack_start);

    native_thread_setup_on_thread(nt);

#ifdef RB_THREAD_T_HAS_NATIVE_ID
    nt->tid = get_native_thread_id();

#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
    ruby_nt_serial = nt->serial;

    RUBY_DEBUG_LOG("nt:%u", nt->serial);

    if (!nt->dedicated) {
        coroutine_initialize_main(nt->nt_context);

    if (nt->dedicated) {
        RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
        ruby_thread_set_native(th);

        thread_sched_lock(sched, th);
            if (sched->running == th) {
                thread_sched_add_running_thread(sched, th);
            thread_sched_wait_running_turn(sched, th, false);
        thread_sched_unlock(sched, th);

        call_thread_start_func_2(th);

            RUBY_DEBUG_LOG("check next");

            thread_sched_lock(sched, NULL);
                if (next_th && next_th->nt == NULL) {
                    RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
                    thread_sched_switch0(nt->nt_context, next_th, nt, false);
                    RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
            thread_sched_unlock(sched, NULL);

    if (nt->dedicated) {
static int native_thread_create_shared(rb_thread_t *th);
static void nt_free_stack(void *mstack);

    if (th->sched.malloc_stack) {

    th->sched.finished = false;
        ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);

    if (th->sched.malloc_stack) {
        ruby_xfree(th->sched.context_stack);
        native_thread_destroy(th->nt);
        nt_free_stack(th->sched.context_stack);

    ruby_xfree(th->sched.context);
    th->sched.context = NULL;

    ruby_xfree(th->sched.context_stack);
    native_thread_destroy(th->nt);

rb_thread_sched_mark_zombies(rb_vm_t *vm)
    if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
        ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
            if (zombie_th->sched.finished) {
                ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
                rb_gc_mark(zombie_th->self);

    VM_ASSERT(th->nt == 0);
    RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);

    if (!th->ractor->threads.sched.enable_mn_threads) {
        th->has_dedicated_nt = 1;

    if (th->has_dedicated_nt) {
        return native_thread_create_dedicated(th);
        return native_thread_create_shared(th);

#if USE_NATIVE_THREAD_PRIORITY
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    struct sched_param sp;
    int priority = 0 - th->priority;
    pthread_getschedparam(th->nt->thread_id, &policy, &sp);
    max = sched_get_priority_max(policy);
    min = sched_get_priority_min(policy);

    if (min > priority) {
    else if (max < priority) {

    sp.sched_priority = priority;
    pthread_setschedparam(th->nt->thread_id, policy, &sp);

    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);

ubf_pthread_cond_signal(void *ptr)
    RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);

native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
    rb_nativethread_lock_t *lock = &th->interrupt_lock;
    rb_nativethread_cond_t *cond = &th->nt->cond.intr;

    const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;

    THREAD_BLOCKING_BEGIN(th);
        th->unblock.func = ubf_pthread_cond_signal;
        th->unblock.arg = th;

        if (RUBY_VM_INTERRUPTED(th->ec)) {
            RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));

                end = native_cond_timeout(cond, *rel);
                native_cond_timedwait(cond, lock, &end);

        th->unblock.func = 0;
    THREAD_BLOCKING_END(th);

    RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
static CCAN_LIST_HEAD(ubf_list_head);
static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;

ubf_list_atfork(void)
    ccan_list_head_init(&ubf_list_head);

    ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
        if (list_th == th) return true;

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    VM_ASSERT(th->unblock.func != NULL);

        if (ccan_list_empty((struct ccan_list_head*)node)) {
            VM_ASSERT(!ubf_list_contain_p(th));
            ccan_list_add(&ubf_list_head, node);

    timer_thread_wakeup();

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    struct ccan_list_node *node = &th->sched.node.ubf;

    VM_ASSERT(th->unblock.func == NULL);

    if (!ccan_list_empty((struct ccan_list_head*)node)) {
            VM_ASSERT(ubf_list_contain_p(th));
            ccan_list_del_init(node);

    RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
    pthread_kill(th->nt->thread_id, SIGVTALRM);

ubf_select(void *ptr)
    RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
    ubf_wakeup_thread(th);
    register_ubf_list(th);

ubf_threads_empty(void)
    return ccan_list_empty(&ubf_list_head) != 0;

ubf_wakeup_all_threads(void)
    if (!ubf_threads_empty()) {
        ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
            ubf_wakeup_thread(th);

#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
static void ubf_wakeup_all_threads(void) { return; }
static bool ubf_threads_empty(void) { return true; }
#define ubf_list_atfork() do {} while (0)

#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)

rb_thread_wakeup_timer_thread(int sig)
    timer_thread_wakeup_force();

    if (system_working) {
            RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);

            if (vm->ubf_async_safe && main_th->unblock.func) {
                (main_th->unblock.func)(main_th->unblock.arg);

#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)

close_invalidate(int *fdp, const char *msg)
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);

close_invalidate_pair(int fds[2], const char *msg)
    if (USE_EVENTFD && fds[0] == fds[1]) {
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[1], msg);
        close_invalidate(&fds[0], msg);

    oflags = fcntl(fd, F_GETFL);
    oflags |= O_NONBLOCK;
    err = fcntl(fd, F_SETFL, oflags);

setup_communication_pipe_internal(int pipes[2])
    if (pipes[0] > 0 || pipes[1] > 0) {
        VM_ASSERT(pipes[0] > 0);
        VM_ASSERT(pipes[1] > 0);

#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
    if (pipes[0] >= 0) {

        rb_bug("can not create communication pipe");

    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)

#if defined(__linux__)
#elif defined(__APPLE__)

#ifdef SET_CURRENT_THREAD_NAME
    if (!NIL_P(loc = th->name)) {
        SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        char buf[THREAD_NAME_MAX];
        p = strrchr(name, '/');
        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
        SET_CURRENT_THREAD_NAME(buf);

native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
    char buf[THREAD_NAME_MAX];
# if !defined SET_ANOTHER_THREAD_NAME
    if (!pthread_equal(pthread_self(), thread_id)) return;
        if (n >= (int)sizeof(buf)) {
            memcpy(buf, s, sizeof(buf)-1);
            buf[sizeof(buf)-1] = '\0';
# if defined SET_ANOTHER_THREAD_NAME
    SET_ANOTHER_THREAD_NAME(thread_id, s);
# elif defined SET_CURRENT_THREAD_NAME
    SET_CURRENT_THREAD_NAME(s);

#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
native_thread_native_thread_id(rb_thread_t *target_th)
    if (!target_th->nt) return Qnil;
#ifdef RB_THREAD_T_HAS_NATIVE_ID
    int tid = target_th->nt->tid;
    if (tid == 0) return Qnil;
#elif defined(__APPLE__)
# if (!defined(MAC_OS_X_VERSION_10_6) || \
      (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
      defined(__POWERPC__) )
    const bool no_pthread_threadid_np = true;
#   define NO_PTHREAD_MACH_THREAD_NP 1
# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
    const bool no_pthread_threadid_np = false;
# if !(defined(__has_attribute) && __has_attribute(availability))
    __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
    const bool no_pthread_threadid_np = !&pthread_threadid_np;
    if (no_pthread_threadid_np) {
        return ULL2NUM(pthread_mach_thread_np(pthread_self()));
# ifndef NO_PTHREAD_MACH_THREAD_NP
    int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
    return ULL2NUM((unsigned long long)tid);
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
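/* Editorial note: the fields below hold the timer thread's state: the fork
 * generation at which it was created, its pthread id, the epoll/kqueue event
 * buffers used by the M:N backend, and the list of waiting threads with its
 * lock. The communication fds used to wake it up are accessed elsewhere as
 * timer_th.comm_fds. */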
    rb_serial_t created_fork_gen;
    pthread_t pthread_id;

#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
#define EPOLL_EVENTS_MAX 0x10
    struct epoll_event finished_events[EPOLL_EVENTS_MAX];
#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
#define KQUEUE_EVENTS_MAX 0x10
    struct kevent finished_events[KQUEUE_EVENTS_MAX];

    struct ccan_list_head waiting;
    pthread_mutex_t waiting_lock;
    .created_fork_gen = 0,

#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)

static void timer_thread_check_timeslice(rb_vm_t *vm);
static int timer_thread_set_timeout(rb_vm_t *vm);
static void timer_thread_wakeup_thread(rb_thread_t *th);

#include "thread_pthread_mn.c"
timer_thread_set_timeout(rb_vm_t *vm)
    ractor_sched_lock(vm, NULL);
        if (   !ccan_list_empty(&vm->ractor.sched.timeslice_threads)
            || !ubf_threads_empty()
            || vm->ractor.sched.grq_cnt > 0
            RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
                           !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
                           !ubf_threads_empty(),
                           (vm->ractor.sched.grq_cnt > 0));

            vm->ractor.sched.timeslice_wait_inf = false;
            vm->ractor.sched.timeslice_wait_inf = true;
    ractor_sched_unlock(vm, NULL);

    if (vm->ractor.sched.timeslice_wait_inf) {
            if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
                rb_hrtime_t now = rb_hrtime_now();
                rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);

                RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);

                timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC);

    RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);

timer_thread_check_signal(rb_vm_t *vm)
    int signum = rb_signal_buff_size();
    if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
        RUBY_DEBUG_LOG("signum:%d", signum);
        threadptr_trap_interrupt(vm->ractor.main_thread);

timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
    else if (abs - now < RB_HRTIME_PER_MSEC) {

timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
        (w->flags & thread_sched_waiting_timeout) &&
        timer_thread_check_exceed(w->data.timeout, now)) {

        RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));

        ccan_list_del_init(&w->node);
        w->flags = thread_sched_waiting_none;
        return thread_sched_waiting_thread(w);

    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

    thread_sched_lock(sched, th);
        if (sched->running != th) {
            thread_sched_to_ready_common(sched, th, true, false);
    thread_sched_unlock(sched, th);

timer_thread_check_timeout(rb_vm_t *vm)
    rb_hrtime_t now = rb_hrtime_now();
        while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
            timer_thread_wakeup_thread(th);

timer_thread_check_timeslice(rb_vm_t *vm)
    ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
        RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
        RUBY_VM_SET_TIMER_INTERRUPT(th->ec);

    pthread_sigmask(0, NULL, &oldmask);
    if (sigismember(&oldmask, SIGVTALRM)) {
        RUBY_DEBUG_LOG("ok");
timer_thread_func(void *ptr)
#if defined(RUBY_NT_SERIAL)
    RUBY_DEBUG_LOG("started%s", "");

    while (system_working) {
        timer_thread_check_signal(vm);
        timer_thread_check_timeout(vm);
        ubf_wakeup_all_threads();

        RUBY_DEBUG_LOG("system_working:%d", system_working);
        timer_thread_polling(vm);

    RUBY_DEBUG_LOG("terminated");

signal_communication_pipe(int fd)
    const uint64_t buff = 1;
    const char buff = '!';

    if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
        case EINTR: goto retry;
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
            async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);

    if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");

timer_thread_wakeup_force(void)
    signal_communication_pipe(timer_th.comm_fds[1]);

timer_thread_wakeup_locked(rb_vm_t *vm)
    ASSERT_ractor_sched_locked(vm, NULL);

    if (timer_th.created_fork_gen == current_fork_gen) {
        if (vm->ractor.sched.timeslice_wait_inf) {
            RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
            timer_thread_wakeup_force();
            RUBY_DEBUG_LOG("will be wakeup...");

timer_thread_wakeup(void)
    ractor_sched_lock(vm, NULL);
        timer_thread_wakeup_locked(vm);
    ractor_sched_unlock(vm, NULL);

rb_thread_create_timer_thread(void)
    rb_serial_t created_fork_gen = timer_th.created_fork_gen;

    RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);

    timer_th.created_fork_gen = current_fork_gen;

    if (created_fork_gen != current_fork_gen) {
        if (created_fork_gen != 0) {
            RUBY_DEBUG_LOG("forked child process");
            CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
            close_invalidate(&timer_th.event_fd, "close event_fd");

        ccan_list_head_init(&timer_th.waiting);
        setup_communication_pipe_internal(timer_th.comm_fds);
        timer_thread_setup_mn();

    pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());

native_stop_timer_thread(void)
    stopped = --system_working <= 0;

        RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
        timer_thread_wakeup_force();
        RUBY_DEBUG_LOG("wakeup sent");
        pthread_join(timer_th.pthread_id, NULL);

    if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");

native_reset_timer_thread(void)
#ifdef HAVE_SIGALTSTACK
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
    const size_t water_mark = 1024 * 1024;
    STACK_GROW_DIR_DETECTION;

#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
        if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
            if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
                size = (size_t)rlim.rlim_cur;
        base = (char *)base + STACK_DIR_UPPER(+size, -size);
        size = th->ec->machine.stack_maxsize;
        base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);

    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;

    if (IS_STACK_DIR_UPPER()) {
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;

    if (fd < 0) return 0;

    if (fd == timer_th.comm_fds[0] ||
        fd == timer_th.comm_fds[1]
#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
        || fd == timer_th.event_fd
        goto check_fork_gen;

    if (timer_th.created_fork_gen == current_fork_gen) {

    return pthread_self();

#if defined(USE_POLL) && !defined(HAVE_PPOLL)
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
           const struct timespec *ts, const sigset_t *sigmask)
        if (ts->tv_sec > INT_MAX/1000)
            timeout_ms = INT_MAX;
            tmp = (int)(ts->tv_sec * 1000);
            tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
            if (INT_MAX - tmp < tmp2)
                timeout_ms = INT_MAX;
                timeout_ms = (int)(tmp + tmp2);

    return poll(fds, nfds, timeout_ms);
#  define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
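/* Editorial note: THREAD_BLOCKING_YIELD() below releases the scheduler lock
 * and yields the native thread so another ready thread can run; it yields a
 * second time when no successor was selected but other threads in the ractor
 * are still alive. */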
#define THREAD_BLOCKING_YIELD(th) do { \
    const rb_thread_t *next_th; \
    struct rb_thread_sched *sched = TH_SCHED(th); \
    RB_VM_SAVE_MACHINE_CONTEXT(th); \
    thread_sched_to_waiting(sched, (th)); \
    next_th = sched->running; \
    rb_native_mutex_unlock(&sched->lock_); \
    native_thread_yield(); \
    if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
        native_thread_yield(); \

    RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);

    if (th_has_dedicated_nt(th)) {
        native_cond_sleep(th, rel);
        thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
        thread_sched_to_waiting_until_wakeup(sched, th);

    RUBY_DEBUG_LOG("wakeup");

static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

rb_thread_release_fork_lock(void)
    if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {

rb_thread_reset_fork_lock(void)
    if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
    if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {

rb_thread_prevent_fork(void *(*func)(void *), void *data)
    if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
    void *result = func(data);
    rb_thread_release_fork_lock();

rb_thread_acquire_fork_lock(void)
    if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
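/* Editorial note: internal thread event hooks are kept in a singly linked list
 * guarded by rb_internal_thread_event_hooks_rw_lock: adding and removing hooks
 * takes the write lock, while executing the hooks takes the read lock. */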
struct rb_internal_thread_event_hook {
    rb_internal_thread_event_callback callback;
    struct rb_internal_thread_event_hook *next;

static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;

rb_internal_thread_event_hook_t *
    rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
    hook->callback = callback;
    hook->user_data = user_data;
    hook->event = internal_event;

    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {

    hook->next = rb_internal_thread_event_hooks;
    ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {

    if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {

    bool success = FALSE;

    if (rb_internal_thread_event_hooks == hook) {
        ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
            if (h->next == hook) {
                h->next = hook->next;
        } while ((h = h->next));

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {

    if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {

    if (rb_internal_thread_event_hooks) {
        rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
            if (h->event & event) {
                (*h->callback)(event, &event_data, h->user_data);
        } while((h = h->next));

    if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {

    bool is_snt = th->nt->dedicated == 0;
    native_thread_dedicated_inc(th->vm, th->ractor, th->nt);