12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
14#include "internal/gc.h"
15#include "internal/sanitizers.h"
17#ifdef HAVE_SYS_RESOURCE_H
18#include <sys/resource.h>
20#ifdef HAVE_THR_STKSEGMENT
23#if defined(HAVE_FCNTL_H)
25#elif defined(HAVE_SYS_FCNTL_H)
28#ifdef HAVE_SYS_PRCTL_H
31#if defined(HAVE_SYS_TIME_H)
38#include <sys/syscall.h>
44# include <AvailabilityMacros.h>
47#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
48# define USE_EVENTFD (1)
49# include <sys/eventfd.h>
51# define USE_EVENTFD (0)
54#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
55 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
56 defined(HAVE_CLOCK_GETTIME)
57static pthread_condattr_t condattr_mono;
58static pthread_condattr_t *condattr_monotonic = &condattr_mono;
60static const void *const condattr_monotonic = NULL;
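/*
 * Note (a sketch based on the visible code): when pthread_condattr_setclock()
 * and CLOCK_MONOTONIC are available, condition variables are created with a
 * monotonic-clock attribute so timed waits are not affected by wall-clock
 * adjustments; otherwise condattr_monotonic stays NULL and the default
 * (realtime) clock is used.  rb_native_cond_initialize() below simply does:
 *
 *   pthread_cond_init(cond, condattr_monotonic);   // NULL => default attrs
 */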
65#ifndef HAVE_SYS_EVENT_H
66#define HAVE_SYS_EVENT_H 0
69#ifndef HAVE_SYS_EPOLL_H
70#define HAVE_SYS_EPOLL_H 0
78 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
81 #define USE_MN_THREADS 0
82 #elif HAVE_SYS_EPOLL_H
83 #include <sys/epoll.h>
84 #define USE_MN_THREADS 1
85 #elif HAVE_SYS_EVENT_H
86 #include <sys/event.h>
87 #define USE_MN_THREADS 1
89 #define USE_MN_THREADS 0
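/*
 * Note (sketch): USE_MN_THREADS selects the M:N scheduler, which needs an
 * event multiplexer for the timer thread -- epoll on Linux (sys/epoll.h) or
 * kqueue on BSD/macOS (sys/event.h).  Emscripten, the pthread-context
 * coroutine backend, and platforms with neither interface fall back to the
 * 1:1 model (one dedicated native thread per Ruby thread).
 */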
95#define NATIVE_MUTEX_LOCK_DEBUG 0
98mutex_debug(const char *msg, void *lock)
100 if (NATIVE_MUTEX_LOCK_DEBUG) {
102 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
104 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
105 fprintf(stdout, "%s: %p\n", msg, lock);
106 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
114 mutex_debug("lock", lock);
115 if ((r = pthread_mutex_lock(lock)) != 0) {
124 mutex_debug("unlock", lock);
125 if ((r = pthread_mutex_unlock(lock)) != 0) {
134 mutex_debug("trylock", lock);
135 if ((r = pthread_mutex_trylock(lock)) != 0) {
149 int r = pthread_mutex_init(lock, 0);
150 mutex_debug("init", lock);
159 int r = pthread_mutex_destroy(lock);
160 mutex_debug("destroy", lock);
169 int r = pthread_cond_init(cond, condattr_monotonic);
178 int r = pthread_cond_destroy(cond);
199 r = pthread_cond_signal(cond);
200 } while (r == EAGAIN);
211 r = pthread_cond_broadcast(cond);
212 } while (r == EAGAIN);
221 int r = pthread_cond_wait(cond, mutex);
228native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
240 rb_hrtime2timespec(&ts, abs);
241 r = pthread_cond_timedwait(cond, mutex, &ts);
242 } while (r == EINTR);
244 if (r != 0 && r != ETIMEDOUT) {
252native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
254 if (condattr_monotonic) {
255 return rb_hrtime_add(rb_hrtime_now(), rel);
261 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
268 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
269 native_cond_timedwait(cond, mutex, &hrmsec);
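/*
 * Worked example (assuming rb_hrtime_t counts nanoseconds, so
 * RB_HRTIME_PER_MSEC == 1000000): a caller passing msec = 100 gets
 *
 *   abs = rb_hrtime_now() + 100 * RB_HRTIME_PER_MSEC;
 *
 * native_cond_timeout() uses the monotonic clock when condattr_monotonic is
 * set and falls back to rb_timespec_now() otherwise, and
 * native_cond_timedwait() converts the absolute deadline back into a struct
 * timespec for pthread_cond_timedwait(), retrying on EINTR.
 */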
274static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
296#define RB_INTERNAL_THREAD_HOOK(event, th) \
297 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
298 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
299 rb_thread_execute_hooks(event, th); \
302#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
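/*
 * Sketch: RB_INTERNAL_THREAD_HOOK(event, th) is the scheduler's tracing entry
 * point.  Once a hook has been registered via
 * rb_internal_thread_add_event_hook(), rb_thread_execute_hooks() invokes it
 * for events such as RUBY_INTERNAL_THREAD_EVENT_READY / _RESUMED /
 * _SUSPENDED; the verbose variant above additionally prints the call site.
 */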
305static rb_serial_t current_fork_gen = 1;
307#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
308# define USE_UBF_LIST 1
311static void threadptr_trap_interrupt(rb_thread_t *);
313#ifdef HAVE_SCHED_YIELD
314#define native_thread_yield() (void)sched_yield()
316#define native_thread_yield() ((void)0)
324static void timer_thread_wakeup(void);
325static void timer_thread_wakeup_locked(rb_vm_t *vm);
326static void timer_thread_wakeup_force(void);
331#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
337 return th->nt->dedicated > 0;
342thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
344 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
347 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
348 i++; if (i>10) rb_bug("too many");
349 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
350 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
351 th->nt ? (int)th->nt->serial : -1);
355#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
359ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
363 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
366 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
368 if (i>10) rb_bug("!!");
369 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
373#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
374#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
382 RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
383 VM_ASSERT(sched->lock_owner == NULL);
384 sched->lock_owner = th;
386 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
393 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
396 VM_ASSERT(sched->lock_owner == th);
397 sched->lock_owner = NULL;
406 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
409 sched->lock_owner = th;
420 VM_ASSERT(sched->lock_owner == th);
423 VM_ASSERT(sched->lock_owner != NULL);
428#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
429#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
435 return rb_ractor_id(r);
446 VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
447 VM_ASSERT(vm->ractor.sched.locked == false);
449 vm->ractor.sched.lock_owner = cr;
450 vm->ractor.sched.locked = true;
458 VM_ASSERT(vm->ractor.sched.locked);
459 VM_ASSERT(vm->ractor.sched.lock_owner == cr);
461 vm->ractor.sched.locked = false;
462 vm->ractor.sched.lock_owner = NULL;
472 RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
474 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
477 ractor_sched_set_locked(vm, cr);
483 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
485 ractor_sched_set_unlocked(vm, cr);
493 VM_ASSERT(vm->ractor.sched.locked);
494 VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
502 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
503 if (rth == th) return true;
510ractor_sched_running_threads_size(rb_vm_t *vm)
514 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
522ractor_sched_timeslice_threads_size(rb_vm_t *vm)
526 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
537 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
538 if (rth == th) return true;
543static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
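/*
 * Rough summary (inferred from the body below): thread_sched_setup_running_threads()
 * keeps the VM-global bookkeeping consistent under ractor_sched_lock():
 * del_th leaves vm->ractor.sched.running_threads (decrementing running_cnt
 * and signalling a pending barrier), add_th joins it after waiting out any
 * active barrier, and add/del_timeslice_th maintain the set the timer thread
 * scans when distributing timer interrupts; adding the first timeslice
 * thread wakes the timer thread so preemption is (re)armed.
 */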
551#if USE_RUBY_DEBUG_LOG
552 unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
557 if (del_th && sched->is_running_timeslice) {
558 del_timeslice_th = del_th;
559 sched->is_running_timeslice = false;
562 del_timeslice_th = NULL;
565 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
566 rb_th_serial(add_th), rb_th_serial(del_th),
567 rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));
569 ractor_sched_lock(vm, cr);
573 VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
574 VM_ASSERT(del_timeslice_th != NULL ||
575 !ractor_sched_timeslice_threads_contain_p(vm, del_th));
577 ccan_list_del_init(&del_th->sched.node.running_threads);
578 vm->ractor.sched.running_cnt--;
580 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
581 ractor_sched_barrier_join_signal_locked(vm);
583 sched->is_running = false;
587 while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
588 RUBY_DEBUG_LOG("barrier-wait");
590 ractor_sched_barrier_join_signal_locked(vm);
591 ractor_sched_barrier_join_wait_locked(vm, add_th);
594 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
595 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));
597 ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
598 vm->ractor.sched.running_cnt++;
599 sched->is_running = true;
600 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
603 if (add_timeslice_th) {
605 int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
606 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
607 ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
608 sched->is_running_timeslice = true;
610 timer_thread_wakeup_locked(vm);
614 if (del_timeslice_th) {
615 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
616 ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
619 VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
620 VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
622 ractor_sched_unlock(vm, cr);
624 if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
628 lock_owner = sched->lock_owner;
630 thread_sched_unlock(sched, lock_owner);
635 thread_sched_lock(sched, lock_owner);
641 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
647 ASSERT_thread_sched_locked(sched, th);
648 VM_ASSERT(sched->running == th);
651 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
657 ASSERT_thread_sched_locked(sched, th);
660 thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
668 thread_sched_lock(sched, th);
670 thread_sched_add_running_thread(sched, th);
672 thread_sched_unlock(sched, th);
680 thread_sched_lock(sched, th);
682 thread_sched_del_running_thread(sched, th);
684 thread_sched_unlock(sched, th);
694 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
695 VM_ASSERT(sched->running != th);
705 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
706 if (rth == th) return true;
718 ASSERT_thread_sched_locked(sched, NULL);
721 VM_ASSERT(sched->running != NULL);
723 if (ccan_list_empty(&sched->readyq)) {
727 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
729 VM_ASSERT(sched->readyq_cnt > 0);
731 ccan_list_node_init(&next_th->sched.node.readyq);
734 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
743 ASSERT_thread_sched_locked(sched, NULL);
744 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);
746 VM_ASSERT(sched->running != NULL);
747 VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));
749 if (sched->is_running) {
750 if (ccan_list_empty(&sched->readyq)) {
752 thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
756 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
759 ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
768 ASSERT_thread_sched_locked(sched, NULL);
769 VM_ASSERT(sched->running == next_th);
773 if (th_has_dedicated_nt(next_th)) {
774 RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
779 RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
784 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
787 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
788 ractor_sched_enq(next_th->vm, next_th->ractor);
793 RUBY_DEBUG_LOG("no waiting threads%s", "");
801 RUBY_DEBUG_LOG("th:%u running:%u readyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
803 VM_ASSERT(sched->running != th);
804 VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
807 if (sched->running == NULL) {
808 thread_sched_set_running(sched, th);
809 if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
812 thread_sched_enq(sched, th);
824 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
826 thread_sched_lock(sched, th);
828 thread_sched_to_ready_common(sched, th, true, false);
830 thread_sched_unlock(sched, th);
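/*
 * Usage note (sketch): thread_sched_to_ready() is the locked wrapper around
 * thread_sched_to_ready_common().  If nothing currently owns this ractor's
 * scheduler (sched->running == NULL) the thread becomes the running thread
 * and is woken immediately; otherwise it is appended to sched->readyq and
 * will be picked up by thread_sched_wakeup_next_thread() when the current
 * owner yields.
 */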
837 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
839 ASSERT_thread_sched_locked(sched, th);
840 VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));
842 if (th != sched->running) {
848 while((next_th = sched->running) != th) {
849 if (th_has_dedicated_nt(th)) {
850 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
852 thread_sched_set_lock_owner(sched, NULL);
854 RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
857 thread_sched_set_lock_owner(sched, th);
859 RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
860 if (th == sched->running) {
861 rb_ractor_thread_switch(th->ractor, th);
866 if (can_direct_transfer &&
867 (next_th = sched->running) != NULL &&
871 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
873 thread_sched_set_lock_owner(sched, NULL);
875 rb_ractor_set_current_ec(th->ractor, NULL);
876 thread_sched_switch(th, next_th);
878 thread_sched_set_lock_owner(sched, th);
883 native_thread_assign(NULL, th);
885 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
887 thread_sched_set_lock_owner(sched, NULL);
889 rb_ractor_set_current_ec(th->ractor, NULL);
890 coroutine_transfer0(th->sched.context, nt->nt_context, false);
892 thread_sched_set_lock_owner(sched, th);
895 VM_ASSERT(rb_current_ec_noinline() == th->ec);
899 VM_ASSERT(th->nt != NULL);
900 VM_ASSERT(rb_current_ec_noinline() == th->ec);
901 VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);
904 thread_sched_add_running_thread(sched, th);
915 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
917 VM_ASSERT(sched->running != th);
918 VM_ASSERT(th_has_dedicated_nt(th));
919 VM_ASSERT(GET_THREAD() == th);
921 native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
924 thread_sched_to_ready_common(sched, th, false, false);
926 if (sched->running == th) {
927 thread_sched_add_running_thread(sched, th);
931 thread_sched_wait_running_turn(sched, th, false);
943 thread_sched_lock(sched, th);
945 thread_sched_to_running_common(sched, th);
947 thread_sched_unlock(sched, th);
960 ASSERT_thread_sched_locked(sched, th);
962 VM_ASSERT(sched->running == th);
963 VM_ASSERT(sched->running->nt != NULL);
967 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
968 VM_ASSERT(th != next_th);
970 thread_sched_set_running(sched, next_th);
971 VM_ASSERT(next_th == sched->running);
972 thread_sched_wakeup_running_thread(sched, next_th, will_switch);
975 thread_sched_del_running_thread(sched, th);
992 if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
994 RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));
996 bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
997 thread_sched_wakeup_next_thread(sched, th, can_switch);
1004 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1005 thread_sched_to_waiting_common0(sched, th, true);
1013 thread_sched_lock(sched, th);
1015 thread_sched_to_dead_common(sched, th);
1017 thread_sched_unlock(sched, th);
1026 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1027 thread_sched_to_waiting_common0(sched, th, false);
1036 thread_sched_lock(sched, th);
1038 thread_sched_to_waiting_common(sched, th);
1040 thread_sched_unlock(sched, th);
1049 th->unblock.func = func;
1050 th->unblock.arg = arg;
1056ubf_waiting(void *ptr)
1062 th->unblock.func = NULL;
1063 th->unblock.arg = NULL;
1065 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1067 thread_sched_lock(sched, th);
1069 if (sched->running == th) {
1073 thread_sched_to_ready_common(sched, th, true, false);
1076 thread_sched_unlock(sched, th);
1085 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1087 RB_VM_SAVE_MACHINE_CONTEXT(th);
1088 setup_ubf(th, ubf_waiting, (void *)th);
1092 thread_sched_lock(sched, th);
1094 if (!RUBY_VM_INTERRUPTED(th->ec)) {
1095 bool can_direct_transfer = !th_has_dedicated_nt(th);
1096 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1097 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1100 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
1103 thread_sched_unlock(sched, th);
1105 setup_ubf(th, NULL, NULL);
1113 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);
1115 thread_sched_lock(sched, th);
1117 if (!ccan_list_empty(&sched->readyq)) {
1119 thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
1120 bool can_direct_transfer = !th_has_dedicated_nt(th);
1121 thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
1122 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1125 VM_ASSERT(sched->readyq_cnt == 0);
1128 thread_sched_unlock(sched, th);
1137 sched->lock_owner = NULL;
1140 ccan_list_head_init(&sched->readyq);
1141 sched->readyq_cnt = 0;
1144 if (!atfork) sched->enable_mn_threads = true;
1151#ifdef RUBY_ASAN_ENABLED
1152 void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
1153 __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
1157 struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);
1161 VM_ASSERT(!to_dead);
1162#ifdef RUBY_ASAN_ENABLED
1163 __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
1164 (const void**)&returning_from->stack_base, &returning_from->stack_size);
1172 VM_ASSERT(!nt->dedicated);
1173 VM_ASSERT(next_th->nt == NULL);
1175 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
1177 ruby_thread_set_native(next_th);
1178 native_thread_assign(nt, next_th);
1180 coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
1187 native_thread_assign(NULL, cth);
1188 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1189 thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
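/*
 * Sketch of an M:N switch: when a shared native thread (nt) moves from cth to
 * next_th, cth's coroutine is suspended via coroutine_transfer0(), next_th is
 * bound to the nt (native_thread_assign) and made current
 * (ruby_thread_set_native).  to_dead == true (cth->status == THREAD_KILLED)
 * makes the ASAN glue pass a NULL fake-stack pointer, i.e. no return to cth's
 * stack is expected.
 */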
1192#if VM_CHECK_MODE > 0
1197 ASSERT_ractor_sched_locked(vm, cr);
1202 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
1205 VM_ASSERT(r != prev_r);
1218 VM_ASSERT(sched->running != NULL);
1219 VM_ASSERT(sched->running->nt == NULL);
1221 ractor_sched_lock(vm, cr);
1223#if VM_CHECK_MODE > 0
1226 ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
1231 ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
1232 vm->ractor.sched.grq_cnt++;
1233 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1235 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
1241 ractor_sched_unlock(vm, cr);
1245#ifndef SNT_KEEP_SECONDS
1246#define SNT_KEEP_SECONDS 0
1251#define MINIMUM_SNT 0
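/*
 * Note (assumption from the defaults above): with SNT_KEEP_SECONDS == 0 an
 * idle shared native thread gives up as soon as the global ready queue (grq)
 * is empty; with a positive value the dequeue loop below instead waits on
 * vm->ractor.sched.cond with a timeout and retires the SNT (snt_cnt--,
 * running_cnt--) only after that many seconds without work.
 */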
1259 ractor_sched_lock(vm, cr);
1261 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
1264 VM_ASSERT(rb_current_execution_context(false) == NULL);
1265 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1267 while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
1268 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1270#if SNT_KEEP_SECONDS > 0
1271 rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
1272 if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
1273 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1274 VM_ASSERT(r == NULL);
1275 vm->ractor.sched.snt_cnt--;
1276 vm->ractor.sched.running_cnt--;
1280 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1283 ractor_sched_set_unlocked(vm, cr);
1285 ractor_sched_set_locked(vm, cr);
1287 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1291 VM_ASSERT(rb_current_execution_context(false) == NULL);
1294 VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
1295 vm->ractor.sched.grq_cnt--;
1296 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
1299 VM_ASSERT(SNT_KEEP_SECONDS > 0);
1303 ractor_sched_unlock(vm, cr);
1318 cr->sync.wait.waiting_thread = th;
1320 setup_ubf(th, ubf, (void *)cr);
1322 thread_sched_lock(sched, th);
1324 rb_ractor_unlock_self(cr);
1326 if (RUBY_VM_INTERRUPTED(th->ec)) {
1327 RUBY_DEBUG_LOG("interrupted");
1329 else if (cr->sync.wait.wakeup_status != wakeup_none) {
1330 RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
1334 RB_VM_SAVE_MACHINE_CONTEXT(th);
1335 th->status = THREAD_STOPPED_FOREVER;
1339 bool can_direct_transfer = !th_has_dedicated_nt(th);
1340 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1341 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1342 th->status = THREAD_RUNNABLE;
1347 thread_sched_unlock(sched, th);
1349 setup_ubf(th, NULL, NULL);
1351 rb_ractor_lock_self(cr);
1352 cr->sync.wait.waiting_thread = NULL;
1362 VM_ASSERT(r->sync.wait.wakeup_status != 0);
1364 thread_sched_lock(sched, r_th);
1366 if (r_th->status == THREAD_STOPPED_FOREVER) {
1367 thread_sched_to_ready_common(sched, r_th, true, false);
1370 thread_sched_unlock(sched, r_th);
1374ractor_sched_barrier_completed_p(rb_vm_t *vm)
1376 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1377 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1378 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
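/*
 * Sketch: the barrier counts as completed once every running thread except
 * the initiator has joined, i.e. running_cnt - barrier_waiting_cnt == 1.
 * The initiating side sets barrier_waiting, interrupts the other running
 * threads (RUBY_VM_SET_VM_BARRIER_INTERRUPT) and re-checks this predicate;
 * joining threads bump barrier_waiting_cnt and sleep until barrier_serial
 * changes.
 */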
1384 VM_ASSERT(cr == GET_RACTOR());
1385 VM_ASSERT(vm->ractor.sync.lock_owner == cr);
1386 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1387 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1389 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1391 unsigned int lock_rec;
1393 ractor_sched_lock(vm, cr);
1395 vm->ractor.sched.barrier_waiting = true;
1398 lock_rec = vm->ractor.sync.lock_rec;
1399 vm->ractor.sync.lock_rec = 0;
1400 vm->ractor.sync.lock_owner = NULL;
1405 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1406 if (ith->ractor != cr) {
1407 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1408 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1413 while (!ractor_sched_barrier_completed_p(vm)) {
1414 ractor_sched_set_unlocked(vm, cr);
1416 ractor_sched_set_locked(vm, cr);
1420 ractor_sched_unlock(vm, cr);
1424 vm->ractor.sync.lock_rec = lock_rec;
1425 vm->ractor.sync.lock_owner = cr;
1427 RUBY_DEBUG_LOG("completed serial:%u", vm->ractor.sched.barrier_serial);
1429 ractor_sched_lock(vm, cr);
1431 vm->ractor.sched.barrier_waiting = false;
1432 vm->ractor.sched.barrier_serial++;
1433 vm->ractor.sched.barrier_waiting_cnt = 0;
1436 ractor_sched_unlock(vm, cr);
1440ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1442 if (ractor_sched_barrier_completed_p(vm)) {
1450 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1452 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1454 while (vm->ractor.sched.barrier_serial == barrier_serial) {
1455 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
1456 RB_VM_SAVE_MACHINE_CONTEXT(th);
1459 ractor_sched_set_unlocked(vm, cr);
1461 ractor_sched_set_locked(vm, cr);
1463 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
1470 VM_ASSERT(cr->threads.sched.running != NULL);
1471 VM_ASSERT(cr == GET_RACTOR());
1472 VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
1473 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1475#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1476 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1479 RUBY_DEBUG_LOG("join");
1483 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1484 VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);
1486 ractor_sched_lock(vm, cr);
1489 vm->ractor.sched.barrier_waiting_cnt++;
1490 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);
1492 ractor_sched_barrier_join_signal_locked(vm);
1493 ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
1495 ractor_sched_unlock(vm, cr);
1505static void clear_thread_cache_altstack(void);
1518 clear_thread_cache_altstack();
1522#ifdef RB_THREAD_T_HAS_NATIVE_ID
1524get_native_thread_id(void)
1527 return (int)syscall(SYS_gettid);
1528#elif defined(__FreeBSD__)
1529 return pthread_getthreadid_np();
1534#if defined(HAVE_WORKING_FORK)
1539 rb_thread_sched_init(sched, true);
1543 if (th_has_dedicated_nt(th)) {
1544 vm->ractor.sched.snt_cnt = 0;
1547 vm->ractor.sched.snt_cnt = 1;
1549 vm->ractor.sched.running_cnt = 0;
1552#if VM_CHECK_MODE > 0
1553 vm->ractor.sched.lock_owner = NULL;
1554 vm->ractor.sched.locked = false;
1562 ccan_list_head_init(&vm->ractor.sched.grq);
1563 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1564 ccan_list_head_init(&vm->ractor.sched.running_threads);
1566 VM_ASSERT(sched->is_running);
1567 sched->is_running_timeslice = false;
1569 if (sched->running != th) {
1570 thread_sched_to_running(sched, th);
1573 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
1576#ifdef RB_THREAD_T_HAS_NATIVE_ID
1578 th->nt->tid = get_native_thread_id();
1585#ifdef RB_THREAD_LOCAL_SPECIFIER
1586static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1588static pthread_key_t ruby_native_thread_key;
1600ruby_thread_from_native(void)
1602#ifdef RB_THREAD_LOCAL_SPECIFIER
1603 return ruby_native_thread;
1605 return pthread_getspecific(ruby_native_thread_key);
1614 ccan_list_node_init(&th->sched.node.ubf);
1621 rb_ractor_set_current_ec(th->ractor, th->ec);
1623#ifdef RB_THREAD_LOCAL_SPECIFIER
1624 ruby_native_thread = th;
1627 return pthread_setspecific(ruby_native_thread_key, th) == 0;
1637#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1638 if (condattr_monotonic) {
1639 int r = pthread_condattr_init(condattr_monotonic);
1641 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
1643 if (r) condattr_monotonic = NULL;
1647#ifndef RB_THREAD_LOCAL_SPECIFIER
1648 if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
1649 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1651 if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
1652 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1655 ruby_posix_signal(SIGVTALRM, null_func);
1664 ccan_list_head_init(&vm->ractor.sched.grq);
1665 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1666 ccan_list_head_init(&vm->ractor.sched.running_threads);
1669 main_th->nt->thread_id = pthread_self();
1670 main_th->nt->serial = 1;
1671#ifdef RUBY_NT_SERIAL
1674 ruby_thread_set_native(main_th);
1675 native_thread_setup(main_th->nt);
1676 native_thread_setup_on_thread(main_th->nt);
1678 TH_SCHED(main_th)->running = main_th;
1679 main_th->has_dedicated_nt = 1;
1681 thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);
1684 main_th->nt->dedicated = 1;
1685 main_th->nt->vm = vm;
1688 vm->ractor.sched.dnt_cnt = 1;
1691extern int ruby_mn_threads_enabled;
1694ruby_mn_threads_params(void)
1699 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1700 bool enable_mn_threads = false;
1702 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1704 ruby_mn_threads_enabled = 1;
1706 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1708 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1709 const int default_max_cpu = 8;
1710 int max_cpu = default_max_cpu;
1712 if (USE_MN_THREADS && max_cpu_cstr) {
1713 int given_max_cpu = atoi(max_cpu_cstr);
1714 if (given_max_cpu > 0) {
1715 max_cpu = given_max_cpu;
1719 vm->ractor.sched.max_cpu = max_cpu;
1725 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);
1727 if (nt->dedicated == 0) {
1728 ractor_sched_lock(vm, cr);
1730 vm->ractor.sched.snt_cnt--;
1731 vm->ractor.sched.dnt_cnt++;
1733 ractor_sched_unlock(vm, cr);
1742 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1743 VM_ASSERT(nt->dedicated > 0);
1746 if (nt->dedicated == 0) {
1747 ractor_sched_lock(vm, cr);
1749 nt->vm->ractor.sched.snt_cnt++;
1750 nt->vm->ractor.sched.dnt_cnt--;
1752 ractor_sched_unlock(vm, cr);
1759#if USE_RUBY_DEBUG_LOG
1762 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1765 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1770 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1773 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1787 if (&nt->cond.readyq != &nt->cond.intr) {
1791 RB_ALTSTACK_FREE(nt->altstack);
1792 ruby_xfree(nt->nt_context);
1797#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1798#define STACKADDR_AVAILABLE 1
1799#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1800#define STACKADDR_AVAILABLE 1
1801#undef MAINSTACKADDR_AVAILABLE
1802#define MAINSTACKADDR_AVAILABLE 1
1803void *pthread_get_stackaddr_np(pthread_t);
1804size_t pthread_get_stacksize_np(pthread_t);
1805#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1806#define STACKADDR_AVAILABLE 1
1807#elif defined HAVE_PTHREAD_GETTHRDS_NP
1808#define STACKADDR_AVAILABLE 1
1809#elif defined __HAIKU__
1810#define STACKADDR_AVAILABLE 1
1813#ifndef MAINSTACKADDR_AVAILABLE
1814# ifdef STACKADDR_AVAILABLE
1815# define MAINSTACKADDR_AVAILABLE 1
1817# define MAINSTACKADDR_AVAILABLE 0
1820#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1821# define get_main_stack(addr, size) get_stack(addr, size)
1824#ifdef STACKADDR_AVAILABLE
1829get_stack(void **addr, size_t *size)
1831#define CHECK_ERR(expr) \
1832 {int err = (expr); if (err) return err;}
1833#ifdef HAVE_PTHREAD_GETATTR_NP
1834 pthread_attr_t attr;
1836 STACK_GROW_DIR_DETECTION;
1837 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
1838# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1839 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1840 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1842 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1843 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1845# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1846 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
1848 guard = getpagesize();
1851 pthread_attr_destroy(&attr);
1852#elif defined HAVE_PTHREAD_ATTR_GET_NP
1853 pthread_attr_t attr;
1854 CHECK_ERR(pthread_attr_init(&attr));
1855 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
1856# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1857 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1859 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1860 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1862 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1863 pthread_attr_destroy(&attr);
1864#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
1865 pthread_t th = pthread_self();
1866 *addr = pthread_get_stackaddr_np(th);
1867 *size = pthread_get_stacksize_np(th);
1868#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1870# if defined HAVE_THR_STKSEGMENT
1871 CHECK_ERR(thr_stksegment(&stk));
1873 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
1876 *size = stk.ss_size;
1877#elif defined HAVE_PTHREAD_GETTHRDS_NP
1878 pthread_t th = pthread_self();
1879 struct __pthrdsinfo thinfo;
1881 int regsiz=sizeof(reg);
1882 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
1883 &thinfo, sizeof(thinfo),
1885 *addr = thinfo.__pi_stackaddr;
1889 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
1890 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1891#elif defined __HAIKU__
1893 STACK_GROW_DIR_DETECTION;
1894 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
1895 *addr = info.stack_base;
1896 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
1897 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1899#error STACKADDR_AVAILABLE is defined but not implemented.
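/*
 * Summary (sketch): get_stack() obtains the current native thread's stack
 * base and size via whichever platform API exists -- pthread_getattr_np
 * (glibc), pthread_attr_get_np (*BSD), pthread_get_stackaddr_np /
 * pthread_get_stacksize_np (macOS), thr_stksegment / pthread_stackseg_np,
 * pthread_getthrds_np (AIX) or get_thread_info (Haiku).  The STACK_DIR_UPPER
 * adjustments normalize the returned address so callers see a consistent
 * reference point regardless of the stack growth direction.
 */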
1907 rb_nativethread_id_t id;
1908 size_t stack_maxsize;
1910} native_main_thread;
1912#ifdef STACK_END_ADDRESS
1913extern void *STACK_END_ADDRESS;
1917 RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
1918 RUBY_STACK_SPACE_RATIO = 5
1922space_size(size_t stack_size)
1924 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
1925 if (space_size > RUBY_STACK_SPACE_LIMIT) {
1926 return RUBY_STACK_SPACE_LIMIT;
1934native_thread_init_main_thread_stack(void *addr)
1936 native_main_thread.id = pthread_self();
1937#ifdef RUBY_ASAN_ENABLED
1938 addr = asan_get_real_stack_addr((void *)addr);
1941#if MAINSTACKADDR_AVAILABLE
1942 if (native_main_thread.stack_maxsize) return;
1946 if (get_main_stack(&stackaddr, &size) == 0) {
1947 native_main_thread.stack_maxsize = size;
1948 native_main_thread.stack_start = stackaddr;
1953#ifdef STACK_END_ADDRESS
1954 native_main_thread.stack_start = STACK_END_ADDRESS;
1956 if (!native_main_thread.stack_start ||
1957 STACK_UPPER((VALUE *)(void *)&addr,
1958 native_main_thread.stack_start > (VALUE *)addr,
1959 native_main_thread.stack_start < (VALUE *)addr)) {
1960 native_main_thread.stack_start = (VALUE *)addr;
1964#if defined(HAVE_GETRLIMIT)
1965#if defined(PTHREAD_STACK_DEFAULT)
1966# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1967# error "PTHREAD_STACK_DEFAULT is too small"
1969 size_t size = PTHREAD_STACK_DEFAULT;
1971 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
1974 int pagesize = getpagesize();
1976 STACK_GROW_DIR_DETECTION;
1977 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
1978 size = (size_t)rlim.rlim_cur;
1980 addr = native_main_thread.stack_start;
1981 if (IS_STACK_DIR_UPPER()) {
1982 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
1985 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
1987 native_main_thread.stack_maxsize = space;
1991#if MAINSTACKADDR_AVAILABLE
1998 STACK_GROW_DIR_DETECTION;
2000 if (IS_STACK_DIR_UPPER()) {
2001 start = native_main_thread.stack_start;
2002 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2005 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2006 end = native_main_thread.stack_start;
2009 if ((void *)addr < start || (void *)addr > end) {
2011 native_main_thread.stack_start = (VALUE *)addr;
2012 native_main_thread.stack_maxsize = 0;
2017#define CHECK_ERR(expr) \
2018 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2021native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
2023 rb_nativethread_id_t curr = pthread_self();
2024#ifdef RUBY_ASAN_ENABLED
2025 local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
2026 th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
2029 if (!native_main_thread.id) {
2032 native_thread_init_main_thread_stack(local_in_parent_frame);
2035 if (pthread_equal(curr, native_main_thread.id)) {
2036 th->ec->machine.stack_start = native_main_thread.stack_start;
2037 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2040#ifdef STACKADDR_AVAILABLE
2041 if (th_has_dedicated_nt(th)) {
2045 if (get_stack(&start, &size) == 0) {
2046 uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
2047 th->ec->machine.stack_start = local_in_parent_frame;
2048 th->ec->machine.stack_maxsize = size - diff;
2052 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
2071 pthread_attr_t attr;
2073 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2074 const size_t space = space_size(stack_size);
2076 nt->machine_stack_maxsize = stack_size - space;
2078#ifdef USE_SIGALTSTACK
2079 nt->altstack = rb_allocate_sigaltstack();
2082 CHECK_ERR(pthread_attr_init(&attr));
2084# ifdef PTHREAD_STACK_MIN
2085 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2086 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2089# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2090 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2092 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2094 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2096 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2098 CHECK_ERR(pthread_attr_destroy(&attr));
2109 if (&nt->cond.readyq != &nt->cond.intr) {
2118#ifdef RB_THREAD_T_HAS_NATIVE_ID
2119 nt->tid = get_native_thread_id();
2123 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2127native_thread_alloc(void)
2130 native_thread_setup(nt);
2136#if USE_RUBY_DEBUG_LOG
2146 th->nt = native_thread_alloc();
2147 th->nt->vm = th->vm;
2148 th->nt->running_thread = th;
2149 th->nt->dedicated = 1;
2152 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2153 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2154 th->sched.malloc_stack = true;
2155 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2156 th->sched.context_stack = vm_stack;
2159 int err = native_thread_create0(th->nt);
2162 thread_sched_to_ready(TH_SCHED(th), th);
2176 VALUE stack_start = 0;
2177 VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);
2179 native_thread_init_stack(th, stack_start_addr);
2180 thread_start_func_2(th, th->ec->machine.stack_start);
2189 native_thread_setup_on_thread(nt);
2192#ifdef RB_THREAD_T_HAS_NATIVE_ID
2193 nt->tid = get_native_thread_id();
2196#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2197 ruby_nt_serial = nt->serial;
2200 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2202 if (!nt->dedicated) {
2203 coroutine_initialize_main(nt->nt_context);
2207 if (nt->dedicated) {
2212 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2213 ruby_thread_set_native(th);
2215 thread_sched_lock(sched, th);
2217 if (sched->running == th) {
2218 thread_sched_add_running_thread(sched, th);
2220 thread_sched_wait_running_turn(sched, th, false);
2222 thread_sched_unlock(sched, th);
2225 call_thread_start_func_2(th);
2229 RUBY_DEBUG_LOG("check next");
2235 thread_sched_lock(sched, NULL);
2239 if (next_th && next_th->nt == NULL) {
2240 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2241 thread_sched_switch0(nt->nt_context, next_th, nt, false);
2244 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2247 thread_sched_unlock(sched, NULL);
2254 if (nt->dedicated) {
2264static int native_thread_create_shared(rb_thread_t *th);
2267static void nt_free_stack(void *mstack);
2274 if (th->sched.malloc_stack) {
2280 th->sched.finished = false;
2284 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2295 if (th->sched.malloc_stack) {
2297 ruby_xfree(th->sched.context_stack);
2298 native_thread_destroy(th->nt);
2301 nt_free_stack(th->sched.context_stack);
2305 ruby_xfree(th->sched.context);
2306 th->sched.context = NULL;
2309 ruby_xfree(th->sched.context_stack);
2310 native_thread_destroy(th->nt);
2317rb_thread_sched_mark_zombies(rb_vm_t *vm)
2319 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2321 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2322 if (zombie_th->sched.finished) {
2323 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2326 rb_gc_mark(zombie_th->self);
2335 VM_ASSERT(th->nt == 0);
2336 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2339 if (!th->ractor->threads.sched.enable_mn_threads) {
2340 th->has_dedicated_nt = 1;
2343 if (th->has_dedicated_nt) {
2344 return native_thread_create_dedicated(th);
2347 return native_thread_create_shared(th);
2351#if USE_NATIVE_THREAD_PRIORITY
2356#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2357 struct sched_param sp;
2359 int priority = 0 - th->priority;
2361 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2362 max = sched_get_priority_max(policy);
2363 min = sched_get_priority_min(policy);
2365 if (min > priority) {
2368 else if (max < priority) {
2372 sp.sched_priority = priority;
2373 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2384 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2388ubf_pthread_cond_signal(void *ptr)
2391 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2396native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2398 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2399 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2409 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
2411 THREAD_BLOCKING_BEGIN(th);
2414 th->unblock.func = ubf_pthread_cond_signal;
2415 th->unblock.arg = th;
2417 if (RUBY_VM_INTERRUPTED(th->ec)) {
2419 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2432 end = native_cond_timeout(cond, *rel);
2433 native_cond_timedwait(cond, lock, &end);
2436 th->unblock.func = 0;
2440 THREAD_BLOCKING_END(th);
2442 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
2446static CCAN_LIST_HEAD(ubf_list_head);
2447static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2450ubf_list_atfork(void)
2452 ccan_list_head_init(&ubf_list_head);
2461 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2462 if (list_th == th) return true;
2471 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2472 struct ccan_list_node *node = &th->sched.node.ubf;
2474 VM_ASSERT(th->unblock.func != NULL);
2479 if (ccan_list_empty((struct ccan_list_head*)node)) {
2480 VM_ASSERT(!ubf_list_contain_p(th));
2481 ccan_list_add(&ubf_list_head, node);
2486 timer_thread_wakeup();
2493 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2494 struct ccan_list_node *node = &th->sched.node.ubf;
2497 VM_ASSERT(th->unblock.func == NULL);
2499 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2502 VM_ASSERT(ubf_list_contain_p(th));
2503 ccan_list_del_init(node);
2516 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2518 pthread_kill(th->nt->thread_id, SIGVTALRM);
2522ubf_select(void *ptr)
2525 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2526 ubf_wakeup_thread(th);
2527 register_ubf_list(th);
2531ubf_threads_empty(void)
2533 return ccan_list_empty(&ubf_list_head) != 0;
2537ubf_wakeup_all_threads(void)
2539 if (!ubf_threads_empty()) {
2543 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2544 ubf_wakeup_thread(th);
2552#define register_ubf_list(th) (void)(th)
2553#define unregister_ubf_list(th) (void)(th)
2555static void ubf_wakeup_all_threads(void) { return; }
2556static bool ubf_threads_empty(void) { return true; }
2557#define ubf_list_atfork() do {} while (0)
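/*
 * Background (sketch): a UBF ("unblocking function") is installed while a
 * thread blocks in native code so interrupts can wake it.  With USE_UBF_LIST,
 * ubf_select() registers the thread on ubf_list_head and pokes it with
 * SIGVTALRM (ubf_wakeup_thread); the timer thread keeps calling
 * ubf_wakeup_all_threads() each iteration until the thread unregisters
 * itself.  Without SIGVTALRM the list machinery degenerates to the no-ops
 * above.
 */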
2561#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2564rb_thread_wakeup_timer_thread(int sig)
2570 timer_thread_wakeup_force();
2573 if (system_working) {
2581 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2583 if (vm->ubf_async_safe && main_th->unblock.func) {
2584 (main_th->unblock.func)(main_th->unblock.arg);
2591#define CLOSE_INVALIDATE_PAIR(expr) \
2592 close_invalidate_pair(expr,"close_invalidate: "#expr)
2594close_invalidate(int *fdp, const char *msg)
2599 if (close(fd) < 0) {
2600 async_bug_fd(msg, errno, fd);
2605close_invalidate_pair(int fds[2], const char *msg)
2607 if (USE_EVENTFD && fds[0] == fds[1]) {
2609 close_invalidate(&fds[0], msg);
2612 close_invalidate(&fds[1], msg);
2613 close_invalidate(&fds[0], msg);
2623 oflags = fcntl(fd, F_GETFL);
2626 oflags |= O_NONBLOCK;
2627 err = fcntl(fd, F_SETFL, oflags);
2634setup_communication_pipe_internal(int pipes[2])
2638 if (pipes[0] > 0 || pipes[1] > 0) {
2639 VM_ASSERT(pipes[0] > 0);
2640 VM_ASSERT(pipes[1] > 0);
2648#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2649 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2651 if (pipes[0] >= 0) {
2659 rb_bug("can not create communication pipe");
2663 set_nonblock(pipes[0]);
2664 set_nonblock(pipes[1]);
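/*
 * Design note (sketch): the timer thread's wakeup channel prefers a single
 * non-blocking eventfd (EFD_NONBLOCK|EFD_CLOEXEC), so both "ends" share one
 * descriptor; without eventfd it falls back to an ordinary pipe and marks
 * both ends O_NONBLOCK.  A wakeup is just a write of an 8-byte counter (or a
 * single '!' byte on the pipe fallback) -- see signal_communication_pipe()
 * further down.
 */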
2667#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2668# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2673#if defined(__linux__)
2675#elif defined(__APPLE__)
2688#ifdef SET_CURRENT_THREAD_NAME
2690 if (!NIL_P(loc = th->name)) {
2691 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2693 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2695 char buf[THREAD_NAME_MAX];
2700 p = strrchr(name, '/');
2708 if (len >= sizeof(buf)) {
2709 buf[sizeof(buf)-2] = '*';
2710 buf[sizeof(buf)-1] = '\0';
2712 SET_CURRENT_THREAD_NAME(buf);
2718native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2720#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2721 char buf[THREAD_NAME_MAX];
2723# if !defined SET_ANOTHER_THREAD_NAME
2724 if (!pthread_equal(pthread_self(), thread_id)) return;
2729 if (n >= (int)sizeof(buf)) {
2730 memcpy(buf, s, sizeof(buf)-1);
2731 buf[sizeof(buf)-1] = '\0';
2735# if defined SET_ANOTHER_THREAD_NAME
2736 SET_ANOTHER_THREAD_NAME(thread_id, s);
2737# elif defined SET_CURRENT_THREAD_NAME
2738 SET_CURRENT_THREAD_NAME(s);
2743#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
2745native_thread_native_thread_id(rb_thread_t *target_th)
2747 if (!target_th->nt) return Qnil;
2749#ifdef RB_THREAD_T_HAS_NATIVE_ID
2750 int tid = target_th->nt->tid;
2751 if (tid == 0) return Qnil;
2753#elif defined(__APPLE__)
2759# if (!defined(MAC_OS_X_VERSION_10_6) || \
2760 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2761 defined(__POWERPC__) )
2762 const bool no_pthread_threadid_np = true;
2763# define NO_PTHREAD_MACH_THREAD_NP 1
2764# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2765 const bool no_pthread_threadid_np = false;
2767# if !(defined(__has_attribute) && __has_attribute(availability))
2769 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2772 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2774 if (no_pthread_threadid_np) {
2775 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2777# ifndef NO_PTHREAD_MACH_THREAD_NP
2778 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2780 return ULL2NUM((unsigned long long)tid);
2784# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2786# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2790 rb_serial_t created_fork_gen;
2791 pthread_t pthread_id;
2795#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2798#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2799#define EPOLL_EVENTS_MAX 0x10
2800 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2801#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2802#define KQUEUE_EVENTS_MAX 0x10
2803 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2807 struct ccan_list_head waiting;
2808 pthread_mutex_t waiting_lock;
2810 .created_fork_gen = 0,
2813#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
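/*
 * Note (assumption): created_fork_gen vs. current_fork_gen implements fork
 * detection; current_fork_gen is presumably bumped in the child after
 * fork(), so TIMER_THREAD_CREATED_P() turns false there and
 * rb_thread_create_timer_thread() closes the inherited communication fds and
 * starts a fresh timer thread for the child process.
 */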
2815static void timer_thread_check_timeslice(rb_vm_t *vm);
2816static int timer_thread_set_timeout(rb_vm_t *vm);
2817static void timer_thread_wakeup_thread(rb_thread_t *th);
2819#include "thread_pthread_mn.c"
2833timer_thread_set_timeout(rb_vm_t *vm)
2840 ractor_sched_lock(vm, NULL);
2842 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads)
2843 || !ubf_threads_empty()
2844 || vm->ractor.sched.grq_cnt > 0
2847 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2848 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2849 !ubf_threads_empty(),
2850 (vm->ractor.sched.grq_cnt > 0));
2853 vm->ractor.sched.timeslice_wait_inf = false;
2856 vm->ractor.sched.timeslice_wait_inf = true;
2859 ractor_sched_unlock(vm, NULL);
2861 if (vm->ractor.sched.timeslice_wait_inf) {
2867 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2868 rb_hrtime_t now = rb_hrtime_now();
2869 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2871 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2874 timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC);
2880 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2888timer_thread_check_signal(rb_vm_t *vm)
2892 int signum = rb_signal_buff_size();
2893 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2894 RUBY_DEBUG_LOG("signum:%d", signum);
2895 threadptr_trap_interrupt(vm->ractor.main_thread);
2900timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2905 else if (abs - now < RB_HRTIME_PER_MSEC) {
2914timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2919 (w->flags & thread_sched_waiting_timeout) &&
2920 timer_thread_check_exceed(w->data.timeout, now)) {
2922 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
2925 ccan_list_del_init(&w->node);
2928 w->flags = thread_sched_waiting_none;
2931 return thread_sched_waiting_thread(w);
2940 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2943 thread_sched_lock(sched, th);
2945 if (sched->running != th) {
2946 thread_sched_to_ready_common(sched, th, true, false);
2952 thread_sched_unlock(sched, th);
2956timer_thread_check_timeout(rb_vm_t *vm)
2958 rb_hrtime_t now = rb_hrtime_now();
2963 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
2964 timer_thread_wakeup_thread(th);
2971timer_thread_check_timeslice(rb_vm_t *vm)
2975 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
2976 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
2977 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
2985 pthread_sigmask(0, NULL, &oldmask);
2986 if (sigismember(&oldmask, SIGVTALRM)) {
2990 RUBY_DEBUG_LOG("ok");
2995timer_thread_func(void *ptr)
2998#if defined(RUBY_NT_SERIAL)
3002 RUBY_DEBUG_LOG("started%s", "");
3004 while (system_working) {
3005 timer_thread_check_signal(vm);
3006 timer_thread_check_timeout(vm);
3007 ubf_wakeup_all_threads();
3009 RUBY_DEBUG_LOG("system_working:%d", system_working);
3010 timer_thread_polling(vm);
3013 RUBY_DEBUG_LOG("terminated");
3019signal_communication_pipe(int fd)
3022 const uint64_t buff = 1;
3024 const char buff = '!';
3031 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3034 case EINTR: goto retry;
3036#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3041 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3044 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3052timer_thread_wakeup_force(void)
3055 signal_communication_pipe(timer_th.comm_fds[1]);
3059timer_thread_wakeup_locked(rb_vm_t *vm)
3062 ASSERT_ractor_sched_locked(vm, NULL);
3064 if (timer_th.created_fork_gen == current_fork_gen) {
3065 if (vm->ractor.sched.timeslice_wait_inf) {
3066 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3067 timer_thread_wakeup_force();
3070 RUBY_DEBUG_LOG("will be wakeup...");
3076timer_thread_wakeup(void)
3080 ractor_sched_lock(vm, NULL);
3082 timer_thread_wakeup_locked(vm);
3084 ractor_sched_unlock(vm, NULL);
3088rb_thread_create_timer_thread(void)
3090 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3092 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3094 timer_th.created_fork_gen = current_fork_gen;
3096 if (created_fork_gen != current_fork_gen) {
3097 if (created_fork_gen != 0) {
3098 RUBY_DEBUG_LOG("forked child process");
3100 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3101#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3102 close_invalidate(&timer_th.event_fd, "close event_fd");
3107 ccan_list_head_init(&timer_th.waiting);
3111 setup_communication_pipe_internal(timer_th.comm_fds);
3114 timer_thread_setup_mn();
3117 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3121native_stop_timer_thread(void)
3124 stopped = --system_working <= 0;
3127 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3128 timer_thread_wakeup_force();
3129 RUBY_DEBUG_LOG("wakeup sent");
3130 pthread_join(timer_th.pthread_id, NULL);
3133 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3138native_reset_timer_thread(void)
3143#ifdef HAVE_SIGALTSTACK
3145ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3149 const size_t water_mark = 1024 * 1024;
3150 STACK_GROW_DIR_DETECTION;
3152#ifdef STACKADDR_AVAILABLE
3153 if (get_stack(&base, &size) == 0) {
3155 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3157 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3158 size = (size_t)rlim.rlim_cur;
3162 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3167 size = th->ec->machine.stack_maxsize;
3168 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3173 size /= RUBY_STACK_SPACE_RATIO;
3174 if (size > water_mark) size = water_mark;
3175 if (IS_STACK_DIR_UPPER()) {
3176 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3177 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3180 if (size > (size_t)base) size = (size_t)base;
3181 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3191 if (fd < 0) return 0;
3193 if (fd == timer_th.comm_fds[0] ||
3194 fd == timer_th.comm_fds[1]
3195#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3196 || fd == timer_th.event_fd
3199 goto check_fork_gen;
3204 if (timer_th.created_fork_gen == current_fork_gen) {
3216 return pthread_self();
3219#if defined(USE_POLL) && !defined(HAVE_PPOLL)
3222ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3223 const struct timespec *ts, const sigset_t *sigmask)
3230 if (ts->tv_sec > INT_MAX/1000)
3231 timeout_ms = INT_MAX;
3233 tmp = (int)(ts->tv_sec * 1000);
3235 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3236 if (INT_MAX - tmp < tmp2)
3237 timeout_ms = INT_MAX;
3239 timeout_ms = (int)(tmp + tmp2);
3245 return poll(fds, nfds, timeout_ms);
3247# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
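/*
 * Sketch: on platforms with poll() but no ppoll(), ruby_ppoll() emulates the
 * call by converting the struct timespec into a millisecond count, rounding
 * the nanosecond part up and saturating at INT_MAX; the sigmask argument is
 * ignored.  For example ts = {0, 1} (a single nanosecond) becomes a 1 ms
 * timeout rather than a busy spin.
 */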
3260#define THREAD_BLOCKING_YIELD(th) do { \
3261 const rb_thread_t *next_th; \
3262 struct rb_thread_sched *sched = TH_SCHED(th); \
3263 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3264 thread_sched_to_waiting(sched, (th)); \
3265 next_th = sched->running; \
3266 rb_native_mutex_unlock(&sched->lock_); \
3267 native_thread_yield(); \
3268 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3269 native_thread_yield(); \
3277 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3279 if (th_has_dedicated_nt(th)) {
3280 native_cond_sleep(th, rel);
3283 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3287 thread_sched_to_waiting_until_wakeup(sched, th);
3290 RUBY_DEBUG_LOG("wakeup");
3294static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3297rb_thread_release_fork_lock(void)
3300 if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
3306rb_thread_reset_fork_lock(void)
3309 if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
3313 if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
3319rb_thread_prevent_fork(void *(*func)(void *), void *data)
3322 if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
3325 void *result = func(data);
3326 rb_thread_release_fork_lock();
3331rb_thread_acquire_fork_lock(void)
3334 if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
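/*
 * Usage sketch: rb_thread_prevent_fork() takes rb_thread_fork_rw_lock as a
 * reader around func(data), so any number of threads may run fork-unsafe
 * sections concurrently, while rb_thread_acquire_fork_lock() takes the
 * writer side before an actual fork(); rb_thread_release_fork_lock() and
 * rb_thread_reset_fork_lock() release or re-create the rwlock afterwards
 * rather than trusting the state inherited by the child.
 */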
3341struct rb_internal_thread_event_hook {
3342 rb_internal_thread_event_callback callback;
3346 struct rb_internal_thread_event_hook *next;
3349static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3351rb_internal_thread_event_hook_t *
3354 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3355 hook->callback = callback;
3356 hook->user_data = user_data;
3357 hook->event = internal_event;
3360 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3364 hook->next = rb_internal_thread_event_hooks;
3365 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3367 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3377 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3381 bool success = FALSE;
3383 if (rb_internal_thread_event_hooks == hook) {
3384 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3388 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3391 if (h->next == hook) {
3392 h->next = hook->next;
3396 } while ((h = h->next));
3399 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3413 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3417 if (rb_internal_thread_event_hooks) {
3418 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3420 if (h->event & event) {
3424 (*h->callback)(event, &event_data, h->user_data);
3426 } while((h = h->next));
3428 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3439 bool is_snt = th->nt->dedicated == 0;
3440 native_thread_dedicated_inc(th->vm, th->ractor, th->nt);