Ruby 3.5.0dev (2025-04-11 revision f13e86a70edffe9af9b8bca15a0a9652cf143386)
thread_pthread.c (f13e86a70edffe9af9b8bca15a0a9652cf143386)
1/* -*-c-*- */
2/**********************************************************************
3
4 thread_pthread.c -
5
6 $Author$
7
8 Copyright (C) 2004-2007 Koichi Sasada
9
10**********************************************************************/
11
12#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13
14#include "internal/gc.h"
15#include "internal/sanitizers.h"
16
17#ifdef HAVE_SYS_RESOURCE_H
18#include <sys/resource.h>
19#endif
20#ifdef HAVE_THR_STKSEGMENT
21#include <thread.h>
22#endif
23#if defined(HAVE_FCNTL_H)
24#include <fcntl.h>
25#elif defined(HAVE_SYS_FCNTL_H)
26#include <sys/fcntl.h>
27#endif
28#ifdef HAVE_SYS_PRCTL_H
29#include <sys/prctl.h>
30#endif
31#if defined(HAVE_SYS_TIME_H)
32#include <sys/time.h>
33#endif
34#if defined(__HAIKU__)
35#include <kernel/OS.h>
36#endif
37#ifdef __linux__
38#include <sys/syscall.h> /* for SYS_gettid */
39#endif
40#include <time.h>
41#include <signal.h>
42
43#if defined __APPLE__
44# include <AvailabilityMacros.h>
45#endif
46
47#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
48# define USE_EVENTFD (1)
49# include <sys/eventfd.h>
50#else
51# define USE_EVENTFD (0)
52#endif
53
54#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
55 defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
56 defined(HAVE_CLOCK_GETTIME)
57static pthread_condattr_t condattr_mono;
58static pthread_condattr_t *condattr_monotonic = &condattr_mono;
59#else
60static const void *const condattr_monotonic = NULL;
61#endif
62
63#include COROUTINE_H
64
65#ifndef HAVE_SYS_EVENT_H
66#define HAVE_SYS_EVENT_H 0
67#endif
68
69#ifndef HAVE_SYS_EPOLL_H
70#define HAVE_SYS_EPOLL_H 0
71#else
72// force setting for debug
73// #undef HAVE_SYS_EPOLL_H
74// #define HAVE_SYS_EPOLL_H 0
75#endif
76
77#ifndef USE_MN_THREADS
78 #if defined(__EMSCRIPTEN__) || defined(COROUTINE_PTHREAD_CONTEXT)
 79 // __EMSCRIPTEN__ provides epoll* declarations, but no implementations.
 80 // with COROUTINE_PTHREAD_CONTEXT, it is not worth using MN threads.
81 #define USE_MN_THREADS 0
82 #elif HAVE_SYS_EPOLL_H
83 #include <sys/epoll.h>
84 #define USE_MN_THREADS 1
85 #elif HAVE_SYS_EVENT_H
86 #include <sys/event.h>
87 #define USE_MN_THREADS 1
88 #else
89 #define USE_MN_THREADS 0
90 #endif
91#endif
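// Net effect of the block above: M:N threads are compiled in only when an
// epoll (Linux) or kqueue (BSD/macOS) backend is available and the platform is
// neither Emscripten nor using pthread-based coroutines; even then they still
// have to be enabled at runtime (see ruby_mn_threads_params() below).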
92
93// native thread wrappers
94
95#define NATIVE_MUTEX_LOCK_DEBUG 0
96
97static void
98mutex_debug(const char *msg, void *lock)
99{
100 if (NATIVE_MUTEX_LOCK_DEBUG) {
101 int r;
102 static pthread_mutex_t dbglock = PTHREAD_MUTEX_INITIALIZER;
103
104 if ((r = pthread_mutex_lock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
105 fprintf(stdout, "%s: %p\n", msg, lock);
106 if ((r = pthread_mutex_unlock(&dbglock)) != 0) {exit(EXIT_FAILURE);}
107 }
108}
109
110void
111rb_native_mutex_lock(pthread_mutex_t *lock)
112{
113 int r;
114 mutex_debug("lock", lock);
115 if ((r = pthread_mutex_lock(lock)) != 0) {
116 rb_bug_errno("pthread_mutex_lock", r);
117 }
118}
119
120void
121rb_native_mutex_unlock(pthread_mutex_t *lock)
122{
123 int r;
124 mutex_debug("unlock", lock);
125 if ((r = pthread_mutex_unlock(lock)) != 0) {
126 rb_bug_errno("pthread_mutex_unlock", r);
127 }
128}
129
130int
131rb_native_mutex_trylock(pthread_mutex_t *lock)
132{
133 int r;
134 mutex_debug("trylock", lock);
135 if ((r = pthread_mutex_trylock(lock)) != 0) {
136 if (r == EBUSY) {
137 return EBUSY;
138 }
139 else {
140 rb_bug_errno("pthread_mutex_trylock", r);
141 }
142 }
143 return 0;
144}
145
146void
147rb_native_mutex_initialize(pthread_mutex_t *lock)
148{
149 int r = pthread_mutex_init(lock, 0);
150 mutex_debug("init", lock);
151 if (r != 0) {
152 rb_bug_errno("pthread_mutex_init", r);
153 }
154}
155
156void
157rb_native_mutex_destroy(pthread_mutex_t *lock)
158{
159 int r = pthread_mutex_destroy(lock);
160 mutex_debug("destroy", lock);
161 if (r != 0) {
162 rb_bug_errno("pthread_mutex_destroy", r);
163 }
164}
165
166void
167rb_native_cond_initialize(rb_nativethread_cond_t *cond)
168{
169 int r = pthread_cond_init(cond, condattr_monotonic);
170 if (r != 0) {
171 rb_bug_errno("pthread_cond_init", r);
172 }
173}
174
175void
176rb_native_cond_destroy(rb_nativethread_cond_t *cond)
177{
178 int r = pthread_cond_destroy(cond);
179 if (r != 0) {
180 rb_bug_errno("pthread_cond_destroy", r);
181 }
182}
183
184/*
185 * In OS X 10.7 (Lion), pthread_cond_signal and pthread_cond_broadcast return
 186 * EAGAIN after retrying 8192 times. You can see this in the following source:
187 *
188 * http://www.opensource.apple.com/source/Libc/Libc-763.11/pthreads/pthread_cond.c
189 *
190 * The following rb_native_cond_signal and rb_native_cond_broadcast functions
 191 * need to retry until the pthread functions no longer return EAGAIN.
192 */
193
194void
195rb_native_cond_signal(rb_nativethread_cond_t *cond)
196{
197 int r;
198 do {
199 r = pthread_cond_signal(cond);
200 } while (r == EAGAIN);
201 if (r != 0) {
202 rb_bug_errno("pthread_cond_signal", r);
203 }
204}
205
206void
207rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
208{
209 int r;
210 do {
211 r = pthread_cond_broadcast(cond);
212 } while (r == EAGAIN);
213 if (r != 0) {
214 rb_bug_errno("rb_native_cond_broadcast", r);
215 }
216}
217
218void
219rb_native_cond_wait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex)
220{
221 int r = pthread_cond_wait(cond, mutex);
222 if (r != 0) {
223 rb_bug_errno("pthread_cond_wait", r);
224 }
225}
226
227static int
228native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, const rb_hrtime_t *abs)
229{
230 int r;
231 struct timespec ts;
232
233 /*
 234 * An old Linux kernel may return EINTR, even though POSIX says
 235 * "These functions shall not return an error code of [EINTR]":
 236 * http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_cond_timedwait.html
 237 * Let's hide it from arch-generic code.
238 */
239 do {
240 rb_hrtime2timespec(&ts, abs);
241 r = pthread_cond_timedwait(cond, mutex, &ts);
242 } while (r == EINTR);
243
244 if (r != 0 && r != ETIMEDOUT) {
245 rb_bug_errno("pthread_cond_timedwait", r);
246 }
247
248 return r;
249}
250
251static rb_hrtime_t
252native_cond_timeout(rb_nativethread_cond_t *cond, const rb_hrtime_t rel)
253{
254 if (condattr_monotonic) {
255 return rb_hrtime_add(rb_hrtime_now(), rel);
256 }
257 else {
258 struct timespec ts;
259
260 rb_timespec_now(&ts);
261 return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
262 }
263}
264
265void
266rb_native_cond_timedwait(rb_nativethread_cond_t *cond, pthread_mutex_t *mutex, unsigned long msec)
267{
268 rb_hrtime_t hrmsec = native_cond_timeout(cond, RB_HRTIME_PER_MSEC * msec);
269 native_cond_timedwait(cond, mutex, &hrmsec);
270}
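// A minimal usage sketch of the wrappers above (kept out of the build with
// #if 0): lock a native mutex and wait on a monotonic condvar with a
// millisecond timeout.
#if 0
static void
demo_native_wait(void)
{
    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
    rb_nativethread_cond_t demo_cond;

    rb_native_cond_initialize(&demo_cond); // uses condattr_monotonic when available
    rb_native_mutex_lock(&demo_lock);
    {
        // returns after a signal/broadcast or after roughly 10ms
        rb_native_cond_timedwait(&demo_cond, &demo_lock, 10);
    }
    rb_native_mutex_unlock(&demo_lock);
    rb_native_cond_destroy(&demo_cond);
}
#endif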
271
272// thread scheduling
273
274static rb_internal_thread_event_hook_t *rb_internal_thread_event_hooks = NULL;
275static void rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th);
276
277#if 0
278static const char *
279event_name(rb_event_flag_t event)
280{
281 switch (event) {
283 return "STARTED";
285 return "READY";
287 return "RESUMED";
289 return "SUSPENDED";
291 return "EXITED";
292 }
293 return "no-event";
294}
295
296#define RB_INTERNAL_THREAD_HOOK(event, th) \
297 if (UNLIKELY(rb_internal_thread_event_hooks)) { \
298 fprintf(stderr, "[thread=%"PRIxVALUE"] %s in %s (%s:%d)\n", th->self, event_name(event), __func__, __FILE__, __LINE__); \
299 rb_thread_execute_hooks(event, th); \
300 }
301#else
302#define RB_INTERNAL_THREAD_HOOK(event, th) if (UNLIKELY(rb_internal_thread_event_hooks)) { rb_thread_execute_hooks(event, th); }
303#endif
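// A sketch (kept out of the build) of how a C extension would register a hook
// that RB_INTERNAL_THREAD_HOOK eventually dispatches to; the exact signatures
// below are assumed from the public API in include/ruby/thread.h.
#if 0
static void
demo_thread_event_cb(rb_event_flag_t event, const rb_internal_thread_event_data_t *event_data, void *user_data)
{
    // invoked from rb_thread_execute_hooks() for each subscribed event
    (void)event; (void)event_data; (void)user_data;
}

static void
demo_register_thread_event_hook(void)
{
    rb_internal_thread_add_event_hook(demo_thread_event_cb,
                                      RUBY_INTERNAL_THREAD_EVENT_READY |
                                      RUBY_INTERNAL_THREAD_EVENT_RESUMED,
                                      NULL);
}
#endif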
304
305static rb_serial_t current_fork_gen = 1; /* We can't use GET_VM()->fork_gen */
306
307#if defined(SIGVTALRM) && !defined(__EMSCRIPTEN__)
308# define USE_UBF_LIST 1
309#endif
310
311static void threadptr_trap_interrupt(rb_thread_t *);
312
313#ifdef HAVE_SCHED_YIELD
314#define native_thread_yield() (void)sched_yield()
315#else
316#define native_thread_yield() ((void)0)
317#endif
318
319static void native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
320static void native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt);
321static void native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th);
322
323static void ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r);
324static void timer_thread_wakeup(void);
325static void timer_thread_wakeup_locked(rb_vm_t *vm);
326static void timer_thread_wakeup_force(void);
327static void thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th);
328static void coroutine_transfer0(struct coroutine_context *transfer_from,
329 struct coroutine_context *transfer_to, bool to_dead);
330
331#define thread_sched_dump(s) thread_sched_dump_(__FILE__, __LINE__, s)
332
333static bool
334th_has_dedicated_nt(const rb_thread_t *th)
335{
336 // TODO: th->has_dedicated_nt
337 return th->nt->dedicated > 0;
338}
339
341static void
342thread_sched_dump_(const char *file, int line, struct rb_thread_sched *sched)
343{
344 fprintf(stderr, "@%s:%d running:%d\n", file, line, sched->running ? (int)sched->running->serial : -1);
345 rb_thread_t *th;
346 int i = 0;
347 ccan_list_for_each(&sched->readyq, th, sched.node.readyq) {
348 i++; if (i>10) rb_bug("too many");
349 fprintf(stderr, " ready:%d (%sNT:%d)\n", th->serial,
350 th->nt ? (th->nt->dedicated ? "D" : "S") : "x",
351 th->nt ? (int)th->nt->serial : -1);
352 }
353}
354
355#define ractor_sched_dump(s) ractor_sched_dump_(__FILE__, __LINE__, s)
356
358static void
359ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
360{
361 rb_ractor_t *r;
362
363 fprintf(stderr, "ractor_sched_dump %s:%d\n", file, line);
364
365 int i = 0;
366 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
367 i++;
368 if (i>10) rb_bug("!!");
369 fprintf(stderr, " %d ready:%d\n", i, rb_ractor_id(r));
370 }
371}
372
373#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
374#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
375
376static void
377thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
378{
379 rb_native_mutex_lock(&sched->lock_);
380
381#if VM_CHECK_MODE
382 RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
383 VM_ASSERT(sched->lock_owner == NULL);
384 sched->lock_owner = th;
385#else
386 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
387#endif
388}
389
390static void
391thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
392{
393 RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
394
395#if VM_CHECK_MODE
396 VM_ASSERT(sched->lock_owner == th);
397 sched->lock_owner = NULL;
398#endif
399
400 rb_native_mutex_unlock(&sched->lock_);
401}
402
403static void
404thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
405{
406 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
407
408#if VM_CHECK_MODE > 0
409 sched->lock_owner = th;
410#endif
411}
412
413static void
414ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
415{
416 VM_ASSERT(rb_native_mutex_trylock(&sched->lock_) == EBUSY);
417
418#if VM_CHECK_MODE
419 if (th) {
420 VM_ASSERT(sched->lock_owner == th);
421 }
422 else {
423 VM_ASSERT(sched->lock_owner != NULL);
424 }
425#endif
426}
427
428#define ractor_sched_lock(a, b) ractor_sched_lock_(a, b, __FILE__, __LINE__)
429#define ractor_sched_unlock(a, b) ractor_sched_unlock_(a, b, __FILE__, __LINE__)
430
432static unsigned int
433rb_ractor_serial(const rb_ractor_t *r) {
434 if (r) {
435 return rb_ractor_id(r);
436 }
437 else {
438 return 0;
439 }
440}
441
442static void
443ractor_sched_set_locked(rb_vm_t *vm, rb_ractor_t *cr)
444{
445#if VM_CHECK_MODE > 0
446 VM_ASSERT(vm->ractor.sched.lock_owner == NULL);
447 VM_ASSERT(vm->ractor.sched.locked == false);
448
449 vm->ractor.sched.lock_owner = cr;
450 vm->ractor.sched.locked = true;
451#endif
452}
453
454static void
455ractor_sched_set_unlocked(rb_vm_t *vm, rb_ractor_t *cr)
456{
457#if VM_CHECK_MODE > 0
458 VM_ASSERT(vm->ractor.sched.locked);
459 VM_ASSERT(vm->ractor.sched.lock_owner == cr);
460
461 vm->ractor.sched.locked = false;
462 vm->ractor.sched.lock_owner = NULL;
463#endif
464}
465
466static void
467ractor_sched_lock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
468{
469 rb_native_mutex_lock(&vm->ractor.sched.lock);
470
471#if VM_CHECK_MODE
472 RUBY_DEBUG_LOG2(file, line, "cr:%u prev_owner:%u", rb_ractor_serial(cr), rb_ractor_serial(vm->ractor.sched.lock_owner));
473#else
474 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
475#endif
476
477 ractor_sched_set_locked(vm, cr);
478}
479
480static void
481ractor_sched_unlock_(rb_vm_t *vm, rb_ractor_t *cr, const char *file, int line)
482{
483 RUBY_DEBUG_LOG2(file, line, "cr:%u", rb_ractor_serial(cr));
484
485 ractor_sched_set_unlocked(vm, cr);
486 rb_native_mutex_unlock(&vm->ractor.sched.lock);
487}
488
489static void
490ASSERT_ractor_sched_locked(rb_vm_t *vm, rb_ractor_t *cr)
491{
492 VM_ASSERT(rb_native_mutex_trylock(&vm->ractor.sched.lock) == EBUSY);
493 VM_ASSERT(vm->ractor.sched.locked);
494 VM_ASSERT(cr == NULL || vm->ractor.sched.lock_owner == cr);
495}
496
498static bool
499ractor_sched_running_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
500{
501 rb_thread_t *rth;
502 ccan_list_for_each(&vm->ractor.sched.running_threads, rth, sched.node.running_threads) {
503 if (rth == th) return true;
504 }
505 return false;
506}
507
509static unsigned int
510ractor_sched_running_threads_size(rb_vm_t *vm)
511{
512 rb_thread_t *th;
513 unsigned int i = 0;
514 ccan_list_for_each(&vm->ractor.sched.running_threads, th, sched.node.running_threads) {
515 i++;
516 }
517 return i;
518}
519
521static unsigned int
522ractor_sched_timeslice_threads_size(rb_vm_t *vm)
523{
524 rb_thread_t *th;
525 unsigned int i = 0;
526 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
527 i++;
528 }
529 return i;
530}
531
533static bool
534ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
535{
536 rb_thread_t *rth;
537 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, rth, sched.node.timeslice_threads) {
538 if (rth == th) return true;
539 }
540 return false;
541}
542
543static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
544static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
545
546// setup timeslice signals by the timer thread.
547static void
548thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *cr, rb_vm_t *vm,
549 rb_thread_t *add_th, rb_thread_t *del_th, rb_thread_t *add_timeslice_th)
550{
551#if USE_RUBY_DEBUG_LOG
552 unsigned int prev_running_cnt = vm->ractor.sched.running_cnt;
553#endif
554
555 rb_thread_t *del_timeslice_th;
556
557 if (del_th && sched->is_running_timeslice) {
558 del_timeslice_th = del_th;
559 sched->is_running_timeslice = false;
560 }
561 else {
562 del_timeslice_th = NULL;
563 }
564
565 RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u",
566 rb_th_serial(add_th), rb_th_serial(del_th),
567 rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th));
568
569 ractor_sched_lock(vm, cr);
570 {
571 // update running_threads
572 if (del_th) {
573 VM_ASSERT(ractor_sched_running_threads_contain_p(vm, del_th));
574 VM_ASSERT(del_timeslice_th != NULL ||
575 !ractor_sched_timeslice_threads_contain_p(vm, del_th));
576
577 ccan_list_del_init(&del_th->sched.node.running_threads);
578 vm->ractor.sched.running_cnt--;
579
580 if (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
581 ractor_sched_barrier_join_signal_locked(vm);
582 }
583 sched->is_running = false;
584 }
585
586 if (add_th) {
587 while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
588 RUBY_DEBUG_LOG("barrier-wait");
589
590 ractor_sched_barrier_join_signal_locked(vm);
591 ractor_sched_barrier_join_wait_locked(vm, add_th);
592 }
593
594 VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
595 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_th));
596
597 ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
598 vm->ractor.sched.running_cnt++;
599 sched->is_running = true;
600 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
601 }
602
603 if (add_timeslice_th) {
604 // update timeslice threads
605 int was_empty = ccan_list_empty(&vm->ractor.sched.timeslice_threads);
606 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(vm, add_timeslice_th));
607 ccan_list_add(&vm->ractor.sched.timeslice_threads, &add_timeslice_th->sched.node.timeslice_threads);
608 sched->is_running_timeslice = true;
609 if (was_empty) {
610 timer_thread_wakeup_locked(vm);
611 }
612 }
613
614 if (del_timeslice_th) {
615 VM_ASSERT(ractor_sched_timeslice_threads_contain_p(vm, del_timeslice_th));
616 ccan_list_del_init(&del_timeslice_th->sched.node.timeslice_threads);
617 }
618
619 VM_ASSERT(ractor_sched_running_threads_size(vm) == vm->ractor.sched.running_cnt);
620 VM_ASSERT(ractor_sched_timeslice_threads_size(vm) <= vm->ractor.sched.running_cnt);
621 }
622 ractor_sched_unlock(vm, cr);
623
624 if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
625 // it can be after barrier synchronization by another ractor
626 rb_thread_t *lock_owner = NULL;
627#if VM_CHECK_MODE
628 lock_owner = sched->lock_owner;
629#endif
630 thread_sched_unlock(sched, lock_owner);
631 {
632 RB_VM_LOCK_ENTER();
633 RB_VM_LOCK_LEAVE();
634 }
635 thread_sched_lock(sched, lock_owner);
636 }
637
638 //RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
639 // rb_th_serial(add_th), rb_th_serial(del_th),
640 // rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
641 RUBY_DEBUG_LOG("run:%u->%u", prev_running_cnt, vm->ractor.sched.running_cnt);
642}
643
644static void
645thread_sched_add_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
646{
647 ASSERT_thread_sched_locked(sched, th);
648 VM_ASSERT(sched->running == th);
649
650 rb_vm_t *vm = th->vm;
651 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, ccan_list_empty(&sched->readyq) ? NULL : th);
652}
653
654static void
655thread_sched_del_running_thread(struct rb_thread_sched *sched, rb_thread_t *th)
656{
657 ASSERT_thread_sched_locked(sched, th);
658
659 rb_vm_t *vm = th->vm;
660 thread_sched_setup_running_threads(sched, th->ractor, vm, NULL, th, NULL);
661}
662
663void
664rb_add_running_thread(rb_thread_t *th)
665{
666 struct rb_thread_sched *sched = TH_SCHED(th);
667
668 thread_sched_lock(sched, th);
669 {
670 thread_sched_add_running_thread(sched, th);
671 }
672 thread_sched_unlock(sched, th);
673}
674
675void
676rb_del_running_thread(rb_thread_t *th)
677{
678 struct rb_thread_sched *sched = TH_SCHED(th);
679
680 thread_sched_lock(sched, th);
681 {
682 thread_sched_del_running_thread(sched, th);
683 }
684 thread_sched_unlock(sched, th);
685}
686
687// setup current or next running thread
688// sched->running should be set only in this function.
689//
690// if th is NULL, there are no running threads.
691static void
692thread_sched_set_running(struct rb_thread_sched *sched, rb_thread_t *th)
693{
694 RUBY_DEBUG_LOG("th:%u->th:%u", rb_th_serial(sched->running), rb_th_serial(th));
695 VM_ASSERT(sched->running != th);
696
697 sched->running = th;
698}
699
701static bool
702thread_sched_readyq_contain_p(struct rb_thread_sched *sched, rb_thread_t *th)
703{
704 rb_thread_t *rth;
705 ccan_list_for_each(&sched->readyq, rth, sched.node.readyq) {
706 if (rth == th) return true;
707 }
708 return false;
709}
710
711// dequeue a thread from the ready queue.
712// if the ready queue is empty, return NULL.
713//
714// return the dequeued next thread (or NULL).
715static rb_thread_t *
716thread_sched_deq(struct rb_thread_sched *sched)
717{
718 ASSERT_thread_sched_locked(sched, NULL);
719 rb_thread_t *next_th;
720
721 VM_ASSERT(sched->running != NULL);
722
723 if (ccan_list_empty(&sched->readyq)) {
724 next_th = NULL;
725 }
726 else {
727 next_th = ccan_list_pop(&sched->readyq, rb_thread_t, sched.node.readyq);
728
729 VM_ASSERT(sched->readyq_cnt > 0);
730 sched->readyq_cnt--;
731 ccan_list_node_init(&next_th->sched.node.readyq);
732 }
733
734 RUBY_DEBUG_LOG("next_th:%u readyq_cnt:%d", rb_th_serial(next_th), sched->readyq_cnt);
735
736 return next_th;
737}
738
739// enqueue ready thread to the ready queue.
740static void
741thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
742{
743 ASSERT_thread_sched_locked(sched, NULL);
744 RUBY_DEBUG_LOG("ready_th:%u readyq_cnt:%d", rb_th_serial(ready_th), sched->readyq_cnt);
745
746 VM_ASSERT(sched->running != NULL);
747 VM_ASSERT(!thread_sched_readyq_contain_p(sched, ready_th));
748
749 if (sched->is_running) {
750 if (ccan_list_empty(&sched->readyq)) {
751 // add sched->running to timeslice
752 thread_sched_setup_running_threads(sched, ready_th->ractor, ready_th->vm, NULL, NULL, sched->running);
753 }
754 }
755 else {
756 VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
757 }
758
759 ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
760 sched->readyq_cnt++;
761}
762
763// DNT: kick condvar
764// SNT: TODO
765static void
766thread_sched_wakeup_running_thread(struct rb_thread_sched *sched, rb_thread_t *next_th, bool will_switch)
767{
768 ASSERT_thread_sched_locked(sched, NULL);
769 VM_ASSERT(sched->running == next_th);
770
771 if (next_th) {
772 if (next_th->nt) {
773 if (th_has_dedicated_nt(next_th)) {
774 RUBY_DEBUG_LOG("pinning th:%u", next_th->serial);
775 rb_native_cond_signal(&next_th->nt->cond.readyq);
776 }
777 else {
778 // TODO
779 RUBY_DEBUG_LOG("th:%u is already running.", next_th->serial);
780 }
781 }
782 else {
783 if (will_switch) {
784 RUBY_DEBUG_LOG("th:%u (do nothing)", rb_th_serial(next_th));
785 }
786 else {
787 RUBY_DEBUG_LOG("th:%u (enq)", rb_th_serial(next_th));
788 ractor_sched_enq(next_th->vm, next_th->ractor);
789 }
790 }
791 }
792 else {
793 RUBY_DEBUG_LOG("no waiting threads%s", "");
794 }
795}
796
797// waiting -> ready (locked)
798static void
799thread_sched_to_ready_common(struct rb_thread_sched *sched, rb_thread_t *th, bool wakeup, bool will_switch)
800{
801 RUBY_DEBUG_LOG("th:%u running:%u redyq_cnt:%d", rb_th_serial(th), rb_th_serial(sched->running), sched->readyq_cnt);
802
803 VM_ASSERT(sched->running != th);
804 VM_ASSERT(!thread_sched_readyq_contain_p(sched, th));
805 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_READY, th);
806
807 if (sched->running == NULL) {
808 thread_sched_set_running(sched, th);
809 if (wakeup) thread_sched_wakeup_running_thread(sched, th, will_switch);
810 }
811 else {
812 thread_sched_enq(sched, th);
813 }
814}
815
816// waiting -> ready
817//
818// `th` was put into the "waiting" state by `thread_sched_to_waiting`,
819// and `thread_sched_to_ready` enqueues `th` onto the thread ready queue.
821static void
822thread_sched_to_ready(struct rb_thread_sched *sched, rb_thread_t *th)
823{
824 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
825
826 thread_sched_lock(sched, th);
827 {
828 thread_sched_to_ready_common(sched, th, true, false);
829 }
830 thread_sched_unlock(sched, th);
831}
832
833// wait until sched->running is `th`.
834static void
835thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, bool can_direct_transfer)
836{
837 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
838
839 ASSERT_thread_sched_locked(sched, th);
840 VM_ASSERT(th == rb_ec_thread_ptr(rb_current_ec_noinline()));
841
842 if (th != sched->running) {
843 // already deleted from running threads
844 // VM_ASSERT(!ractor_sched_running_threads_contain_p(th->vm, th)); // need locking
845
846 // wait for execution right
847 rb_thread_t *next_th;
848 while((next_th = sched->running) != th) {
849 if (th_has_dedicated_nt(th)) {
850 RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
851
852 thread_sched_set_lock_owner(sched, NULL);
853 {
854 RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
855 rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
856 }
857 thread_sched_set_lock_owner(sched, th);
858
859 RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
860 if (th == sched->running) {
861 rb_ractor_thread_switch(th->ractor, th);
862 }
863 }
864 else {
865 // search another ready thread
866 if (can_direct_transfer &&
867 (next_th = sched->running) != NULL &&
868 !next_th->nt // next_th is running or has dedicated nt
869 ) {
870
871 RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
872
873 thread_sched_set_lock_owner(sched, NULL);
874 {
875 rb_ractor_set_current_ec(th->ractor, NULL);
876 thread_sched_switch(th, next_th);
877 }
878 thread_sched_set_lock_owner(sched, th);
879 }
880 else {
881 // search another ready ractor
882 struct rb_native_thread *nt = th->nt;
883 native_thread_assign(NULL, th);
884
885 RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
886
887 thread_sched_set_lock_owner(sched, NULL);
888 {
889 rb_ractor_set_current_ec(th->ractor, NULL);
890 coroutine_transfer0(th->sched.context, nt->nt_context, false);
891 }
892 thread_sched_set_lock_owner(sched, th);
893 }
894
895 VM_ASSERT(rb_current_ec_noinline() == th->ec);
896 }
897 }
898
899 VM_ASSERT(th->nt != NULL);
900 VM_ASSERT(rb_current_ec_noinline() == th->ec);
901 VM_ASSERT(th->sched.waiting_reason.flags == thread_sched_waiting_none);
902
903 // add th to running threads
904 thread_sched_add_running_thread(sched, th);
905 }
906
907 // VM_ASSERT(ractor_sched_running_threads_contain_p(th->vm, th)); need locking
908 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_RESUMED, th);
909}
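// In short, thread_sched_wait_running_turn() blocks with one of three
// strategies until sched->running == th: a thread with a dedicated NT sleeps
// on its own condvar (nt->cond.readyq); otherwise, if direct transfer is
// possible and the next thread has no NT yet, the current NT switches to it
// via thread_sched_switch(); failing that, the NT detaches from th and returns
// to its scheduler coroutine (nt->nt_context) to serve another ractor.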
910
911// waiting -> ready -> running (locked)
912static void
913thread_sched_to_running_common(struct rb_thread_sched *sched, rb_thread_t *th)
914{
915 RUBY_DEBUG_LOG("th:%u dedicated:%d", rb_th_serial(th), th_has_dedicated_nt(th));
916
917 VM_ASSERT(sched->running != th);
918 VM_ASSERT(th_has_dedicated_nt(th));
919 VM_ASSERT(GET_THREAD() == th);
920
921 native_thread_dedicated_dec(th->vm, th->ractor, th->nt);
922
923 // waiting -> ready
924 thread_sched_to_ready_common(sched, th, false, false);
925
926 if (sched->running == th) {
927 thread_sched_add_running_thread(sched, th);
928 }
929
930 // TODO: check SNT number
931 thread_sched_wait_running_turn(sched, th, false);
932}
933
934// waiting -> ready -> running
935//
936// `th` went to waiting via `thread_sched_to_waiting()`
937// and ran a dedicated task (like waitpid and so on).
938// After the dedicated task, this function is called
939// to rejoin normal thread scheduling.
940static void
941thread_sched_to_running(struct rb_thread_sched *sched, rb_thread_t *th)
942{
943 thread_sched_lock(sched, th);
944 {
945 thread_sched_to_running_common(sched, th);
946 }
947 thread_sched_unlock(sched, th);
948}
949
950// resume the next thread in the thread ready queue.
951//
952// dequeue the next running thread from the ready thread queue and
953// resume it if available.
954//
955// If the next thread has a dedicated native thread, simply signal it to resume.
956// Otherwise, make the ractor ready so that another NT will run the ractor and the thread.
957static void
958thread_sched_wakeup_next_thread(struct rb_thread_sched *sched, rb_thread_t *th, bool will_switch)
959{
960 ASSERT_thread_sched_locked(sched, th);
961
962 VM_ASSERT(sched->running == th);
963 VM_ASSERT(sched->running->nt != NULL);
964
965 rb_thread_t *next_th = thread_sched_deq(sched);
966
967 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
968 VM_ASSERT(th != next_th);
969
970 thread_sched_set_running(sched, next_th);
971 VM_ASSERT(next_th == sched->running);
972 thread_sched_wakeup_running_thread(sched, next_th, will_switch);
973
974 if (th != next_th) {
975 thread_sched_del_running_thread(sched, th);
976 }
977}
978
979// running -> waiting
980//
981// to_dead: false
982// th will run a dedicated task.
983// run another ready thread.
984// to_dead: true
985// th will be dead.
986// run another ready thread.
987static void
988thread_sched_to_waiting_common0(struct rb_thread_sched *sched, rb_thread_t *th, bool to_dead)
989{
990 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
991
992 if (!to_dead) native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
993
994 RUBY_DEBUG_LOG("%sth:%u", to_dead ? "to_dead " : "", rb_th_serial(th));
995
996 bool can_switch = to_dead ? !th_has_dedicated_nt(th) : false;
997 thread_sched_wakeup_next_thread(sched, th, can_switch);
998}
999
1000// running -> dead (locked)
1001static void
1002thread_sched_to_dead_common(struct rb_thread_sched *sched, rb_thread_t *th)
1003{
1004 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1005 thread_sched_to_waiting_common0(sched, th, true);
1006 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_EXITED, th);
1007}
1008
1009// running -> dead
1010static void
1011thread_sched_to_dead(struct rb_thread_sched *sched, rb_thread_t *th)
1012{
1013 thread_sched_lock(sched, th);
1014 {
1015 thread_sched_to_dead_common(sched, th);
1016 }
1017 thread_sched_unlock(sched, th);
1018}
1019
1020// running -> waiting (locked)
1021//
1022// This thread will run a dedicated task (th->nt->dedicated++).
1023static void
1024thread_sched_to_waiting_common(struct rb_thread_sched *sched, rb_thread_t *th)
1025{
1026 RUBY_DEBUG_LOG("dedicated:%d", th->nt->dedicated);
1027 thread_sched_to_waiting_common0(sched, th, false);
1028}
1029
1030// running -> waiting
1031//
1032// This thread will run a dedicated task.
1033static void
1034thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
1035{
1036 thread_sched_lock(sched, th);
1037 {
1038 thread_sched_to_waiting_common(sched, th);
1039 }
1040 thread_sched_unlock(sched, th);
1041}
1042
1043// mini utility func
1044static void
1045setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
1046{
1047 rb_native_mutex_lock(&th->interrupt_lock);
1048 {
1049 th->unblock.func = func;
1050 th->unblock.arg = arg;
1051 }
1052 rb_native_mutex_unlock(&th->interrupt_lock);
1053}
1054
1055static void
1056ubf_waiting(void *ptr)
1057{
1058 rb_thread_t *th = (rb_thread_t *)ptr;
1059 struct rb_thread_sched *sched = TH_SCHED(th);
1060
 1061 // clear only once; this is safe because th->interrupt_lock is already acquired.
1062 th->unblock.func = NULL;
1063 th->unblock.arg = NULL;
1064
1065 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1066
1067 thread_sched_lock(sched, th);
1068 {
1069 if (sched->running == th) {
1070 // not sleeping yet.
1071 }
1072 else {
1073 thread_sched_to_ready_common(sched, th, true, false);
1074 }
1075 }
1076 thread_sched_unlock(sched, th);
1077}
1078
1079// running -> waiting
1080//
1081// This thread will sleep until another thread wakes it up.
1082static void
1083thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t *th)
1084{
1085 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
1086
1087 RB_VM_SAVE_MACHINE_CONTEXT(th);
1088 setup_ubf(th, ubf_waiting, (void *)th);
1089
1090 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1091
1092 thread_sched_lock(sched, th);
1093 {
1094 if (!RUBY_VM_INTERRUPTED(th->ec)) {
1095 bool can_direct_transfer = !th_has_dedicated_nt(th);
1096 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1097 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1098 }
1099 else {
1100 RUBY_DEBUG_LOG("th:%u interrupted", rb_th_serial(th));
1101 }
1102 }
1103 thread_sched_unlock(sched, th);
1104
1105 setup_ubf(th, NULL, NULL);
1106}
1107
1108// run another thread from the ready queue.
1109// continue running if there are no ready threads.
1110static void
1111thread_sched_yield(struct rb_thread_sched *sched, rb_thread_t *th)
1112{
1113 RUBY_DEBUG_LOG("th:%d sched->readyq_cnt:%d", (int)th->serial, sched->readyq_cnt);
1114
1115 thread_sched_lock(sched, th);
1116 {
1117 if (!ccan_list_empty(&sched->readyq)) {
1118 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1119 thread_sched_wakeup_next_thread(sched, th, !th_has_dedicated_nt(th));
1120 bool can_direct_transfer = !th_has_dedicated_nt(th);
1121 thread_sched_to_ready_common(sched, th, false, can_direct_transfer);
1122 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1123 }
1124 else {
1125 VM_ASSERT(sched->readyq_cnt == 0);
1126 }
1127 }
1128 thread_sched_unlock(sched, th);
1129}
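// Taken together, the functions above implement the per-ractor thread state
// machine:
//
//   waiting --(thread_sched_to_ready)------------------> ready
//   ready   --(thread_sched_wait_running_turn)---------> running
//   running --(thread_sched_to_waiting[_until_wakeup])--> waiting
//   running --(thread_sched_yield)----------------------> ready -> running
//   running --(thread_sched_to_dead)--------------------> dead
//
// with sched->running always naming the single thread allowed to run Ruby
// code for the ractor.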
1130
1131void
1132rb_thread_sched_init(struct rb_thread_sched *sched, bool atfork)
1133{
1134 rb_native_mutex_initialize(&sched->lock_);
1135
1136#if VM_CHECK_MODE
1137 sched->lock_owner = NULL;
1138#endif
1139
1140 ccan_list_head_init(&sched->readyq);
1141 sched->readyq_cnt = 0;
1142
1143#if USE_MN_THREADS
1144 if (!atfork) sched->enable_mn_threads = true; // MN is enabled on Ractors
1145#endif
1146}
1147
1148static void
1149coroutine_transfer0(struct coroutine_context *transfer_from, struct coroutine_context *transfer_to, bool to_dead)
1150{
1151#ifdef RUBY_ASAN_ENABLED
1152 void **fake_stack = to_dead ? NULL : &transfer_from->fake_stack;
1153 __sanitizer_start_switch_fiber(fake_stack, transfer_to->stack_base, transfer_to->stack_size);
1154#endif
1155
 1156 RBIMPL_ATTR_MAYBE_UNUSED()
 1157 struct coroutine_context *returning_from = coroutine_transfer(transfer_from, transfer_to);
1158
1159 /* if to_dead was passed, the caller is promising that this coroutine is finished and it should
1160 * never be resumed! */
1161 VM_ASSERT(!to_dead);
1162#ifdef RUBY_ASAN_ENABLED
1163 __sanitizer_finish_switch_fiber(transfer_from->fake_stack,
1164 (const void**)&returning_from->stack_base, &returning_from->stack_size);
1165#endif
1166
1167}
1168
1169static void
1170thread_sched_switch0(struct coroutine_context *current_cont, rb_thread_t *next_th, struct rb_native_thread *nt, bool to_dead)
1171{
1172 VM_ASSERT(!nt->dedicated);
1173 VM_ASSERT(next_th->nt == NULL);
1174
1175 RUBY_DEBUG_LOG("next_th:%u", rb_th_serial(next_th));
1176
1177 ruby_thread_set_native(next_th);
1178 native_thread_assign(nt, next_th);
1179
1180 coroutine_transfer0(current_cont, next_th->sched.context, to_dead);
1181}
1182
1183static void
1184thread_sched_switch(rb_thread_t *cth, rb_thread_t *next_th)
1185{
1186 struct rb_native_thread *nt = cth->nt;
1187 native_thread_assign(NULL, cth);
1188 RUBY_DEBUG_LOG("th:%u->%u on nt:%d", rb_th_serial(cth), rb_th_serial(next_th), nt->serial);
1189 thread_sched_switch0(cth->sched.context, next_th, nt, cth->status == THREAD_KILLED);
1190}
1191
1192#if VM_CHECK_MODE > 0
1194static unsigned int
1195grq_size(rb_vm_t *vm, rb_ractor_t *cr)
1196{
1197 ASSERT_ractor_sched_locked(vm, cr);
1198
1199 rb_ractor_t *r, *prev_r = NULL;
1200 unsigned int i = 0;
1201
1202 ccan_list_for_each(&vm->ractor.sched.grq, r, threads.sched.grq_node) {
1203 i++;
1204
1205 VM_ASSERT(r != prev_r);
1206 prev_r = r;
1207 }
1208 return i;
1209}
1210#endif
1211
1212static void
1213ractor_sched_enq(rb_vm_t *vm, rb_ractor_t *r)
1214{
1215 struct rb_thread_sched *sched = &r->threads.sched;
1216 rb_ractor_t *cr = NULL; // timer thread can call this function
1217
1218 VM_ASSERT(sched->running != NULL);
1219 VM_ASSERT(sched->running->nt == NULL);
1220
1221 ractor_sched_lock(vm, cr);
1222 {
1223#if VM_CHECK_MODE > 0
1224 // check if grq contains r
1225 rb_ractor_t *tr;
1226 ccan_list_for_each(&vm->ractor.sched.grq, tr, threads.sched.grq_node) {
1227 VM_ASSERT(r != tr);
1228 }
1229#endif
1230
1231 ccan_list_add_tail(&vm->ractor.sched.grq, &sched->grq_node);
1232 vm->ractor.sched.grq_cnt++;
1233 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1234
1235 RUBY_DEBUG_LOG("r:%u th:%u grq_cnt:%u", rb_ractor_id(r), rb_th_serial(sched->running), vm->ractor.sched.grq_cnt);
1236
1237 rb_native_cond_signal(&vm->ractor.sched.cond);
1238
1239 // ractor_sched_dump(vm);
1240 }
1241 ractor_sched_unlock(vm, cr);
1242}
1243
1244
1245#ifndef SNT_KEEP_SECONDS
1246#define SNT_KEEP_SECONDS 0
1247#endif
1248
1249#ifndef MINIMUM_SNT
1250// create at least MINIMUM_SNT SNTs for debugging.
1251#define MINIMUM_SNT 0
1252#endif
1253
1254static rb_ractor_t *
1255ractor_sched_deq(rb_vm_t *vm, rb_ractor_t *cr)
1256{
1257 rb_ractor_t *r;
1258
1259 ractor_sched_lock(vm, cr);
1260 {
1261 RUBY_DEBUG_LOG("empty? %d", ccan_list_empty(&vm->ractor.sched.grq));
1262 // ractor_sched_dump(vm);
1263
1264 VM_ASSERT(rb_current_execution_context(false) == NULL);
1265 VM_ASSERT(grq_size(vm, cr) == vm->ractor.sched.grq_cnt);
1266
1267 while ((r = ccan_list_pop(&vm->ractor.sched.grq, rb_ractor_t, threads.sched.grq_node)) == NULL) {
1268 RUBY_DEBUG_LOG("wait grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1269
1270#if SNT_KEEP_SECONDS > 0
1271 rb_hrtime_t abs = rb_hrtime_add(rb_hrtime_now(), RB_HRTIME_PER_SEC * SNT_KEEP_SECONDS);
1272 if (native_cond_timedwait(&vm->ractor.sched.cond, &vm->ractor.sched.lock, &abs) == ETIMEDOUT) {
1273 RUBY_DEBUG_LOG("timeout, grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1274 VM_ASSERT(r == NULL);
1275 vm->ractor.sched.snt_cnt--;
1276 vm->ractor.sched.running_cnt--;
1277 break;
1278 }
1279 else {
1280 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1281 }
1282#else
1283 ractor_sched_set_unlocked(vm, cr);
1284 rb_native_cond_wait(&vm->ractor.sched.cond, &vm->ractor.sched.lock);
1285 ractor_sched_set_locked(vm, cr);
1286
1287 RUBY_DEBUG_LOG("wakeup grq_cnt:%d", (int)vm->ractor.sched.grq_cnt);
1288#endif
1289 }
1290
1291 VM_ASSERT(rb_current_execution_context(false) == NULL);
1292
1293 if (r) {
1294 VM_ASSERT(vm->ractor.sched.grq_cnt > 0);
1295 vm->ractor.sched.grq_cnt--;
1296 RUBY_DEBUG_LOG("r:%d grq_cnt:%u", (int)rb_ractor_id(r), vm->ractor.sched.grq_cnt);
1297 }
1298 else {
1299 VM_ASSERT(SNT_KEEP_SECONDS > 0);
1300 // timeout
1301 }
1302 }
1303 ractor_sched_unlock(vm, cr);
1304
1305 return r;
1306}
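// Global ractor queue (grq) protocol: ractor_sched_enq() is called for a
// ractor whose next thread has no native thread attached; it appends the
// ractor's sched to vm->ractor.sched.grq and signals vm->ractor.sched.cond.
// Shared native threads call ractor_sched_deq() to pop the next ractor to
// serve, sleeping on the same condvar while the queue is empty (and, when
// SNT_KEEP_SECONDS > 0, retiring after that many seconds of idleness).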
1307
1308void rb_ractor_lock_self(rb_ractor_t *r);
1309void rb_ractor_unlock_self(rb_ractor_t *r);
1310
1311void
1312rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf)
1313{
 1314 // the ractor lock of cr is acquired
 1315 // cr is in the sleeping state
1316 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1317 struct rb_thread_sched *sched = TH_SCHED(th);
1318 cr->sync.wait.waiting_thread = th; // TODO: multi-thread
1319
1320 setup_ubf(th, ubf, (void *)cr);
1321
1322 thread_sched_lock(sched, th);
1323 {
1324 rb_ractor_unlock_self(cr);
1325 {
1326 if (RUBY_VM_INTERRUPTED(th->ec)) {
1327 RUBY_DEBUG_LOG("interrupted");
1328 }
1329 else if (cr->sync.wait.wakeup_status != wakeup_none) {
1330 RUBY_DEBUG_LOG("awaken:%d", (int)cr->sync.wait.wakeup_status);
1331 }
1332 else {
1333 // sleep
1334 RB_VM_SAVE_MACHINE_CONTEXT(th);
1335 th->status = THREAD_STOPPED_FOREVER;
1336
1337 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
1338
1339 bool can_direct_transfer = !th_has_dedicated_nt(th);
1340 thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
1341 thread_sched_wait_running_turn(sched, th, can_direct_transfer);
1342 th->status = THREAD_RUNNABLE;
1343 // wakeup
1344 }
1345 }
1346 }
1347 thread_sched_unlock(sched, th);
1348
1349 setup_ubf(th, NULL, NULL);
1350
1351 rb_ractor_lock_self(cr);
1352 cr->sync.wait.waiting_thread = NULL;
1353}
1354
1355void
1356rb_ractor_sched_wakeup(rb_ractor_t *r)
1357{
1358 rb_thread_t *r_th = r->sync.wait.waiting_thread;
1359 // ractor lock of r is acquired
1360 struct rb_thread_sched *sched = TH_SCHED(r_th);
1361
1362 VM_ASSERT(r->sync.wait.wakeup_status != 0);
1363
1364 thread_sched_lock(sched, r_th);
1365 {
1366 if (r_th->status == THREAD_STOPPED_FOREVER) {
1367 thread_sched_to_ready_common(sched, r_th, true, false);
1368 }
1369 }
1370 thread_sched_unlock(sched, r_th);
1371}
1372
1373static bool
1374ractor_sched_barrier_completed_p(rb_vm_t *vm)
1375{
1376 RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
1377 VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
1378 return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
1379}
1380
1381void
1382rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
1383{
1384 VM_ASSERT(cr == GET_RACTOR());
1385 VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
1386 VM_ASSERT(!vm->ractor.sched.barrier_waiting);
1387 VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
1388
1389 RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
1390
1391 unsigned int lock_rec;
1392
1393 ractor_sched_lock(vm, cr);
1394 {
1395 vm->ractor.sched.barrier_waiting = true;
1396
1397 // release VM lock
1398 lock_rec = vm->ractor.sync.lock_rec;
1399 vm->ractor.sync.lock_rec = 0;
1400 vm->ractor.sync.lock_owner = NULL;
1401 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1402 {
1403 // interrupts all running threads
1404 rb_thread_t *ith;
1405 ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
1406 if (ith->ractor != cr) {
1407 RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
1408 RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
1409 }
1410 }
1411
1412 // wait for other ractors
1413 while (!ractor_sched_barrier_completed_p(vm)) {
1414 ractor_sched_set_unlocked(vm, cr);
1415 rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
1416 ractor_sched_set_locked(vm, cr);
1417 }
1418 }
1419 }
1420 ractor_sched_unlock(vm, cr);
1421
1422 // acquire VM lock
1423 rb_native_mutex_lock(&vm->ractor.sync.lock);
1424 vm->ractor.sync.lock_rec = lock_rec;
1425 vm->ractor.sync.lock_owner = cr;
1426
1427 RUBY_DEBUG_LOG("completed seirial:%u", vm->ractor.sched.barrier_serial);
1428
1429 ractor_sched_lock(vm, cr);
1430 {
1431 vm->ractor.sched.barrier_waiting = false;
1432 vm->ractor.sched.barrier_serial++;
1433 vm->ractor.sched.barrier_waiting_cnt = 0;
1434 rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
1435 }
1436 ractor_sched_unlock(vm, cr);
1437}
1438
1439static void
1440ractor_sched_barrier_join_signal_locked(rb_vm_t *vm)
1441{
1442 if (ractor_sched_barrier_completed_p(vm)) {
1443 rb_native_cond_signal(&vm->ractor.sched.barrier_complete_cond);
1444 }
1445}
1446
1447static void
1448ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th)
1449{
1450 VM_ASSERT(vm->ractor.sched.barrier_waiting);
1451
1452 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1453
1454 while (vm->ractor.sched.barrier_serial == barrier_serial) {
1455 RUBY_DEBUG_LOG("sleep serial:%u", barrier_serial);
1456 RB_VM_SAVE_MACHINE_CONTEXT(th);
1457
1458 rb_ractor_t *cr = th->ractor;
1459 ractor_sched_set_unlocked(vm, cr);
1460 rb_native_cond_wait(&vm->ractor.sched.barrier_release_cond, &vm->ractor.sched.lock);
1461 ractor_sched_set_locked(vm, cr);
1462
1463 RUBY_DEBUG_LOG("wakeup serial:%u", barrier_serial);
1464 }
1465}
1466
1467void
1468rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
1469{
1470 VM_ASSERT(cr->threads.sched.running != NULL); // running ractor
1471 VM_ASSERT(cr == GET_RACTOR());
1472 VM_ASSERT(vm->ractor.sync.lock_owner == NULL); // VM is locked, but owner == NULL
1473 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1474
1475#if USE_RUBY_DEBUG_LOG || VM_CHECK_MODE > 0
1476 unsigned int barrier_serial = vm->ractor.sched.barrier_serial;
1477#endif
1478
1479 RUBY_DEBUG_LOG("join");
1480
1481 rb_native_mutex_unlock(&vm->ractor.sync.lock);
1482 {
1483 VM_ASSERT(vm->ractor.sched.barrier_waiting); // VM needs barrier sync
1484 VM_ASSERT(vm->ractor.sched.barrier_serial == barrier_serial);
1485
1486 ractor_sched_lock(vm, cr);
1487 {
1488 // running_cnt
1489 vm->ractor.sched.barrier_waiting_cnt++;
1490 RUBY_DEBUG_LOG("waiting_cnt:%u serial:%u", vm->ractor.sched.barrier_waiting_cnt, barrier_serial);
1491
1492 ractor_sched_barrier_join_signal_locked(vm);
1493 ractor_sched_barrier_join_wait_locked(vm, cr->threads.sched.running);
1494 }
1495 ractor_sched_unlock(vm, cr);
1496 }
1497
1498 rb_native_mutex_lock(&vm->ractor.sync.lock);
1499 // VM locked here
1500}
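// Barrier protocol summary: the initiating ractor sets barrier_waiting,
// interrupts every other running thread, and waits on barrier_complete_cond
// until running_cnt - barrier_waiting_cnt == 1 (only itself still running).
// Every other ractor reaches rb_ractor_sched_barrier_join(), increments
// barrier_waiting_cnt, signals barrier_complete_cond, and sleeps on
// barrier_release_cond until the initiator bumps barrier_serial and
// broadcasts the release.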
1501
1502#if 0
1503// TODO
1504
1505static void clear_thread_cache_altstack(void);
1506
1507static void
1508rb_thread_sched_destroy(struct rb_thread_sched *sched)
1509{
1510 /*
1511 * only called once at VM shutdown (not atfork), another thread
1512 * may still grab vm->gvl.lock when calling gvl_release at
1513 * the end of thread_start_func_2
1514 */
1515 if (0) {
1516 rb_native_mutex_destroy(&sched->lock);
1517 }
1518 clear_thread_cache_altstack();
1519}
1520#endif
1521
1522#ifdef RB_THREAD_T_HAS_NATIVE_ID
1523static int
1524get_native_thread_id(void)
1525{
1526#ifdef __linux__
1527 return (int)syscall(SYS_gettid);
1528#elif defined(__FreeBSD__)
1529 return pthread_getthreadid_np();
1530#endif
1531}
1532#endif
1533
1534#if defined(HAVE_WORKING_FORK)
1535static void
1536thread_sched_atfork(struct rb_thread_sched *sched)
1537{
1538 current_fork_gen++;
1539 rb_thread_sched_init(sched, true);
1540 rb_thread_t *th = GET_THREAD();
1541 rb_vm_t *vm = GET_VM();
1542
1543 if (th_has_dedicated_nt(th)) {
1544 vm->ractor.sched.snt_cnt = 0;
1545 }
1546 else {
1547 vm->ractor.sched.snt_cnt = 1;
1548 }
1549 vm->ractor.sched.running_cnt = 0;
1550
1551 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1552#if VM_CHECK_MODE > 0
1553 vm->ractor.sched.lock_owner = NULL;
1554 vm->ractor.sched.locked = false;
1555#endif
1556
1557 // rb_native_cond_destroy(&vm->ractor.sched.cond);
1558 rb_native_cond_initialize(&vm->ractor.sched.cond);
1559 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1560 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1561
1562 ccan_list_head_init(&vm->ractor.sched.grq);
1563 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1564 ccan_list_head_init(&vm->ractor.sched.running_threads);
1565
1566 VM_ASSERT(sched->is_running);
1567 sched->is_running_timeslice = false;
1568
1569 if (sched->running != th) {
1570 thread_sched_to_running(sched, th);
1571 }
1572 else {
1573 thread_sched_setup_running_threads(sched, th->ractor, vm, th, NULL, NULL);
1574 }
1575
1576#ifdef RB_THREAD_T_HAS_NATIVE_ID
1577 if (th->nt) {
1578 th->nt->tid = get_native_thread_id();
1579 }
1580#endif
1581}
1582
1583#endif
1584
1585#ifdef RB_THREAD_LOCAL_SPECIFIER
1586static RB_THREAD_LOCAL_SPECIFIER rb_thread_t *ruby_native_thread;
1587#else
1588static pthread_key_t ruby_native_thread_key;
1589#endif
1590
1591static void
1592null_func(int i)
1593{
1594 /* null */
 1595 // This function can be called from a signal handler
1596 // RUBY_DEBUG_LOG("i:%d", i);
1597}
1598
1599rb_thread_t *
1600ruby_thread_from_native(void)
1601{
1602#ifdef RB_THREAD_LOCAL_SPECIFIER
1603 return ruby_native_thread;
1604#else
1605 return pthread_getspecific(ruby_native_thread_key);
1606#endif
1607}
1608
1609int
1610ruby_thread_set_native(rb_thread_t *th)
1611{
1612 if (th) {
1613#ifdef USE_UBF_LIST
1614 ccan_list_node_init(&th->sched.node.ubf);
1615#endif
1616 }
1617
1618 // setup TLS
1619
1620 if (th && th->ec) {
1621 rb_ractor_set_current_ec(th->ractor, th->ec);
1622 }
1623#ifdef RB_THREAD_LOCAL_SPECIFIER
1624 ruby_native_thread = th;
1625 return 1;
1626#else
1627 return pthread_setspecific(ruby_native_thread_key, th) == 0;
1628#endif
1629}
1630
1631static void native_thread_setup(struct rb_native_thread *nt);
1632static void native_thread_setup_on_thread(struct rb_native_thread *nt);
1633
1634void
1635Init_native_thread(rb_thread_t *main_th)
1636{
1637#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
1638 if (condattr_monotonic) {
1639 int r = pthread_condattr_init(condattr_monotonic);
1640 if (r == 0) {
1641 r = pthread_condattr_setclock(condattr_monotonic, CLOCK_MONOTONIC);
1642 }
1643 if (r) condattr_monotonic = NULL;
1644 }
1645#endif
1646
1647#ifndef RB_THREAD_LOCAL_SPECIFIER
1648 if (pthread_key_create(&ruby_native_thread_key, 0) == EAGAIN) {
1649 rb_bug("pthread_key_create failed (ruby_native_thread_key)");
1650 }
1651 if (pthread_key_create(&ruby_current_ec_key, 0) == EAGAIN) {
1652 rb_bug("pthread_key_create failed (ruby_current_ec_key)");
1653 }
1654#endif
1655 ruby_posix_signal(SIGVTALRM, null_func);
1656
1657 // setup vm
1658 rb_vm_t *vm = main_th->vm;
1659 rb_native_mutex_initialize(&vm->ractor.sched.lock);
1660 rb_native_cond_initialize(&vm->ractor.sched.cond);
1661 rb_native_cond_initialize(&vm->ractor.sched.barrier_complete_cond);
1662 rb_native_cond_initialize(&vm->ractor.sched.barrier_release_cond);
1663
1664 ccan_list_head_init(&vm->ractor.sched.grq);
1665 ccan_list_head_init(&vm->ractor.sched.timeslice_threads);
1666 ccan_list_head_init(&vm->ractor.sched.running_threads);
1667
1668 // setup main thread
1669 main_th->nt->thread_id = pthread_self();
1670 main_th->nt->serial = 1;
1671#ifdef RUBY_NT_SERIAL
1672 ruby_nt_serial = 1;
1673#endif
1674 ruby_thread_set_native(main_th);
1675 native_thread_setup(main_th->nt);
1676 native_thread_setup_on_thread(main_th->nt);
1677
1678 TH_SCHED(main_th)->running = main_th;
1679 main_th->has_dedicated_nt = 1;
1680
1681 thread_sched_setup_running_threads(TH_SCHED(main_th), main_th->ractor, vm, main_th, NULL, NULL);
1682
1683 // setup main NT
1684 main_th->nt->dedicated = 1;
1685 main_th->nt->vm = vm;
1686
1687 // setup mn
1688 vm->ractor.sched.dnt_cnt = 1;
1689}
1690
1691extern int ruby_mn_threads_enabled;
1692
1693void
1694ruby_mn_threads_params(void)
1695{
1696 rb_vm_t *vm = GET_VM();
1697 rb_ractor_t *main_ractor = GET_RACTOR();
1698
1699 const char *mn_threads_cstr = getenv("RUBY_MN_THREADS");
1700 bool enable_mn_threads = false;
1701
1702 if (USE_MN_THREADS && mn_threads_cstr && (enable_mn_threads = atoi(mn_threads_cstr) > 0)) {
1703 // enabled
1704 ruby_mn_threads_enabled = 1;
1705 }
1706 main_ractor->threads.sched.enable_mn_threads = enable_mn_threads;
1707
1708 const char *max_cpu_cstr = getenv("RUBY_MAX_CPU");
1709 const int default_max_cpu = 8; // TODO: CPU num?
1710 int max_cpu = default_max_cpu;
1711
1712 if (USE_MN_THREADS && max_cpu_cstr) {
1713 int given_max_cpu = atoi(max_cpu_cstr);
1714 if (given_max_cpu > 0) {
1715 max_cpu = given_max_cpu;
1716 }
1717 }
1718
1719 vm->ractor.sched.max_cpu = max_cpu;
1720}
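// For example (illustrative): running with RUBY_MN_THREADS=1 enables M:N
// scheduling for the main Ractor, and RUBY_MAX_CPU=4 overrides the default
// max_cpu of 8; unset or non-positive values keep the defaults above.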
1721
1722static void
1723native_thread_dedicated_inc(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1724{
1725 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated + 1);
1726
1727 if (nt->dedicated == 0) {
1728 ractor_sched_lock(vm, cr);
1729 {
1730 vm->ractor.sched.snt_cnt--;
1731 vm->ractor.sched.dnt_cnt++;
1732 }
1733 ractor_sched_unlock(vm, cr);
1734 }
1735
1736 nt->dedicated++;
1737}
1738
1739static void
1740native_thread_dedicated_dec(rb_vm_t *vm, rb_ractor_t *cr, struct rb_native_thread *nt)
1741{
1742 RUBY_DEBUG_LOG("nt:%d %d->%d", nt->serial, nt->dedicated, nt->dedicated - 1);
1743 VM_ASSERT(nt->dedicated > 0);
1744 nt->dedicated--;
1745
1746 if (nt->dedicated == 0) {
1747 ractor_sched_lock(vm, cr);
1748 {
1749 nt->vm->ractor.sched.snt_cnt++;
1750 nt->vm->ractor.sched.dnt_cnt--;
1751 }
1752 ractor_sched_unlock(vm, cr);
1753 }
1754}
1755
1756static void
1757native_thread_assign(struct rb_native_thread *nt, rb_thread_t *th)
1758{
1759#if USE_RUBY_DEBUG_LOG
1760 if (nt) {
1761 if (th->nt) {
1762 RUBY_DEBUG_LOG("th:%d nt:%d->%d", (int)th->serial, (int)th->nt->serial, (int)nt->serial);
1763 }
1764 else {
1765 RUBY_DEBUG_LOG("th:%d nt:NULL->%d", (int)th->serial, (int)nt->serial);
1766 }
1767 }
1768 else {
1769 if (th->nt) {
1770 RUBY_DEBUG_LOG("th:%d nt:%d->NULL", (int)th->serial, (int)th->nt->serial);
1771 }
1772 else {
1773 RUBY_DEBUG_LOG("th:%d nt:NULL->NULL", (int)th->serial);
1774 }
1775 }
1776#endif
1777
1778 th->nt = nt;
1779}
1780
1781static void
1782native_thread_destroy(struct rb_native_thread *nt)
1783{
1784 if (nt) {
1785 rb_native_cond_destroy(&nt->cond.readyq);
1786
1787 if (&nt->cond.readyq != &nt->cond.intr) {
1788 rb_native_cond_destroy(&nt->cond.intr);
1789 }
1790
1791 RB_ALTSTACK_FREE(nt->altstack);
1792 ruby_xfree(nt->nt_context);
1793 ruby_xfree(nt);
1794 }
1795}
1796
1797#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
1798#define STACKADDR_AVAILABLE 1
1799#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
1800#define STACKADDR_AVAILABLE 1
1801#undef MAINSTACKADDR_AVAILABLE
1802#define MAINSTACKADDR_AVAILABLE 1
1803void *pthread_get_stackaddr_np(pthread_t);
1804size_t pthread_get_stacksize_np(pthread_t);
1805#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1806#define STACKADDR_AVAILABLE 1
1807#elif defined HAVE_PTHREAD_GETTHRDS_NP
1808#define STACKADDR_AVAILABLE 1
1809#elif defined __HAIKU__
1810#define STACKADDR_AVAILABLE 1
1811#endif
1812
1813#ifndef MAINSTACKADDR_AVAILABLE
1814# ifdef STACKADDR_AVAILABLE
1815# define MAINSTACKADDR_AVAILABLE 1
1816# else
1817# define MAINSTACKADDR_AVAILABLE 0
1818# endif
1819#endif
1820#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
1821# define get_main_stack(addr, size) get_stack(addr, size)
1822#endif
1823
1824#ifdef STACKADDR_AVAILABLE
1825/*
1826 * Get the initial address and size of current thread's stack
1827 */
1828static int
1829get_stack(void **addr, size_t *size)
1830{
1831#define CHECK_ERR(expr) \
1832 {int err = (expr); if (err) return err;}
1833#ifdef HAVE_PTHREAD_GETATTR_NP /* Linux */
1834 pthread_attr_t attr;
1835 size_t guard = 0;
1836 STACK_GROW_DIR_DETECTION;
1837 CHECK_ERR(pthread_getattr_np(pthread_self(), &attr));
1838# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1839 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1840 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1841# else
1842 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1843 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1844# endif
1845# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
1846 CHECK_ERR(pthread_attr_getguardsize(&attr, &guard));
1847# else
1848 guard = getpagesize();
1849# endif
1850 *size -= guard;
1851 pthread_attr_destroy(&attr);
1852#elif defined HAVE_PTHREAD_ATTR_GET_NP /* FreeBSD, DragonFly BSD, NetBSD */
1853 pthread_attr_t attr;
1854 CHECK_ERR(pthread_attr_init(&attr));
1855 CHECK_ERR(pthread_attr_get_np(pthread_self(), &attr));
1856# ifdef HAVE_PTHREAD_ATTR_GETSTACK
1857 CHECK_ERR(pthread_attr_getstack(&attr, addr, size));
1858# else
1859 CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
1860 CHECK_ERR(pthread_attr_getstacksize(&attr, size));
1861# endif
1862 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1863 pthread_attr_destroy(&attr);
1864#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP) /* MacOS X */
1865 pthread_t th = pthread_self();
1866 *addr = pthread_get_stackaddr_np(th);
1867 *size = pthread_get_stacksize_np(th);
1868#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
1869 stack_t stk;
1870# if defined HAVE_THR_STKSEGMENT /* Solaris */
1871 CHECK_ERR(thr_stksegment(&stk));
1872# else /* OpenBSD */
1873 CHECK_ERR(pthread_stackseg_np(pthread_self(), &stk));
1874# endif
1875 *addr = stk.ss_sp;
1876 *size = stk.ss_size;
1877#elif defined HAVE_PTHREAD_GETTHRDS_NP /* AIX */
1878 pthread_t th = pthread_self();
1879 struct __pthrdsinfo thinfo;
1880 char reg[256];
1881 int regsiz=sizeof(reg);
1882 CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
1883 &thinfo, sizeof(thinfo),
1884 &reg, &regsiz));
1885 *addr = thinfo.__pi_stackaddr;
1886 /* Must not use thinfo.__pi_stacksize for size.
1887 It is around 3KB smaller than the correct size
1888 calculated by thinfo.__pi_stackend - thinfo.__pi_stackaddr. */
1889 *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
1890 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1891#elif defined __HAIKU__
1892 thread_info info;
1893 STACK_GROW_DIR_DETECTION;
1894 CHECK_ERR(get_thread_info(find_thread(NULL), &info));
1895 *addr = info.stack_base;
1896 *size = (uintptr_t)info.stack_end - (uintptr_t)info.stack_base;
1897 STACK_DIR_UPPER((void)0, (void)(*addr = (char *)*addr + *size));
1898#else
1899#error STACKADDR_AVAILABLE is defined but not implemented.
1900#endif
1901 return 0;
1902#undef CHECK_ERR
1903}
1904#endif
1905
1906static struct {
1907 rb_nativethread_id_t id;
1908 size_t stack_maxsize;
1909 VALUE *stack_start;
1910} native_main_thread;
1911
1912#ifdef STACK_END_ADDRESS
1913extern void *STACK_END_ADDRESS;
1914#endif
1915
1916enum {
1917 RUBY_STACK_SPACE_LIMIT = 1024 * 1024, /* 1024KB */
1918 RUBY_STACK_SPACE_RATIO = 5
1919};
1920
1921static size_t
1922space_size(size_t stack_size)
1923{
1924 size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
1925 if (space_size > RUBY_STACK_SPACE_LIMIT) {
1926 return RUBY_STACK_SPACE_LIMIT;
1927 }
1928 else {
1929 return space_size;
1930 }
1931}
1932
1933static void
1934native_thread_init_main_thread_stack(void *addr)
1935{
1936 native_main_thread.id = pthread_self();
1937#ifdef RUBY_ASAN_ENABLED
1938 addr = asan_get_real_stack_addr((void *)addr);
1939#endif
1940
1941#if MAINSTACKADDR_AVAILABLE
1942 if (native_main_thread.stack_maxsize) return;
1943 {
1944 void* stackaddr;
1945 size_t size;
1946 if (get_main_stack(&stackaddr, &size) == 0) {
1947 native_main_thread.stack_maxsize = size;
1948 native_main_thread.stack_start = stackaddr;
1949 goto bound_check;
1950 }
1951 }
1952#endif
1953#ifdef STACK_END_ADDRESS
1954 native_main_thread.stack_start = STACK_END_ADDRESS;
1955#else
1956 if (!native_main_thread.stack_start ||
1957 STACK_UPPER((VALUE *)(void *)&addr,
1958 native_main_thread.stack_start > (VALUE *)addr,
1959 native_main_thread.stack_start < (VALUE *)addr)) {
1960 native_main_thread.stack_start = (VALUE *)addr;
1961 }
1962#endif
1963 {
1964#if defined(HAVE_GETRLIMIT)
1965#if defined(PTHREAD_STACK_DEFAULT)
1966# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
1967# error "PTHREAD_STACK_DEFAULT is too small"
1968# endif
1969 size_t size = PTHREAD_STACK_DEFAULT;
1970#else
1971 size_t size = RUBY_VM_THREAD_VM_STACK_SIZE;
1972#endif
1973 size_t space;
1974 int pagesize = getpagesize();
1975 struct rlimit rlim;
1976 STACK_GROW_DIR_DETECTION;
1977 if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
1978 size = (size_t)rlim.rlim_cur;
1979 }
1980 addr = native_main_thread.stack_start;
1981 if (IS_STACK_DIR_UPPER()) {
1982 space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
1983 }
1984 else {
1985 space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
1986 }
1987 native_main_thread.stack_maxsize = space;
1988#endif
1989 }
1990
1991#if MAINSTACKADDR_AVAILABLE
1992 bound_check:
1993#endif
1994 /* If addr is out of range of main-thread stack range estimation, */
1995 /* it should be on co-routine (alternative stack). [Feature #2294] */
1996 {
1997 void *start, *end;
1998 STACK_GROW_DIR_DETECTION;
1999
2000 if (IS_STACK_DIR_UPPER()) {
2001 start = native_main_thread.stack_start;
2002 end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
2003 }
2004 else {
2005 start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
2006 end = native_main_thread.stack_start;
2007 }
2008
2009 if ((void *)addr < start || (void *)addr > end) {
2010 /* out of range */
2011 native_main_thread.stack_start = (VALUE *)addr;
2012 native_main_thread.stack_maxsize = 0; /* unknown */
2013 }
2014 }
2015}
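/* A rough illustration of the getrlimit() branch in
 * native_thread_init_main_thread_stack() above, for a downward-growing stack
 * and made-up addresses: with stack_start = 0x7fff0010, rlim_cur = 0x8000
 * (32 KiB) and a 4 KiB page size, (addr - size) = 0x7ffe8010 is rounded up to
 * the next page boundary 0x7ffe9000, so space = 0x7fff0010 - 0x7ffe9000 =
 * 0x7010 bytes. In other words the usable region is trimmed to whole pages
 * below the recorded stack start. */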
2016
2017#define CHECK_ERR(expr) \
2018 {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
2019
2020static int
2021native_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
2022{
2023 rb_nativethread_id_t curr = pthread_self();
2024#ifdef RUBY_ASAN_ENABLED
2025 local_in_parent_frame = asan_get_real_stack_addr(local_in_parent_frame);
2026 th->ec->machine.asan_fake_stack_handle = asan_get_thread_fake_stack_handle();
2027#endif
2028
2029 if (!native_main_thread.id) {
2030 /* This thread is the first thread, must be the main thread -
2031 * configure the native_main_thread object */
2032 native_thread_init_main_thread_stack(local_in_parent_frame);
2033 }
2034
2035 if (pthread_equal(curr, native_main_thread.id)) {
2036 th->ec->machine.stack_start = native_main_thread.stack_start;
2037 th->ec->machine.stack_maxsize = native_main_thread.stack_maxsize;
2038 }
2039 else {
2040#ifdef STACKADDR_AVAILABLE
2041 if (th_has_dedicated_nt(th)) {
2042 void *start;
2043 size_t size;
2044
2045 if (get_stack(&start, &size) == 0) {
2046 uintptr_t diff = (uintptr_t)start - (uintptr_t)local_in_parent_frame;
2047 th->ec->machine.stack_start = local_in_parent_frame;
2048 th->ec->machine.stack_maxsize = size - diff;
2049 }
2050 }
2051#else
2052 rb_raise(rb_eNotImpError, "ruby engine can initialize only in the main thread");
2053#endif
2054 }
2055
2056 return 0;
2057}
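/* In the non-main-thread branch of native_thread_init_stack() above,
 * get_stack() reports the full native stack (its top, for a downward-growing
 * stack) while local_in_parent_frame already lies somewhere inside it, so
 * diff is the number of bytes consumed above the recorded stack_start and
 * stack_maxsize is what remains below it. With made-up numbers:
 * start = 0x7f0000800000, local_in_parent_frame = 0x7f00007ff8a0 and
 * size = 8 MiB give diff = 0x760, so stack_maxsize = 8 MiB - 0x760 bytes. */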
2058
2059struct nt_param {
2060 rb_vm_t *vm;
2061 struct rb_native_thread *nt;
2062};
2063
2064static void *
2065nt_start(void *ptr);
2066
2067static int
2068native_thread_create0(struct rb_native_thread *nt)
2069{
2070 int err = 0;
2071 pthread_attr_t attr;
2072
2073 const size_t stack_size = nt->vm->default_params.thread_machine_stack_size;
2074 const size_t space = space_size(stack_size);
2075
2076 nt->machine_stack_maxsize = stack_size - space;
2077
2078#ifdef USE_SIGALTSTACK
2079 nt->altstack = rb_allocate_sigaltstack();
2080#endif
2081
2082 CHECK_ERR(pthread_attr_init(&attr));
2083
2084# ifdef PTHREAD_STACK_MIN
2085 RUBY_DEBUG_LOG("stack size: %lu", (unsigned long)stack_size);
2086 CHECK_ERR(pthread_attr_setstacksize(&attr, stack_size));
2087# endif
2088
2089# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
2090 CHECK_ERR(pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED));
2091# endif
2092 CHECK_ERR(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED));
2093
2094 err = pthread_create(&nt->thread_id, &attr, nt_start, nt);
2095
2096 RUBY_DEBUG_LOG("nt:%d err:%d", (int)nt->serial, err);
2097
2098 CHECK_ERR(pthread_attr_destroy(&attr));
2099
2100 return err;
2101}
2102
2103static void
2104native_thread_setup(struct rb_native_thread *nt)
2105{
2106 // init cond
2107 rb_native_cond_initialize(&nt->cond.readyq);
2108
2109 if (&nt->cond.readyq != &nt->cond.intr) {
2110 rb_native_cond_initialize(&nt->cond.intr);
2111 }
2112}
2113
2114static void
2115native_thread_setup_on_thread(struct rb_native_thread *nt)
2116{
2117 // init tid
2118#ifdef RB_THREAD_T_HAS_NATIVE_ID
2119 nt->tid = get_native_thread_id();
2120#endif
2121
2122 // init signal handler
2123 RB_ALTSTACK_INIT(nt->altstack, nt->altstack);
2124}
2125
2126static struct rb_native_thread *
2127native_thread_alloc(void)
2128{
2129 struct rb_native_thread *nt = ZALLOC(struct rb_native_thread);
2130 native_thread_setup(nt);
2131
2132#if USE_MN_THREADS
2133 nt->nt_context = ruby_xmalloc(sizeof(struct coroutine_context));
2134#endif
2135
2136#if USE_RUBY_DEBUG_LOG
2137 static rb_atomic_t nt_serial = 2;
2138 nt->serial = RUBY_ATOMIC_FETCH_ADD(nt_serial, 1);
2139#endif
2140 return nt;
2141}
2142
2143static int
2144native_thread_create_dedicated(rb_thread_t *th)
2145{
2146 th->nt = native_thread_alloc();
2147 th->nt->vm = th->vm;
2148 th->nt->running_thread = th;
2149 th->nt->dedicated = 1;
2150
2151 // vm stack
2152 size_t vm_stack_word_size = th->vm->default_params.thread_vm_stack_size / sizeof(VALUE);
2153 void *vm_stack = ruby_xmalloc(vm_stack_word_size * sizeof(VALUE));
2154 th->sched.malloc_stack = true;
2155 rb_ec_initialize_vm_stack(th->ec, vm_stack, vm_stack_word_size);
2156 th->sched.context_stack = vm_stack;
2157
2158
2159 int err = native_thread_create0(th->nt);
2160 if (!err) {
2161 // setup
2162 thread_sched_to_ready(TH_SCHED(th), th);
2163 }
2164 return err;
2165}
2166
2167static void
2168call_thread_start_func_2(rb_thread_t *th)
2169{
2170 /* Capture the address of a local in this stack frame to mark the beginning of the
2171 machine stack for this thread. This is required even if we can tell the real
2172 stack beginning from the pthread API in native_thread_init_stack, because
2173 glibc stores some of its own data on the stack before calling into user code
2174 on a new thread, and replacing that data on fiber-switch would break it (see
2175 bug #13887) */
2176 VALUE stack_start = 0;
2177 VALUE *stack_start_addr = asan_get_real_stack_addr(&stack_start);
2178
2179 native_thread_init_stack(th, stack_start_addr);
2180 thread_start_func_2(th, th->ec->machine.stack_start);
2181}
2182
2183static void *
2184nt_start(void *ptr)
2185{
2186 struct rb_native_thread *nt = (struct rb_native_thread *)ptr;
2187 rb_vm_t *vm = nt->vm;
2188
2189 native_thread_setup_on_thread(nt);
2190
2191 // init tid
2192#ifdef RB_THREAD_T_HAS_NATIVE_ID
2193 nt->tid = get_native_thread_id();
2194#endif
2195
2196#if USE_RUBY_DEBUG_LOG && defined(RUBY_NT_SERIAL)
2197 ruby_nt_serial = nt->serial;
2198#endif
2199
2200 RUBY_DEBUG_LOG("nt:%u", nt->serial);
2201
2202 if (!nt->dedicated) {
2203 coroutine_initialize_main(nt->nt_context);
2204 }
2205
2206 while (1) {
2207 if (nt->dedicated) {
2208 // wait running turn
2209 rb_thread_t *th = nt->running_thread;
2210 struct rb_thread_sched *sched = TH_SCHED(th);
2211
2212 RUBY_DEBUG_LOG("on dedicated th:%u", rb_th_serial(th));
2213 ruby_thread_set_native(th);
2214
2215 thread_sched_lock(sched, th);
2216 {
2217 if (sched->running == th) {
2218 thread_sched_add_running_thread(sched, th);
2219 }
2220 thread_sched_wait_running_turn(sched, th, false);
2221 }
2222 thread_sched_unlock(sched, th);
2223
2224 // start the thread
2225 call_thread_start_func_2(th);
2226 break; // TODO: allow changing to an SNT
2227 }
2228 else {
2229 RUBY_DEBUG_LOG("check next");
2230 rb_ractor_t *r = ractor_sched_deq(vm, NULL);
2231
2232 if (r) {
2233 struct rb_thread_sched *sched = &r->threads.sched;
2234
2235 thread_sched_lock(sched, NULL);
2236 {
2237 rb_thread_t *next_th = sched->running;
2238
2239 if (next_th && next_th->nt == NULL) {
2240 RUBY_DEBUG_LOG("nt:%d next_th:%d", (int)nt->serial, (int)next_th->serial);
2241 thread_sched_switch0(nt->nt_context, next_th, nt, false);
2242 }
2243 else {
2244 RUBY_DEBUG_LOG("no schedulable threads -- next_th:%p", next_th);
2245 }
2246 }
2247 thread_sched_unlock(sched, NULL);
2248 }
2249 else {
2250 // timeout -> deleted.
2251 break;
2252 }
2253
2254 if (nt->dedicated) {
2255 // SNT becomes DNT while running
2256 break;
2257 }
2258 }
2259 }
2260
2261 return NULL;
2262}
2263
2264static int native_thread_create_shared(rb_thread_t *th);
2265
2266#if USE_MN_THREADS
2267static void nt_free_stack(void *mstack);
2268#endif
2269
2270void
2271rb_threadptr_remove(rb_thread_t *th)
2272{
2273#if USE_MN_THREADS
2274 if (th->sched.malloc_stack) {
2275 // dedicated
2276 return;
2277 }
2278 else {
2279 rb_vm_t *vm = th->vm;
2280 th->sched.finished = false;
2281
2282 RB_VM_LOCK_ENTER();
2283 {
2284 ccan_list_add(&vm->ractor.sched.zombie_threads, &th->sched.node.zombie_threads);
2285 }
2286 RB_VM_LOCK_LEAVE();
2287 }
2288#endif
2289}
2290
2291void
2292rb_threadptr_sched_free(rb_thread_t *th)
2293{
2294#if USE_MN_THREADS
2295 if (th->sched.malloc_stack) {
2296 // has dedicated
2297 ruby_xfree(th->sched.context_stack);
2298 native_thread_destroy(th->nt);
2299 }
2300 else {
2301 nt_free_stack(th->sched.context_stack);
2302 // TODO: how to free nt and nt->altstack?
2303 }
2304
2305 ruby_xfree(th->sched.context);
2306 th->sched.context = NULL;
2307 // VM_ASSERT(th->sched.context == NULL);
2308#else
2309 ruby_xfree(th->sched.context_stack);
2310 native_thread_destroy(th->nt);
2311#endif
2312
2313 th->nt = NULL;
2314}
2315
2316void
2317rb_thread_sched_mark_zombies(rb_vm_t *vm)
2318{
2319 if (!ccan_list_empty(&vm->ractor.sched.zombie_threads)) {
2320 rb_thread_t *zombie_th, *next_zombie_th;
2321 ccan_list_for_each_safe(&vm->ractor.sched.zombie_threads, zombie_th, next_zombie_th, sched.node.zombie_threads) {
2322 if (zombie_th->sched.finished) {
2323 ccan_list_del_init(&zombie_th->sched.node.zombie_threads);
2324 }
2325 else {
2326 rb_gc_mark(zombie_th->self);
2327 }
2328 }
2329 }
2330}
2331
2332static int
2333native_thread_create(rb_thread_t *th)
2334{
2335 VM_ASSERT(th->nt == 0);
2336 RUBY_DEBUG_LOG("th:%d has_dnt:%d", th->serial, th->has_dedicated_nt);
2337 RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_STARTED, th);
2338
2339 if (!th->ractor->threads.sched.enable_mn_threads) {
2340 th->has_dedicated_nt = 1;
2341 }
2342
2343 if (th->has_dedicated_nt) {
2344 return native_thread_create_dedicated(th);
2345 }
2346 else {
2347 return native_thread_create_shared(th);
2348 }
2349}
2350
2351#if USE_NATIVE_THREAD_PRIORITY
2352
2353static void
2354native_thread_apply_priority(rb_thread_t *th)
2355{
2356#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
2357 struct sched_param sp;
2358 int policy;
2359 int priority = 0 - th->priority;
2360 int max, min;
2361 pthread_getschedparam(th->nt->thread_id, &policy, &sp);
2362 max = sched_get_priority_max(policy);
2363 min = sched_get_priority_min(policy);
2364
2365 if (min > priority) {
2366 priority = min;
2367 }
2368 else if (max < priority) {
2369 priority = max;
2370 }
2371
2372 sp.sched_priority = priority;
2373 pthread_setschedparam(th->nt->thread_id, policy, &sp);
2374#else
2375 /* not touched */
2376#endif
2377}
2378
2379#endif /* USE_NATIVE_THREAD_PRIORITY */
2380
2381static int
2382native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
2383{
2384 return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
2385}
2386
2387static void
2388ubf_pthread_cond_signal(void *ptr)
2389{
2390 rb_thread_t *th = (rb_thread_t *)ptr;
2391 RUBY_DEBUG_LOG("th:%u on nt:%d", rb_th_serial(th), (int)th->nt->serial);
2392 rb_native_cond_signal(&th->nt->cond.intr);
2393}
2394
2395static void
2396native_cond_sleep(rb_thread_t *th, rb_hrtime_t *rel)
2397{
2398 rb_nativethread_lock_t *lock = &th->interrupt_lock;
2399 rb_nativethread_cond_t *cond = &th->nt->cond.intr;
2400
2401 /* Solaris cond_timedwait() return EINVAL if an argument is greater than
2402 * current_time + 100,000,000. So cut up to 100,000,000. This is
2403 * considered as a kind of spurious wakeup. The caller to native_sleep
2404 * should care about spurious wakeup.
2405 *
2406 * See also [Bug #1341] [ruby-core:29702]
2407 * http://download.oracle.com/docs/cd/E19683-01/816-0216/6m6ngupgv/index.html
2408 */
2409 const rb_hrtime_t max = (rb_hrtime_t)100000000 * RB_HRTIME_PER_SEC;
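/* With RB_HRTIME_PER_SEC == 10^9, max is 10^17 hrtime units, i.e.
 * 100,000,000 seconds (a little over three years). A longer relative
 * timeout is clamped to that value, the wait returns early, and the caller
 * simply observes it as one more spurious wakeup and sleeps again. */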
2410
2411 THREAD_BLOCKING_BEGIN(th);
2412 {
2413 rb_native_mutex_lock(lock);
2414 th->unblock.func = ubf_pthread_cond_signal;
2415 th->unblock.arg = th;
2416
2417 if (RUBY_VM_INTERRUPTED(th->ec)) {
2418 /* interrupted. return immediately */
2419 RUBY_DEBUG_LOG("interrupted before sleep th:%u", rb_th_serial(th));
2420 }
2421 else {
2422 if (!rel) {
2423 rb_native_cond_wait(cond, lock);
2424 }
2425 else {
2426 rb_hrtime_t end;
2427
2428 if (*rel > max) {
2429 *rel = max;
2430 }
2431
2432 end = native_cond_timeout(cond, *rel);
2433 native_cond_timedwait(cond, lock, &end);
2434 }
2435 }
2436 th->unblock.func = 0;
2437
2438 rb_native_mutex_unlock(lock);
2439 }
2440 THREAD_BLOCKING_END(th);
2441
2442 RUBY_DEBUG_LOG("done th:%u", rb_th_serial(th));
2443}
2444
2445#ifdef USE_UBF_LIST
2446static CCAN_LIST_HEAD(ubf_list_head);
2447static rb_nativethread_lock_t ubf_list_lock = RB_NATIVETHREAD_LOCK_INIT;
2448
2449static void
2450ubf_list_atfork(void)
2451{
2452 ccan_list_head_init(&ubf_list_head);
2453 rb_native_mutex_initialize(&ubf_list_lock);
2454}
2455
2456 RBIMPL_ATTR_MAYBE_UNUSED()
2457 static bool
2458ubf_list_contain_p(rb_thread_t *th)
2459{
2460 rb_thread_t *list_th;
2461 ccan_list_for_each(&ubf_list_head, list_th, sched.node.ubf) {
2462 if (list_th == th) return true;
2463 }
2464 return false;
2465}
2466
2467 /* The thread 'th' is registered as trying to unblock. */
2468static void
2469register_ubf_list(rb_thread_t *th)
2470{
2471 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2472 struct ccan_list_node *node = &th->sched.node.ubf;
2473
2474 VM_ASSERT(th->unblock.func != NULL);
2475
2476 rb_native_mutex_lock(&ubf_list_lock);
2477 {
2478 // check that the node is not linked yet
2479 if (ccan_list_empty((struct ccan_list_head*)node)) {
2480 VM_ASSERT(!ubf_list_contain_p(th));
2481 ccan_list_add(&ubf_list_head, node);
2482 }
2483 }
2484 rb_native_mutex_unlock(&ubf_list_lock);
2485
2486 timer_thread_wakeup();
2487}
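/* The ccan_list_empty() check above relies on a detached node pointing to
 * itself (the state left by ccan_list_node_init()/ccan_list_del_init()),
 * which has the same layout as an empty list head. Casting the node to a
 * head is therefore a cheap "is this thread already on ubf_list_head?" test,
 * roughly:
 *
 *     node->next == node   // detached, safe to ccan_list_add()
 */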
2488
2489 /* The thread 'th' is unblocked. It no longer needs to be registered. */
2490static void
2491unregister_ubf_list(rb_thread_t *th)
2492{
2493 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2494 struct ccan_list_node *node = &th->sched.node.ubf;
2495
2496 /* we can't allow re-entry into ubf_list_head */
2497 VM_ASSERT(th->unblock.func == NULL);
2498
2499 if (!ccan_list_empty((struct ccan_list_head*)node)) {
2500 rb_native_mutex_lock(&ubf_list_lock);
2501 {
2502 VM_ASSERT(ubf_list_contain_p(th));
2503 ccan_list_del_init(node);
2504 }
2505 rb_native_mutex_unlock(&ubf_list_lock);
2506 }
2507}
2508
2509/*
2510 * Send a signal so that the target thread returns from a blocking syscall.
2511 * Almost any signal would do, but we chose SIGVTALRM.
2512 */
2513static void
2514ubf_wakeup_thread(rb_thread_t *th)
2515{
2516 RUBY_DEBUG_LOG("th:%u thread_id:%p", rb_th_serial(th), (void *)th->nt->thread_id);
2517
2518 pthread_kill(th->nt->thread_id, SIGVTALRM);
2519}
2520
2521static void
2522ubf_select(void *ptr)
2523{
2524 rb_thread_t *th = (rb_thread_t *)ptr;
2525 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(th));
2526 ubf_wakeup_thread(th);
2527 register_ubf_list(th);
2528}
2529
2530static bool
2531ubf_threads_empty(void)
2532{
2533 return ccan_list_empty(&ubf_list_head) != 0;
2534}
2535
2536static void
2537ubf_wakeup_all_threads(void)
2538{
2539 if (!ubf_threads_empty()) {
2540 rb_thread_t *th;
2541 rb_native_mutex_lock(&ubf_list_lock);
2542 {
2543 ccan_list_for_each(&ubf_list_head, th, sched.node.ubf) {
2544 ubf_wakeup_thread(th);
2545 }
2546 }
2547 rb_native_mutex_unlock(&ubf_list_lock);
2548 }
2549}
2550
2551#else /* USE_UBF_LIST */
2552#define register_ubf_list(th) (void)(th)
2553#define unregister_ubf_list(th) (void)(th)
2554#define ubf_select 0
2555static void ubf_wakeup_all_threads(void) { return; }
2556static bool ubf_threads_empty(void) { return true; }
2557#define ubf_list_atfork() do {} while (0)
2558#endif /* USE_UBF_LIST */
2559
2560#define TT_DEBUG 0
2561#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
2562
2563void
2564rb_thread_wakeup_timer_thread(int sig)
2565{
2566 // This function can be called from signal handlers, so
2567 // pthread_mutex_lock() must not be used.
2568
2569 // wakeup timer thread
2570 timer_thread_wakeup_force();
2571
2572 // interrupt main thread if main thread is available
2573 if (system_working) {
2574 rb_vm_t *vm = GET_VM();
2575 rb_thread_t *main_th = vm->ractor.main_thread;
2576
2577 if (main_th) {
2578 volatile rb_execution_context_t *main_th_ec = ACCESS_ONCE(rb_execution_context_t *, main_th->ec);
2579
2580 if (main_th_ec) {
2581 RUBY_VM_SET_TRAP_INTERRUPT(main_th_ec);
2582
2583 if (vm->ubf_async_safe && main_th->unblock.func) {
2584 (main_th->unblock.func)(main_th->unblock.arg);
2585 }
2586 }
2587 }
2588 }
2589}
2590
2591#define CLOSE_INVALIDATE_PAIR(expr) \
2592 close_invalidate_pair(expr,"close_invalidate: "#expr)
2593static void
2594close_invalidate(int *fdp, const char *msg)
2595{
2596 int fd = *fdp;
2597
2598 *fdp = -1;
2599 if (close(fd) < 0) {
2600 async_bug_fd(msg, errno, fd);
2601 }
2602}
2603
2604static void
2605close_invalidate_pair(int fds[2], const char *msg)
2606{
2607 if (USE_EVENTFD && fds[0] == fds[1]) {
2608 fds[1] = -1; // disable write port first
2609 close_invalidate(&fds[0], msg);
2610 }
2611 else {
2612 close_invalidate(&fds[1], msg);
2613 close_invalidate(&fds[0], msg);
2614 }
2615}
2616
2617static void
2618set_nonblock(int fd)
2619{
2620 int oflags;
2621 int err;
2622
2623 oflags = fcntl(fd, F_GETFL);
2624 if (oflags == -1)
2625 rb_sys_fail(0);
2626 oflags |= O_NONBLOCK;
2627 err = fcntl(fd, F_SETFL, oflags);
2628 if (err == -1)
2629 rb_sys_fail(0);
2630}
2631
2632/* communication pipe with timer thread and signal handler */
2633static void
2634setup_communication_pipe_internal(int pipes[2])
2635{
2636 int err;
2637
2638 if (pipes[0] > 0 || pipes[1] > 0) {
2639 VM_ASSERT(pipes[0] > 0);
2640 VM_ASSERT(pipes[1] > 0);
2641 return;
2642 }
2643
2644 /*
2645 * Don't bother with eventfd on ancient Linux 2.6.22..2.6.26, which were
2646 * missing the EFD_* flags; those kernels fall back to a pipe.
2647 */
2648#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
2649 pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
2650
2651 if (pipes[0] >= 0) {
2652 rb_update_max_fd(pipes[0]);
2653 return;
2654 }
2655#endif
2656
2657 err = rb_cloexec_pipe(pipes);
2658 if (err != 0) {
2659 rb_bug("can not create communication pipe");
2660 }
2661 rb_update_max_fd(pipes[0]);
2662 rb_update_max_fd(pipes[1]);
2663 set_nonblock(pipes[0]);
2664 set_nonblock(pipes[1]);
2665}
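/* When the eventfd path above is taken, both entries of the pipe array hold
 * the same descriptor: each wakeup write() adds to a kernel-side 64-bit
 * counter and a single 8-byte read() drains it, whereas the pipe fallback
 * has two distinct fds and one byte per wakeup. See
 * signal_communication_pipe() below, which writes a uint64_t in the eventfd
 * case and a single '!' byte otherwise. */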
2666
2667#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
2668# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
2669#endif
2670
2671enum {
2672 THREAD_NAME_MAX =
2673#if defined(__linux__)
2674 16
2675#elif defined(__APPLE__)
2676/* Undocumented, and main thread seems unlimited */
2677 64
2678#else
2679 16
2680#endif
2681};
2682
2683static VALUE threadptr_invoke_proc_location(rb_thread_t *th);
2684
2685static void
2686native_set_thread_name(rb_thread_t *th)
2687{
2688#ifdef SET_CURRENT_THREAD_NAME
2689 VALUE loc;
2690 if (!NIL_P(loc = th->name)) {
2691 SET_CURRENT_THREAD_NAME(RSTRING_PTR(loc));
2692 }
2693 else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
2694 char *name, *p;
2695 char buf[THREAD_NAME_MAX];
2696 size_t len;
2697 int n;
2698
2699 name = RSTRING_PTR(RARRAY_AREF(loc, 0));
2700 p = strrchr(name, '/'); /* show only the basename of the path. */
2701 if (p && p[1])
2702 name = p + 1;
2703
2704 n = snprintf(buf, sizeof(buf), "%s:%d", name, NUM2INT(RARRAY_AREF(loc, 1)));
2705 RB_GC_GUARD(loc);
2706
2707 len = (size_t)n;
2708 if (len >= sizeof(buf)) {
2709 buf[sizeof(buf)-2] = '*';
2710 buf[sizeof(buf)-1] = '\0';
2711 }
2712 SET_CURRENT_THREAD_NAME(buf);
2713 }
2714#endif
2715}
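/* Example of the truncation above: on Linux THREAD_NAME_MAX is 16 including
 * the terminating NUL (the kernel's 16-byte comm limit), so a location such
 * as "some_long_file.rb:123" does not fit; snprintf() reports the would-be
 * length, the last visible character is overwritten with '*', and the thread
 * shows up as "some_long_file*" in tools that display thread names. */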
2716
2717static void
2718native_set_another_thread_name(rb_nativethread_id_t thread_id, VALUE name)
2719{
2720#if defined SET_ANOTHER_THREAD_NAME || defined SET_CURRENT_THREAD_NAME
2721 char buf[THREAD_NAME_MAX];
2722 const char *s = "";
2723# if !defined SET_ANOTHER_THREAD_NAME
2724 if (!pthread_equal(pthread_self(), thread_id)) return;
2725# endif
2726 if (!NIL_P(name)) {
2727 long n;
2728 RSTRING_GETMEM(name, s, n);
2729 if (n >= (int)sizeof(buf)) {
2730 memcpy(buf, s, sizeof(buf)-1);
2731 buf[sizeof(buf)-1] = '\0';
2732 s = buf;
2733 }
2734 }
2735# if defined SET_ANOTHER_THREAD_NAME
2736 SET_ANOTHER_THREAD_NAME(thread_id, s);
2737# elif defined SET_CURRENT_THREAD_NAME
2738 SET_CURRENT_THREAD_NAME(s);
2739# endif
2740#endif
2741}
2742
2743#if defined(RB_THREAD_T_HAS_NATIVE_ID) || defined(__APPLE__)
2744static VALUE
2745native_thread_native_thread_id(rb_thread_t *target_th)
2746{
2747 if (!target_th->nt) return Qnil;
2748
2749#ifdef RB_THREAD_T_HAS_NATIVE_ID
2750 int tid = target_th->nt->tid;
2751 if (tid == 0) return Qnil;
2752 return INT2FIX(tid);
2753#elif defined(__APPLE__)
2754 uint64_t tid;
2755/* The first condition is needed because MAC_OS_X_VERSION_10_6
2756 is not defined on 10.5, and while __POWERPC__ takes care of ppc/ppc64,
2757 i386 will be broken without this. Note, 10.5 is supported with GCC upstream,
2758 so it has C++17 and everything needed to build modern Ruby. */
2759# if (!defined(MAC_OS_X_VERSION_10_6) || \
2760 (MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_6) || \
2761 defined(__POWERPC__) /* never defined for PowerPC platforms */)
2762 const bool no_pthread_threadid_np = true;
2763# define NO_PTHREAD_MACH_THREAD_NP 1
2764# elif MAC_OS_X_VERSION_MIN_REQUIRED >= MAC_OS_X_VERSION_10_6
2765 const bool no_pthread_threadid_np = false;
2766# else
2767# if !(defined(__has_attribute) && __has_attribute(availability))
2768 /* __API_AVAILABLE macro does nothing on gcc */
2769 __attribute__((weak)) int pthread_threadid_np(pthread_t, uint64_t*);
2770# endif
2771 /* Check weakly linked symbol */
2772 const bool no_pthread_threadid_np = !&pthread_threadid_np;
2773# endif
2774 if (no_pthread_threadid_np) {
2775 return ULL2NUM(pthread_mach_thread_np(pthread_self()));
2776 }
2777# ifndef NO_PTHREAD_MACH_THREAD_NP
2778 int e = pthread_threadid_np(target_th->nt->thread_id, &tid);
2779 if (e != 0) rb_syserr_fail(e, "pthread_threadid_np");
2780 return ULL2NUM((unsigned long long)tid);
2781# endif
2782#endif
2783}
2784# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 1
2785#else
2786# define USE_NATIVE_THREAD_NATIVE_THREAD_ID 0
2787#endif
2788
2789static struct {
2790 rb_serial_t created_fork_gen;
2791 pthread_t pthread_id;
2792
2793 int comm_fds[2]; // r, w
2794
2795#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
2796 int event_fd; // kernel event queue fd (epoll/kqueue)
2797#endif
2798#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
2799#define EPOLL_EVENTS_MAX 0x10
2800 struct epoll_event finished_events[EPOLL_EVENTS_MAX];
2801#elif HAVE_SYS_EVENT_H && USE_MN_THREADS
2802#define KQUEUE_EVENTS_MAX 0x10
2803 struct kevent finished_events[KQUEUE_EVENTS_MAX];
2804#endif
2805
2806 // waiting threads list
2807 struct ccan_list_head waiting; // waiting threads in ractors
2808 pthread_mutex_t waiting_lock;
2809} timer_th = {
2810 .created_fork_gen = 0,
2811};
2812
2813#define TIMER_THREAD_CREATED_P() (timer_th.created_fork_gen == current_fork_gen)
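/* created_fork_gen records the fork generation in which the timer thread
 * state was set up. After fork(2) the child sees a new current_fork_gen
 * (maintained elsewhere), so TIMER_THREAD_CREATED_P() turns false and
 * rb_thread_create_timer_thread() below tears down the inherited fds and
 * lock and re-creates them before starting a fresh timer thread. */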
2814
2815static void timer_thread_check_timeslice(rb_vm_t *vm);
2816static int timer_thread_set_timeout(rb_vm_t *vm);
2817static void timer_thread_wakeup_thread(rb_thread_t *th);
2818
2819#include "thread_pthread_mn.c"
2820
2821static rb_thread_t *
2822thread_sched_waiting_thread(struct rb_thread_sched_waiting *w)
2823{
2824 if (w) {
2825 return (rb_thread_t *)((size_t)w - offsetof(rb_thread_t, sched.waiting_reason));
2826 }
2827 else {
2828 return NULL;
2829 }
2830}
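/* thread_sched_waiting_thread() is the usual "container_of" computation: it
 * recovers the rb_thread_t that embeds the given rb_thread_sched_waiting as
 * its sched.waiting_reason member. A generic sketch of the same idea (not
 * defined in this file, shown only for illustration):
 *
 *     #define CONTAINER_OF(ptr, type, member) \
 *         ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 * so the subtraction above is equivalent to
 * CONTAINER_OF(w, rb_thread_t, sched.waiting_reason). */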
2831
2832static int
2833timer_thread_set_timeout(rb_vm_t *vm)
2834{
2835#if 0
2836 return 10; // ms
2837#else
2838 int timeout = -1;
2839
2840 ractor_sched_lock(vm, NULL);
2841 {
2842 if ( !ccan_list_empty(&vm->ractor.sched.timeslice_threads) // (1-1) Provide time slice for active NTs
2843 || !ubf_threads_empty() // (1-3) Periodic UBF
2844 || vm->ractor.sched.grq_cnt > 0 // (1-4) Lazy GRQ deq start
2845 ) {
2846
2847 RUBY_DEBUG_LOG("timeslice:%d ubf:%d grq:%d",
2848 !ccan_list_empty(&vm->ractor.sched.timeslice_threads),
2849 !ubf_threads_empty(),
2850 (vm->ractor.sched.grq_cnt > 0));
2851
2852 timeout = 10; // ms
2853 vm->ractor.sched.timeslice_wait_inf = false;
2854 }
2855 else {
2856 vm->ractor.sched.timeslice_wait_inf = true;
2857 }
2858 }
2859 ractor_sched_unlock(vm, NULL);
2860
2861 if (vm->ractor.sched.timeslice_wait_inf) {
2862 rb_native_mutex_lock(&timer_th.waiting_lock);
2863 {
2864 struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
2865 rb_thread_t *th = thread_sched_waiting_thread(w);
2866
2867 if (th && (th->sched.waiting_reason.flags & thread_sched_waiting_timeout)) {
2868 rb_hrtime_t now = rb_hrtime_now();
2869 rb_hrtime_t hrrel = rb_hrtime_sub(th->sched.waiting_reason.data.timeout, now);
2870
2871 RUBY_DEBUG_LOG("th:%u now:%lu rel:%lu", rb_th_serial(th), (unsigned long)now, (unsigned long)hrrel);
2872
2873 // TODO: overflow?
2874 timeout = (int)((hrrel + RB_HRTIME_PER_MSEC - 1) / RB_HRTIME_PER_MSEC); // ms
2875 }
2876 }
2877 rb_native_mutex_unlock(&timer_th.waiting_lock);
2878 }
2879
2880 RUBY_DEBUG_LOG("timeout:%d inf:%d", timeout, (int)vm->ractor.sched.timeslice_wait_inf);
2881
2882 // fprintf(stderr, "timeout:%d\n", timeout);
2883 return timeout;
2884#endif
2885}
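/* The millisecond conversion in timer_thread_set_timeout() above is a
 * ceiling division: e.g. a remaining hrtime of 1,500,000 ns becomes
 * (1,500,000 + 999,999) / 1,000,000 = 2 ms, so the timer thread does not wake
 * up before the deadline (it may wake slightly after). Deadlines that are
 * already past, or less than RB_HRTIME_PER_MSEC away, are treated as expired
 * by timer_thread_check_exceed() below. */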
2886
2887static void
2888timer_thread_check_signal(rb_vm_t *vm)
2889{
2890 // ruby_sigchld_handler(vm); TODO
2891
2892 int signum = rb_signal_buff_size();
2893 if (UNLIKELY(signum > 0) && vm->ractor.main_thread) {
2894 RUBY_DEBUG_LOG("signum:%d", signum);
2895 threadptr_trap_interrupt(vm->ractor.main_thread);
2896 }
2897}
2898
2899static bool
2900timer_thread_check_exceed(rb_hrtime_t abs, rb_hrtime_t now)
2901{
2902 if (abs < now) {
2903 return true;
2904 }
2905 else if (abs - now < RB_HRTIME_PER_MSEC) {
2906 return true; // too short time
2907 }
2908 else {
2909 return false;
2910 }
2911}
2912
2913static rb_thread_t *
2914timer_thread_deq_wakeup(rb_vm_t *vm, rb_hrtime_t now)
2915{
2916 struct rb_thread_sched_waiting *w = ccan_list_top(&timer_th.waiting, struct rb_thread_sched_waiting, node);
2917
2918 if (w != NULL &&
2919 (w->flags & thread_sched_waiting_timeout) &&
2920 timer_thread_check_exceed(w->data.timeout, now)) {
2921
2922 RUBY_DEBUG_LOG("wakeup th:%u", rb_th_serial(thread_sched_waiting_thread(w)));
2923
2924 // delete from waiting list
2925 ccan_list_del_init(&w->node);
2926
2927 // setup result
2928 w->flags = thread_sched_waiting_none;
2929 w->data.result = 0;
2930
2931 return thread_sched_waiting_thread(w);
2932 }
2933
2934 return NULL;
2935}
2936
2937static void
2938timer_thread_wakeup_thread(rb_thread_t *th)
2939{
2940 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
2941 struct rb_thread_sched *sched = TH_SCHED(th);
2942
2943 thread_sched_lock(sched, th);
2944 {
2945 if (sched->running != th) {
2946 thread_sched_to_ready_common(sched, th, true, false);
2947 }
2948 else {
2949 // the running thread itself will release the execution right
2950 }
2951 }
2952 thread_sched_unlock(sched, th);
2953}
2954
2955static void
2956timer_thread_check_timeout(rb_vm_t *vm)
2957{
2958 rb_hrtime_t now = rb_hrtime_now();
2959 rb_thread_t *th;
2960
2961 rb_native_mutex_lock(&timer_th.waiting_lock);
2962 {
2963 while ((th = timer_thread_deq_wakeup(vm, now)) != NULL) {
2964 timer_thread_wakeup_thread(th);
2965 }
2966 }
2967 rb_native_mutex_unlock(&timer_th.waiting_lock);
2968}
2969
2970static void
2971timer_thread_check_timeslice(rb_vm_t *vm)
2972{
2973 // TODO: check time
2974 rb_thread_t *th;
2975 ccan_list_for_each(&vm->ractor.sched.timeslice_threads, th, sched.node.timeslice_threads) {
2976 RUBY_DEBUG_LOG("timeslice th:%u", rb_th_serial(th));
2977 RUBY_VM_SET_TIMER_INTERRUPT(th->ec);
2978 }
2979}
2980
2981void
2982rb_assert_sig(void)
2983{
2984 sigset_t oldmask;
2985 pthread_sigmask(0, NULL, &oldmask);
2986 if (sigismember(&oldmask, SIGVTALRM)) {
2987 rb_bug("!!!");
2988 }
2989 else {
2990 RUBY_DEBUG_LOG("ok");
2991 }
2992}
2993
2994static void *
2995timer_thread_func(void *ptr)
2996{
2997 rb_vm_t *vm = (rb_vm_t *)ptr;
2998#if defined(RUBY_NT_SERIAL)
2999 ruby_nt_serial = (rb_atomic_t)-1;
3000#endif
3001
3002 RUBY_DEBUG_LOG("started%s", "");
3003
3004 while (system_working) {
3005 timer_thread_check_signal(vm);
3006 timer_thread_check_timeout(vm);
3007 ubf_wakeup_all_threads();
3008
3009 RUBY_DEBUG_LOG("system_working:%d", system_working);
3010 timer_thread_polling(vm);
3011 }
3012
3013 RUBY_DEBUG_LOG("terminated");
3014 return NULL;
3015}
3016
3017/* only use signal-safe system calls here */
3018static void
3019signal_communication_pipe(int fd)
3020{
3021#if USE_EVENTFD
3022 const uint64_t buff = 1;
3023#else
3024 const char buff = '!';
3025#endif
3026 ssize_t result;
3027
3028 /* already opened */
3029 if (fd >= 0) {
3030 retry:
3031 if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
3032 int e = errno;
3033 switch (e) {
3034 case EINTR: goto retry;
3035 case EAGAIN:
3036#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
3037 case EWOULDBLOCK:
3038#endif
3039 break;
3040 default:
3041 async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
3042 }
3043 }
3044 if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
3045 }
3046 else {
3047 // ignore wakeup
3048 }
3049}
3050
3051static void
3052timer_thread_wakeup_force(void)
3053{
3054 // RUBY_DEBUG_LOG() must not be used here because this function can be called from signal handlers.
3055 signal_communication_pipe(timer_th.comm_fds[1]);
3056}
3057
3058static void
3059timer_thread_wakeup_locked(rb_vm_t *vm)
3060{
3061 // the ractor scheduler lock must already be held by the caller.
3062 ASSERT_ractor_sched_locked(vm, NULL);
3063
3064 if (timer_th.created_fork_gen == current_fork_gen) {
3065 if (vm->ractor.sched.timeslice_wait_inf) {
3066 RUBY_DEBUG_LOG("wakeup with fd:%d", timer_th.comm_fds[1]);
3067 timer_thread_wakeup_force();
3068 }
3069 else {
3070 RUBY_DEBUG_LOG("will be wakeup...");
3071 }
3072 }
3073}
3074
3075static void
3076timer_thread_wakeup(void)
3077{
3078 rb_vm_t *vm = GET_VM();
3079
3080 ractor_sched_lock(vm, NULL);
3081 {
3082 timer_thread_wakeup_locked(vm);
3083 }
3084 ractor_sched_unlock(vm, NULL);
3085}
3086
3087static void
3088rb_thread_create_timer_thread(void)
3089{
3090 rb_serial_t created_fork_gen = timer_th.created_fork_gen;
3091
3092 RUBY_DEBUG_LOG("fork_gen create:%d current:%d", (int)created_fork_gen, (int)current_fork_gen);
3093
3094 timer_th.created_fork_gen = current_fork_gen;
3095
3096 if (created_fork_gen != current_fork_gen) {
3097 if (created_fork_gen != 0) {
3098 RUBY_DEBUG_LOG("forked child process");
3099
3100 CLOSE_INVALIDATE_PAIR(timer_th.comm_fds);
3101#if HAVE_SYS_EPOLL_H && USE_MN_THREADS
3102 close_invalidate(&timer_th.event_fd, "close event_fd");
3103#endif
3104 rb_native_mutex_destroy(&timer_th.waiting_lock);
3105 }
3106
3107 ccan_list_head_init(&timer_th.waiting);
3108 rb_native_mutex_initialize(&timer_th.waiting_lock);
3109
3110 // open communication channel
3111 setup_communication_pipe_internal(timer_th.comm_fds);
3112
3113 // open event fd
3114 timer_thread_setup_mn();
3115 }
3116
3117 pthread_create(&timer_th.pthread_id, NULL, timer_thread_func, GET_VM());
3118}
3119
3120static int
3121native_stop_timer_thread(void)
3122{
3123 int stopped;
3124 stopped = --system_working <= 0;
3125
3126 if (stopped) {
3127 RUBY_DEBUG_LOG("wakeup send %d", timer_th.comm_fds[1]);
3128 timer_thread_wakeup_force();
3129 RUBY_DEBUG_LOG("wakeup sent");
3130 pthread_join(timer_th.pthread_id, NULL);
3131 }
3132
3133 if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
3134 return stopped;
3135}
3136
3137static void
3138native_reset_timer_thread(void)
3139{
3140 //
3141}
3142
3143#ifdef HAVE_SIGALTSTACK
3144int
3145ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
3146{
3147 void *base;
3148 size_t size;
3149 const size_t water_mark = 1024 * 1024;
3150 STACK_GROW_DIR_DETECTION;
3151
3152#ifdef STACKADDR_AVAILABLE
3153 if (get_stack(&base, &size) == 0) {
3154# ifdef __APPLE__
3155 if (pthread_equal(th->nt->thread_id, native_main_thread.id)) {
3156 struct rlimit rlim;
3157 if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
3158 size = (size_t)rlim.rlim_cur;
3159 }
3160 }
3161# endif
3162 base = (char *)base + STACK_DIR_UPPER(+size, -size);
3163 }
3164 else
3165#endif
3166 if (th) {
3167 size = th->ec->machine.stack_maxsize;
3168 base = (char *)th->ec->machine.stack_start - STACK_DIR_UPPER(0, size);
3169 }
3170 else {
3171 return 0;
3172 }
3173 size /= RUBY_STACK_SPACE_RATIO;
3174 if (size > water_mark) size = water_mark;
3175 if (IS_STACK_DIR_UPPER()) {
3176 if (size > ~(size_t)base+1) size = ~(size_t)base+1;
3177 if (addr > base && addr <= (void *)((char *)base + size)) return 1;
3178 }
3179 else {
3180 if (size > (size_t)base) size = (size_t)base;
3181 if (addr > (void *)((char *)base - size) && addr <= base) return 1;
3182 }
3183 return 0;
3184}
3185#endif
3186
3187int
3188rb_reserved_fd_p(int fd)
3189{
3190 /* no false-positive if out-of-FD at startup */
3191 if (fd < 0) return 0;
3192
3193 if (fd == timer_th.comm_fds[0] ||
3194 fd == timer_th.comm_fds[1]
3195#if (HAVE_SYS_EPOLL_H || HAVE_SYS_EVENT_H) && USE_MN_THREADS
3196 || fd == timer_th.event_fd
3197#endif
3198 ) {
3199 goto check_fork_gen;
3200 }
3201 return 0;
3202
3203 check_fork_gen:
3204 if (timer_th.created_fork_gen == current_fork_gen) {
3205 /* async-signal-safe */
3206 return 1;
3207 }
3208 else {
3209 return 0;
3210 }
3211}
3212
3213rb_nativethread_id_t
3215{
3216 return pthread_self();
3217}
3218
3219#if defined(USE_POLL) && !defined(HAVE_PPOLL)
3220/* TODO: don't ignore sigmask */
3221static int
3222ruby_ppoll(struct pollfd *fds, nfds_t nfds,
3223 const struct timespec *ts, const sigset_t *sigmask)
3224{
3225 int timeout_ms;
3226
3227 if (ts) {
3228 int tmp, tmp2;
3229
3230 if (ts->tv_sec > INT_MAX/1000)
3231 timeout_ms = INT_MAX;
3232 else {
3233 tmp = (int)(ts->tv_sec * 1000);
3234 /* round up 1ns to 1ms to avoid excessive wakeups for <1ms sleep */
3235 tmp2 = (int)((ts->tv_nsec + 999999L) / (1000L * 1000L));
3236 if (INT_MAX - tmp < tmp2)
3237 timeout_ms = INT_MAX;
3238 else
3239 timeout_ms = (int)(tmp + tmp2);
3240 }
3241 }
3242 else
3243 timeout_ms = -1;
3244
3245 return poll(fds, nfds, timeout_ms);
3246}
3247# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
3248#endif
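/* Example of the emulation above: ts = {tv_sec = 2, tv_nsec = 500000} gives
 * tmp = 2000 ms and tmp2 = 1 ms (500,000 ns rounded up), i.e. a 2001 ms
 * poll() timeout; ts = {0, 0} degenerates to a non-blocking poll() with
 * timeout 0; anything that would overflow an int is clamped to INT_MAX. */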
3249
3250/*
3251 * Single CPU setups benefit from explicit sched_yield() before ppoll(),
3252 * since threads may be too starved to enter the GVL waitqueue for
3253 * us to detect contention. Instead, we want to kick other threads
3254 * so they can run and possibly prevent us from entering slow paths
3255 * in ppoll() or similar syscalls.
3256 *
3257 * Confirmed on FreeBSD 11.2 and Linux 4.19.
3258 * [ruby-core:90417] [Bug #15398]
3259 */
3260#define THREAD_BLOCKING_YIELD(th) do { \
3261 const rb_thread_t *next_th; \
3262 struct rb_thread_sched *sched = TH_SCHED(th); \
3263 RB_VM_SAVE_MACHINE_CONTEXT(th); \
3264 thread_sched_to_waiting(sched, (th)); \
3265 next_th = sched->running; \
3266 rb_native_mutex_unlock(&sched->lock_); \
3267 native_thread_yield(); /* TODO: needed? */ \
3268 if (!next_th && rb_ractor_living_thread_num(th->ractor) > 1) { \
3269 native_thread_yield(); \
3270 }
3271
3272static void
3273native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
3274{
3275 struct rb_thread_sched *sched = TH_SCHED(th);
3276
3277 RUBY_DEBUG_LOG("rel:%d", rel ? (int)*rel : 0);
3278 if (rel) {
3279 if (th_has_dedicated_nt(th)) {
3280 native_cond_sleep(th, rel);
3281 }
3282 else {
3283 thread_sched_wait_events(sched, th, -1, thread_sched_waiting_timeout, rel);
3284 }
3285 }
3286 else {
3287 thread_sched_to_waiting_until_wakeup(sched, th);
3288 }
3289
3290 RUBY_DEBUG_LOG("wakeup");
3291}
3292
3293// fork read-write lock (only for pthread)
3294static pthread_rwlock_t rb_thread_fork_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3295
3296void
3297rb_thread_release_fork_lock(void)
3298{
3299 int r;
3300 if ((r = pthread_rwlock_unlock(&rb_thread_fork_rw_lock))) {
3301 rb_bug_errno("pthread_rwlock_unlock", r);
3302 }
3303}
3304
3305void
3306rb_thread_reset_fork_lock(void)
3307{
3308 int r;
3309 if ((r = pthread_rwlock_destroy(&rb_thread_fork_rw_lock))) {
3310 rb_bug_errno("pthread_rwlock_destroy", r);
3311 }
3312
3313 if ((r = pthread_rwlock_init(&rb_thread_fork_rw_lock, NULL))) {
3314 rb_bug_errno("pthread_rwlock_init", r);
3315 }
3316}
3317
3318void *
3319rb_thread_prevent_fork(void *(*func)(void *), void *data)
3320{
3321 int r;
3322 if ((r = pthread_rwlock_rdlock(&rb_thread_fork_rw_lock))) {
3323 rb_bug_errno("pthread_rwlock_rdlock", r);
3324 }
3325 void *result = func(data);
3326 rb_thread_release_fork_lock();
3327 return result;
3328}
3329
3330void
3331rb_thread_acquire_fork_lock(void)
3332{
3333 int r;
3334 if ((r = pthread_rwlock_wrlock(&rb_thread_fork_rw_lock))) {
3335 rb_bug_errno("pthread_rwlock_wrlock", r);
3336 }
3337}
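/* The read/write split above is the usual many-readers/one-writer shape:
 * rb_thread_prevent_fork() takes the lock for reading, so any number of
 * threads can run fork-sensitive work concurrently, while the forking side
 * takes it for writing via rb_thread_acquire_fork_lock() and thus waits for
 * that work to drain (and blocks new work) before fork(2) proceeds.
 * rb_thread_reset_fork_lock() then re-creates the lock in the child, where
 * the inherited lock state is not reliable. */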
3338
3339// thread internal event hooks (only for pthread)
3340
3341struct rb_internal_thread_event_hook {
3342 rb_internal_thread_event_callback callback;
3343 rb_event_flag_t event;
3344 void *user_data;
3345
3346 struct rb_internal_thread_event_hook *next;
3347};
3348
3349static pthread_rwlock_t rb_internal_thread_event_hooks_rw_lock = PTHREAD_RWLOCK_INITIALIZER;
3350
3351rb_internal_thread_event_hook_t *
3352rb_internal_thread_add_event_hook(rb_internal_thread_event_callback callback, rb_event_flag_t internal_event, void *user_data)
3353{
3354 rb_internal_thread_event_hook_t *hook = ALLOC_N(rb_internal_thread_event_hook_t, 1);
3355 hook->callback = callback;
3356 hook->user_data = user_data;
3357 hook->event = internal_event;
3358
3359 int r;
3360 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3361 rb_bug_errno("pthread_rwlock_wrlock", r);
3362 }
3363
3364 hook->next = rb_internal_thread_event_hooks;
3365 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook);
3366
3367 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3368 rb_bug_errno("pthread_rwlock_unlock", r);
3369 }
3370 return hook;
3371}
3372
3373bool
3374rb_internal_thread_remove_event_hook(rb_internal_thread_event_hook_t * hook)
3375{
3376 int r;
3377 if ((r = pthread_rwlock_wrlock(&rb_internal_thread_event_hooks_rw_lock))) {
3378 rb_bug_errno("pthread_rwlock_wrlock", r);
3379 }
3380
3381 bool success = FALSE;
3382
3383 if (rb_internal_thread_event_hooks == hook) {
3384 ATOMIC_PTR_EXCHANGE(rb_internal_thread_event_hooks, hook->next);
3385 success = TRUE;
3386 }
3387 else {
3388 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3389
3390 do {
3391 if (h->next == hook) {
3392 h->next = hook->next;
3393 success = TRUE;
3394 break;
3395 }
3396 } while ((h = h->next));
3397 }
3398
3399 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3400 rb_bug_errno("pthread_rwlock_unlock", r);
3401 }
3402
3403 if (success) {
3404 ruby_xfree(hook);
3405 }
3406 return success;
3407}
3408
3409static void
3410rb_thread_execute_hooks(rb_event_flag_t event, rb_thread_t *th)
3411{
3412 int r;
3413 if ((r = pthread_rwlock_rdlock(&rb_internal_thread_event_hooks_rw_lock))) {
3414 rb_bug_errno("pthread_rwlock_rdlock", r);
3415 }
3416
3417 if (rb_internal_thread_event_hooks) {
3418 rb_internal_thread_event_hook_t *h = rb_internal_thread_event_hooks;
3419 do {
3420 if (h->event & event) {
3421 rb_internal_thread_event_data_t event_data = {
3422 .thread = th->self,
3423 };
3424 (*h->callback)(event, &event_data, h->user_data);
3425 }
3426 } while((h = h->next));
3427 }
3428 if ((r = pthread_rwlock_unlock(&rb_internal_thread_event_hooks_rw_lock))) {
3429 rb_bug_errno("pthread_rwlock_unlock", r);
3430 }
3431}
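/* A minimal usage sketch of the hook API implemented above, as seen from a C
 * extension; the callback name and event mask are illustrative only:
 *
 *     static void
 *     my_hook(rb_event_flag_t event,
 *             const rb_internal_thread_event_data_t *event_data,
 *             void *user_data)
 *     {
 *         // keep this short: it runs with the hooks rwlock held for reading
 *     }
 *
 *     rb_internal_thread_event_hook_t *hook =
 *         rb_internal_thread_add_event_hook(my_hook,
 *                                           RUBY_INTERNAL_THREAD_EVENT_STARTED |
 *                                           RUBY_INTERNAL_THREAD_EVENT_EXITED,
 *                                           NULL);
 *     // ... later ...
 *     rb_internal_thread_remove_event_hook(hook);
 */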
3432
3433 // Returns true if the current thread newly acquires a DNT.
3434 // Returns false if the current thread has already acquired a DNT.
3435bool
3437{
3438 rb_thread_t *th = GET_THREAD();
3439 bool is_snt = th->nt->dedicated == 0;
3440 native_thread_dedicated_inc(th->vm, th->ractor, th->nt);
3441
3442 return is_snt;
3443}
3444
3445#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */