Ruby 3.5.0dev (2025-04-24 revision bbd5d3348b519035a5d2cdf777e7c8d5c143055c)
thread.c (bbd5d3348b519035a5d2cdf777e7c8d5c143055c)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Uses pthreads (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
21 Uses pthreads; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of model 1 and 2
25
26 model 5: M:N User:Native thread with fine grain lock
27 Combination of model 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
32 Only the thread that has acquired the mutex (GVL: Global VM Lock or Giant
33 VM Lock) can run. When scheduling, the running thread releases the GVL. If
34 the running thread performs a blocking operation, it must release the GVL so
35 that another thread can continue. After the blocking operation, the thread
36 must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Every VM can run in parallel.
39
40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 Threads run concurrently or in parallel, and exclusive access control is
46 needed to access shared objects. For example, to access a String or Array
47 object, a fine-grained lock must be taken every time.
48 */
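/*
 * A minimal sketch of the model 2 flow described above, assuming hypothetical
 * release_gvl()/acquire_gvl() helpers (placeholders for the scheduler calls
 * used later in this file); only RUBY_VM_CHECK_INTS() is real.  Kept out of
 * compilation on purpose.
 */
#if 0
static void
model2_blocking_operation(rb_thread_t *th)
{
    release_gvl(th);            /* other Ruby threads may now run */
    some_blocking_syscall();    /* e.g. read(2), select(2), nanosleep(2) */
    acquire_gvl(th);            /* Ruby execution is serialized again */
    RUBY_VM_CHECK_INTS(th->ec); /* handle Thread#kill, signals, Thread#raise */
}
#endif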
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET perform a small sanity check when glibc
53 * 2.15 or later is used with _FORTIFY_SOURCE > 0.
54 * However, the check is broken. Even though Linux's select(2)
55 * supports large fd numbers (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). As a result, when HAVE_RB_FD_INIT is
57 * enabled, it doesn't work correctly and makes the program abort. Therefore we
58 * need to disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/gc.h"
82#include "internal/hash.h"
83#include "internal/io.h"
84#include "internal/object.h"
85#include "internal/proc.h"
86#include "ruby/fiber/scheduler.h"
87#include "internal/signal.h"
88#include "internal/thread.h"
89#include "internal/time.h"
90#include "internal/warnings.h"
91#include "iseq.h"
92#include "ruby/debug.h"
93#include "ruby/io.h"
94#include "ruby/thread.h"
95#include "ruby/thread_native.h"
96#include "timev.h"
97#include "vm_core.h"
98#include "ractor_core.h"
99#include "vm_debug.h"
100#include "vm_sync.h"
101
102#ifndef USE_NATIVE_THREAD_PRIORITY
103#define USE_NATIVE_THREAD_PRIORITY 0
104#define RUBY_THREAD_PRIORITY_MAX 3
105#define RUBY_THREAD_PRIORITY_MIN -3
106#endif
107
108static VALUE rb_cThreadShield;
109static VALUE cThGroup;
110
111static VALUE sym_immediate;
112static VALUE sym_on_blocking;
113static VALUE sym_never;
114
115static uint32_t thread_default_quantum_ms = 100;
116
117#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
118#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
119
120static inline VALUE
121rb_thread_local_storage(VALUE thread)
122{
123 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
124 rb_ivar_set(thread, idLocals, rb_hash_new());
125 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
126 }
127 return rb_ivar_get(thread, idLocals);
128}
129
130enum SLEEP_FLAGS {
131 SLEEP_DEADLOCKABLE = 0x01,
132 SLEEP_SPURIOUS_CHECK = 0x02,
133 SLEEP_ALLOW_SPURIOUS = 0x04,
134 SLEEP_NO_CHECKINTS = 0x08,
135};
136
137static void sleep_forever(rb_thread_t *th, unsigned int fl);
138static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
139
140static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
141static int rb_threadptr_dead(rb_thread_t *th);
142static void rb_check_deadlock(rb_ractor_t *r);
143static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
144static const char *thread_status_name(rb_thread_t *th, int detail);
145static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
146NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
147MAYBE_UNUSED(static int consume_communication_pipe(int fd));
148
149static volatile int system_working = 1;
150static rb_internal_thread_specific_key_t specific_key_count;
151
152struct waiting_fd {
153 struct ccan_list_node wfd_node; /* <=> vm.waiting_fds */
154 rb_thread_t *th;
155 int fd;
156 struct rb_io_close_wait_list *busy;
157};
158
159/********************************************************************************/
160
161#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
162
163struct rb_blocking_region_buffer {
164 enum rb_thread_status prev_status;
165};
166
167static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
168static void unblock_function_clear(rb_thread_t *th);
169
170static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
171 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
172static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
173
174#define THREAD_BLOCKING_BEGIN(th) do { \
175 struct rb_thread_sched * const sched = TH_SCHED(th); \
176 RB_VM_SAVE_MACHINE_CONTEXT(th); \
177 thread_sched_to_waiting((sched), (th));
178
179#define THREAD_BLOCKING_END(th) \
180 thread_sched_to_running((sched), (th)); \
181 rb_ractor_thread_switch(th->ractor, th); \
182} while(0)
183
184#ifdef __GNUC__
185#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
186#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
187#else
188#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
189#endif
190#else
191#define only_if_constant(expr, notconst) notconst
192#endif
193#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
194 struct rb_blocking_region_buffer __region; \
195 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
196 /* always return true unless fail_if_interrupted */ \
197 !only_if_constant(fail_if_interrupted, TRUE)) { \
198 /* Important that this is inlined into the macro, and not part of \
199 * blocking_region_begin - see bug #20493 */ \
200 RB_VM_SAVE_MACHINE_CONTEXT(th); \
201 thread_sched_to_waiting(TH_SCHED(th), th); \
202 exec; \
203 blocking_region_end(th, &__region); \
204 }; \
205} while(0)
206
207/*
208 * returns true if this thread was spuriously interrupted, false otherwise
209 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
210 */
211#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
212static inline int
213vm_check_ints_blocking(rb_execution_context_t *ec)
214{
215 rb_thread_t *th = rb_ec_thread_ptr(ec);
216
217 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
218 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
219 }
220 else {
221 th->pending_interrupt_queue_checked = 0;
222 RUBY_VM_SET_INTERRUPT(ec);
223 }
224 return rb_threadptr_execute_interrupts(th, 1);
225}
226
227int
228rb_vm_check_ints_blocking(rb_execution_context_t *ec)
229{
230 return vm_check_ints_blocking(ec);
231}
232
233/*
234 * poll() is supported by many OSes, but so far Linux is the only
235 * one we know of that supports using poll() in all places select()
236 * would work.
237 */
238#if defined(HAVE_POLL)
239# if defined(__linux__)
240# define USE_POLL
241# endif
242# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
243# define USE_POLL
244 /* FreeBSD does not set POLLOUT when POLLHUP happens */
245# define POLLERR_SET (POLLHUP | POLLERR)
246# endif
247#endif
248
249static void
250timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
251 const struct timeval *timeout)
252{
253 if (timeout) {
254 *rel = rb_timeval2hrtime(timeout);
255 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
256 *to = rel;
257 }
258 else {
259 *to = 0;
260 }
261}
262
263MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
264MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
265MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
266
267#include THREAD_IMPL_SRC
268
269/*
270 * TODO: somebody with win32 knowledge should be able to get rid of
271 * timer-thread by busy-waiting on signals. And it should be possible
272 * to make the GVL in thread_pthread.c be platform-independent.
273 */
274#ifndef BUSY_WAIT_SIGNALS
275# define BUSY_WAIT_SIGNALS (0)
276#endif
277
278#ifndef USE_EVENTFD
279# define USE_EVENTFD (0)
280#endif
281
282#include "thread_sync.c"
283
284void
285rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
286{
287 rb_native_mutex_initialize(lock);
288}
289
290void
291rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
292{
293 rb_native_mutex_destroy(lock);
294}
295
296void
297rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
298{
299 rb_native_mutex_lock(lock);
300}
301
302void
303rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
304{
305 rb_native_mutex_unlock(lock);
306}
307
308static int
309unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
310{
311 do {
312 if (fail_if_interrupted) {
313 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
314 return FALSE;
315 }
316 }
317 else {
318 RUBY_VM_CHECK_INTS(th->ec);
319 }
320
321 rb_native_mutex_lock(&th->interrupt_lock);
322 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
323 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
324
325 VM_ASSERT(th->unblock.func == NULL);
326
327 th->unblock.func = func;
328 th->unblock.arg = arg;
329 rb_native_mutex_unlock(&th->interrupt_lock);
330
331 return TRUE;
332}
333
334static void
335unblock_function_clear(rb_thread_t *th)
336{
337 rb_native_mutex_lock(&th->interrupt_lock);
338 th->unblock.func = 0;
339 rb_native_mutex_unlock(&th->interrupt_lock);
340}
341
342static void
343threadptr_interrupt_locked(rb_thread_t *th, bool trap)
344{
345 // th->interrupt_lock should be acquired here
346
347 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
348
349 if (trap) {
350 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
351 }
352 else {
353 RUBY_VM_SET_INTERRUPT(th->ec);
354 }
355
356 if (th->unblock.func != NULL) {
357 (th->unblock.func)(th->unblock.arg);
358 }
359 else {
360 /* none */
361 }
362}
363
364static void
365threadptr_interrupt(rb_thread_t *th, int trap)
366{
367 rb_native_mutex_lock(&th->interrupt_lock);
368 {
369 threadptr_interrupt_locked(th, trap);
370 }
371 rb_native_mutex_unlock(&th->interrupt_lock);
372}
373
374void
375rb_threadptr_interrupt(rb_thread_t *th)
376{
377 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
378 threadptr_interrupt(th, false);
379}
380
381static void
382threadptr_trap_interrupt(rb_thread_t *th)
383{
384 threadptr_interrupt(th, true);
385}
386
387static void
388terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
389{
390 rb_thread_t *th = 0;
391
392 ccan_list_for_each(&r->threads.set, th, lt_node) {
393 if (th != main_thread) {
394 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
395
396 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
397 rb_threadptr_interrupt(th);
398
399 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
400 }
401 else {
402 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
403 }
404 }
405}
406
407static void
408rb_threadptr_join_list_wakeup(rb_thread_t *thread)
409{
410 while (thread->join_list) {
411 struct rb_waiting_list *join_list = thread->join_list;
412
413 // Consume the entry from the join list:
414 thread->join_list = join_list->next;
415
416 rb_thread_t *target_thread = join_list->thread;
417
418 if (target_thread->scheduler != Qnil && join_list->fiber) {
419 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
420 }
421 else {
422 rb_threadptr_interrupt(target_thread);
423
424 switch (target_thread->status) {
425 case THREAD_STOPPED:
426 case THREAD_STOPPED_FOREVER:
427 target_thread->status = THREAD_RUNNABLE;
428 break;
429 default:
430 break;
431 }
432 }
433 }
434}
435
436void
437rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
438{
439 while (th->keeping_mutexes) {
440 rb_mutex_t *mutex = th->keeping_mutexes;
441 th->keeping_mutexes = mutex->next_mutex;
442
443 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
444
445 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
446 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
447 }
448}
449
450void
451rb_thread_terminate_all(rb_thread_t *th)
452{
453 rb_ractor_t *cr = th->ractor;
454 rb_execution_context_t * volatile ec = th->ec;
455 volatile int sleeping = 0;
456
457 if (cr->threads.main != th) {
458 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
459 (void *)cr->threads.main, (void *)th);
460 }
461
462 /* unlock all locking mutexes */
463 rb_threadptr_unlock_all_locking_mutexes(th);
464
465 EC_PUSH_TAG(ec);
466 if (EC_EXEC_TAG() == TAG_NONE) {
467 retry:
468 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
469
470 terminate_all(cr, th);
471
472 while (rb_ractor_living_thread_num(cr) > 1) {
473 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
474 /*
475 * The thread-exit routine in thread_start_func_2 notifies
476 * us when the last sub-thread exits.
477 */
478 sleeping = 1;
479 native_sleep(th, &rel);
480 RUBY_VM_CHECK_INTS_BLOCKING(ec);
481 sleeping = 0;
482 }
483 }
484 else {
485 /*
486 * When an exception is caught (e.g. Ctrl+C), broadcast the
487 * kill request again to ensure all threads are killed even
488 * if they are blocked on sleep, mutex, etc.
489 */
490 if (sleeping) {
491 sleeping = 0;
492 goto retry;
493 }
494 }
495 EC_POP_TAG();
496}
497
498void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
499static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
500
501static void
502thread_cleanup_func_before_exec(void *th_ptr)
503{
504 rb_thread_t *th = th_ptr;
505 th->status = THREAD_KILLED;
506
507 // The thread stack doesn't exist in the forked process:
508 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
509
510 threadptr_interrupt_exec_cleanup(th);
511 rb_threadptr_root_fiber_terminate(th);
512}
513
514static void
515thread_cleanup_func(void *th_ptr, int atfork)
516{
517 rb_thread_t *th = th_ptr;
518
519 th->locking_mutex = Qfalse;
520 thread_cleanup_func_before_exec(th_ptr);
521
522 /*
523 * Unfortunately, we can't release native threading resources at fork,
524 * because libc's locking state may be inconsistent, and touching
525 * a threading resource may therefore cause a deadlock.
526 */
527 if (atfork) {
528 th->nt = NULL;
529 return;
530 }
531
532 rb_native_mutex_destroy(&th->interrupt_lock);
533}
534
535static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
536static VALUE rb_thread_to_s(VALUE thread);
537
538void
539ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
540{
541 native_thread_init_stack(th, local_in_parent_frame);
542}
543
544const VALUE *
545rb_vm_proc_local_ep(VALUE proc)
546{
547 const VALUE *ep = vm_proc_ep(proc);
548
549 if (ep) {
550 return rb_vm_ep_local_ep(ep);
551 }
552 else {
553 return NULL;
554 }
555}
556
557// for ractor, defined in vm.c
558VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
559 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
560
561static VALUE
562thread_do_start_proc(rb_thread_t *th)
563{
564 VALUE args = th->invoke_arg.proc.args;
565 const VALUE *args_ptr;
566 int args_len;
567 VALUE procval = th->invoke_arg.proc.proc;
568 rb_proc_t *proc;
569 GetProcPtr(procval, proc);
570
571 th->ec->errinfo = Qnil;
572 th->ec->root_lep = rb_vm_proc_local_ep(procval);
573 th->ec->root_svar = Qfalse;
574
575 vm_check_ints_blocking(th->ec);
576
577 if (th->invoke_type == thread_invoke_type_ractor_proc) {
578 VALUE self = rb_ractor_self(th->ractor);
579 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
580
581 VM_ASSERT(FIXNUM_P(args));
582 args_len = FIX2INT(args);
583 args_ptr = ALLOCA_N(VALUE, args_len);
584 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
585 vm_check_ints_blocking(th->ec);
586
587 return rb_vm_invoke_proc_with_self(
588 th->ec, proc, self,
589 args_len, args_ptr,
590 th->invoke_arg.proc.kw_splat,
591 VM_BLOCK_HANDLER_NONE
592 );
593 }
594 else {
595 args_len = RARRAY_LENINT(args);
596 if (args_len < 8) {
597 /* free proc.args if the length is small enough */
598 args_ptr = ALLOCA_N(VALUE, args_len);
599 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
600 th->invoke_arg.proc.args = Qnil;
601 }
602 else {
603 args_ptr = RARRAY_CONST_PTR(args);
604 }
605
606 vm_check_ints_blocking(th->ec);
607
608 return rb_vm_invoke_proc(
609 th->ec, proc,
610 args_len, args_ptr,
611 th->invoke_arg.proc.kw_splat,
612 VM_BLOCK_HANDLER_NONE
613 );
614 }
615}
616
617static VALUE
618thread_do_start(rb_thread_t *th)
619{
620 native_set_thread_name(th);
621 VALUE result = Qundef;
622
623 switch (th->invoke_type) {
624 case thread_invoke_type_proc:
625 result = thread_do_start_proc(th);
626 break;
627
628 case thread_invoke_type_ractor_proc:
629 result = thread_do_start_proc(th);
630 rb_ractor_atexit(th->ec, result);
631 break;
632
633 case thread_invoke_type_func:
634 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
635 break;
636
637 case thread_invoke_type_none:
638 rb_bug("unreachable");
639 }
640
641 return result;
642}
643
644void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
645
646static int
647thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
648{
649 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
650 VM_ASSERT(th != th->vm->ractor.main_thread);
651
652 enum ruby_tag_type state;
653 VALUE errinfo = Qnil;
654 rb_thread_t *ractor_main_th = th->ractor->threads.main;
655
656 // setup ractor
657 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
658 RB_VM_LOCK();
659 {
660 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
661 rb_ractor_t *r = th->ractor;
662 r->r_stdin = rb_io_prep_stdin();
663 r->r_stdout = rb_io_prep_stdout();
664 r->r_stderr = rb_io_prep_stderr();
665 }
666 RB_VM_UNLOCK();
667 }
668
669 // Ensure that we are not joinable.
670 VM_ASSERT(UNDEF_P(th->value));
671
672 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
673 VALUE result = Qundef;
674
675 EC_PUSH_TAG(th->ec);
676
677 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
678 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
679
680 result = thread_do_start(th);
681 }
682
683 if (!fiber_scheduler_closed) {
684 fiber_scheduler_closed = 1;
685 rb_fiber_scheduler_set(Qnil);
686 }
687
688 if (!event_thread_end_hooked) {
689 event_thread_end_hooked = 1;
690 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
691 }
692
693 if (state == TAG_NONE) {
694 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
695 th->value = result;
696 }
697 else {
698 errinfo = th->ec->errinfo;
699
700 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
701 if (!NIL_P(exc)) errinfo = exc;
702
703 if (state == TAG_FATAL) {
704 if (th->invoke_type == thread_invoke_type_ractor_proc) {
705 rb_ractor_atexit(th->ec, Qnil);
706 }
707 /* fatal error within this thread, need to stop whole script */
708 }
709 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
710 /* exit on main_thread. */
711 }
712 else {
713 if (th->report_on_exception) {
714 VALUE mesg = rb_thread_to_s(th->self);
715 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
716 rb_write_error_str(mesg);
717 rb_ec_error_print(th->ec, errinfo);
718 }
719
720 if (th->invoke_type == thread_invoke_type_ractor_proc) {
721 rb_ractor_atexit_exception(th->ec);
722 }
723
724 if (th->vm->thread_abort_on_exception ||
725 th->abort_on_exception || RTEST(ruby_debug)) {
726 /* exit on main_thread */
727 }
728 else {
729 errinfo = Qnil;
730 }
731 }
732 th->value = Qnil;
733 }
734
735 // The thread is effectively finished and can be joined.
736 VM_ASSERT(!UNDEF_P(th->value));
737
738 rb_threadptr_join_list_wakeup(th);
739 rb_threadptr_unlock_all_locking_mutexes(th);
740
741 if (th->invoke_type == thread_invoke_type_ractor_proc) {
742 rb_thread_terminate_all(th);
743 rb_ractor_teardown(th->ec);
744 }
745
746 th->status = THREAD_KILLED;
747 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
748
749 if (th->vm->ractor.main_thread == th) {
750 ruby_stop(0);
751 }
752
753 if (RB_TYPE_P(errinfo, T_OBJECT)) {
754 /* treat with normal error object */
755 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
756 }
757
758 EC_POP_TAG();
759
760 rb_ec_clear_current_thread_trace_func(th->ec);
761
762 /* locking_mutex must be Qfalse */
763 if (th->locking_mutex != Qfalse) {
764 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
765 (void *)th, th->locking_mutex);
766 }
767
768 if (ractor_main_th->status == THREAD_KILLED &&
769 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
770 /* I'm last thread. wake up main thread from rb_thread_terminate_all */
771 rb_threadptr_interrupt(ractor_main_th);
772 }
773
774 rb_check_deadlock(th->ractor);
775
776 rb_fiber_close(th->ec->fiber_ptr);
777
778 thread_cleanup_func(th, FALSE);
779 VM_ASSERT(th->ec->vm_stack == NULL);
780
781 if (th->invoke_type == thread_invoke_type_ractor_proc) {
782 // after rb_ractor_living_threads_remove()
783 // GC will happen anytime and this ractor can be collected (and destroy GVL).
784 // So gvl_release() should be before it.
785 thread_sched_to_dead(TH_SCHED(th), th);
786 rb_ractor_living_threads_remove(th->ractor, th);
787 }
788 else {
789 rb_ractor_living_threads_remove(th->ractor, th);
790 thread_sched_to_dead(TH_SCHED(th), th);
791 }
792
793 return 0;
794}
795
796struct thread_create_params {
797 enum thread_invoke_type type;
798
799 // for normal proc thread
800 VALUE args;
801 VALUE proc;
802
803 // for ractor
804 rb_ractor_t *g;
805
806 // for func
807 VALUE (*fn)(void *);
808};
809
810static void thread_specific_storage_alloc(rb_thread_t *th);
811
812static VALUE
813thread_create_core(VALUE thval, struct thread_create_params *params)
814{
815 rb_execution_context_t *ec = GET_EC();
816 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
817 int err;
818
819 thread_specific_storage_alloc(th);
820
821 if (OBJ_FROZEN(current_th->thgroup)) {
822 rb_raise(rb_eThreadError,
823 "can't start a new thread (frozen ThreadGroup)");
824 }
825
826 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
827
828 switch (params->type) {
829 case thread_invoke_type_proc:
830 th->invoke_type = thread_invoke_type_proc;
831 th->invoke_arg.proc.args = params->args;
832 th->invoke_arg.proc.proc = params->proc;
833 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
834 break;
835
836 case thread_invoke_type_ractor_proc:
837#if RACTOR_CHECK_MODE > 0
838 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
839#endif
840 th->invoke_type = thread_invoke_type_ractor_proc;
841 th->ractor = params->g;
842 th->ractor->threads.main = th;
843 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
844 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
845 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
846 rb_ractor_send_parameters(ec, params->g, params->args);
847 break;
848
849 case thread_invoke_type_func:
850 th->invoke_type = thread_invoke_type_func;
851 th->invoke_arg.func.func = params->fn;
852 th->invoke_arg.func.arg = (void *)params->args;
853 break;
854
855 default:
856 rb_bug("unreachable");
857 }
858
859 th->priority = current_th->priority;
860 th->thgroup = current_th->thgroup;
861
862 th->pending_interrupt_queue = rb_ary_hidden_new(0);
863 th->pending_interrupt_queue_checked = 0;
864 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
865 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
866
867 rb_native_mutex_initialize(&th->interrupt_lock);
868
869 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
870
871 rb_ractor_living_threads_insert(th->ractor, th);
872
873 /* kick thread */
874 err = native_thread_create(th);
875 if (err) {
876 th->status = THREAD_KILLED;
877 rb_ractor_living_threads_remove(th->ractor, th);
878 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
879 }
880 return thval;
881}
882
883#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
884
885/*
886 * call-seq:
887 * Thread.new { ... } -> thread
888 * Thread.new(*args, &proc) -> thread
889 * Thread.new(*args) { |args| ... } -> thread
890 *
891 * Creates a new thread executing the given block.
892 *
893 * Any +args+ given to ::new will be passed to the block:
894 *
895 * arr = []
896 * a, b, c = 1, 2, 3
897 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
898 * arr #=> [1, 2, 3]
899 *
900 * A ThreadError exception is raised if ::new is called without a block.
901 *
902 * If you're going to subclass Thread, be sure to call super in your
903 * +initialize+ method, otherwise a ThreadError will be raised.
904 */
905static VALUE
906thread_s_new(int argc, VALUE *argv, VALUE klass)
907{
908 rb_thread_t *th;
909 VALUE thread = rb_thread_alloc(klass);
910
911 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
912 rb_raise(rb_eThreadError, "can't alloc thread");
913 }
914
915 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
916 th = rb_thread_ptr(thread);
917 if (!threadptr_initialized(th)) {
918 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
919 klass);
920 }
921 return thread;
922}
923
924/*
925 * call-seq:
926 * Thread.start([args]*) {|args| block } -> thread
927 * Thread.fork([args]*) {|args| block } -> thread
928 *
929 * Basically the same as ::new. However, if class Thread is subclassed, then
930 * calling +start+ in that subclass will not invoke the subclass's
931 * +initialize+ method.
932 */
933
934static VALUE
935thread_start(VALUE klass, VALUE args)
936{
937 struct thread_create_params params = {
938 .type = thread_invoke_type_proc,
939 .args = args,
940 .proc = rb_block_proc(),
941 };
942 return thread_create_core(rb_thread_alloc(klass), &params);
943}
944
945static VALUE
946threadptr_invoke_proc_location(rb_thread_t *th)
947{
948 if (th->invoke_type == thread_invoke_type_proc) {
949 return rb_proc_location(th->invoke_arg.proc.proc);
950 }
951 else {
952 return Qnil;
953 }
954}
955
956/* :nodoc: */
957static VALUE
958thread_initialize(VALUE thread, VALUE args)
959{
960 rb_thread_t *th = rb_thread_ptr(thread);
961
962 if (!rb_block_given_p()) {
963 rb_raise(rb_eThreadError, "must be called with a block");
964 }
965 else if (th->invoke_type != thread_invoke_type_none) {
966 VALUE loc = threadptr_invoke_proc_location(th);
967 if (!NIL_P(loc)) {
968 rb_raise(rb_eThreadError,
969 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
970 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
971 }
972 else {
973 rb_raise(rb_eThreadError, "already initialized thread");
974 }
975 }
976 else {
977 struct thread_create_params params = {
978 .type = thread_invoke_type_proc,
979 .args = args,
980 .proc = rb_block_proc(),
981 };
982 return thread_create_core(thread, &params);
983 }
984}
985
986VALUE
987rb_thread_create(VALUE (*fn)(void *), void *arg)
988{
989 struct thread_create_params params = {
990 .type = thread_invoke_type_func,
991 .fn = fn,
992 .args = (VALUE)arg,
993 };
994 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
995}
996
997VALUE
998rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
999{
1000 struct thread_create_params params = {
1001 .type = thread_invoke_type_ractor_proc,
1002 .g = r,
1003 .args = args,
1004 .proc = proc,
1005 };
1006 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1007}
1008
1010struct join_arg {
1011 struct rb_waiting_list *waiter;
1012 rb_thread_t *target;
1013 VALUE timeout;
1014 rb_hrtime_t *limit;
1015};
1016
1017static VALUE
1018remove_from_join_list(VALUE arg)
1019{
1020 struct join_arg *p = (struct join_arg *)arg;
1021 rb_thread_t *target_thread = p->target;
1022
1023 if (target_thread->status != THREAD_KILLED) {
1024 struct rb_waiting_list **join_list = &target_thread->join_list;
1025
1026 while (*join_list) {
1027 if (*join_list == p->waiter) {
1028 *join_list = (*join_list)->next;
1029 break;
1030 }
1031
1032 join_list = &(*join_list)->next;
1033 }
1034 }
1035
1036 return Qnil;
1037}
1038
1039static int
1040thread_finished(rb_thread_t *th)
1041{
1042 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1043}
1044
1045static VALUE
1046thread_join_sleep(VALUE arg)
1047{
1048 struct join_arg *p = (struct join_arg *)arg;
1049 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1050 rb_hrtime_t end = 0, *limit = p->limit;
1051
1052 if (limit) {
1053 end = rb_hrtime_add(*limit, rb_hrtime_now());
1054 }
1055
1056 while (!thread_finished(target_th)) {
1057 VALUE scheduler = rb_fiber_scheduler_current();
1058
1059 if (scheduler != Qnil) {
1060 rb_fiber_scheduler_block(scheduler, target_th->self, p->timeout);
1061 // Check if the target thread is finished after blocking:
1062 if (thread_finished(target_th)) break;
1063 // Otherwise, a timeout occurred:
1064 else return Qfalse;
1065 }
1066 else if (!limit) {
1067 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1068 }
1069 else {
1070 if (hrtime_update_expire(limit, end)) {
1071 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1072 return Qfalse;
1073 }
1074 th->status = THREAD_STOPPED;
1075 native_sleep(th, limit);
1076 }
1077 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1078 th->status = THREAD_RUNNABLE;
1079
1080 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1081 }
1082
1083 return Qtrue;
1084}
1085
1086static VALUE
1087thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1088{
1089 rb_execution_context_t *ec = GET_EC();
1090 rb_thread_t *th = ec->thread_ptr;
1091 rb_fiber_t *fiber = ec->fiber_ptr;
1092
1093 if (th == target_th) {
1094 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1095 }
1096
1097 if (th->ractor->threads.main == target_th) {
1098 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1099 }
1100
1101 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1102
1103 if (target_th->status != THREAD_KILLED) {
1104 struct rb_waiting_list waiter;
1105 waiter.next = target_th->join_list;
1106 waiter.thread = th;
1107 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1108 target_th->join_list = &waiter;
1109
1110 struct join_arg arg;
1111 arg.waiter = &waiter;
1112 arg.target = target_th;
1113 arg.timeout = timeout;
1114 arg.limit = limit;
1115
1116 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1117 return Qnil;
1118 }
1119 }
1120
1121 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1122
1123 if (target_th->ec->errinfo != Qnil) {
1124 VALUE err = target_th->ec->errinfo;
1125
1126 if (FIXNUM_P(err)) {
1127 switch (err) {
1128 case INT2FIX(TAG_FATAL):
1129 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1130
1131 /* OK. killed. */
1132 break;
1133 default:
1134 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1135 }
1136 }
1137 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1138 rb_bug("thread_join: THROW_DATA should not reach here.");
1139 }
1140 else {
1141 /* normal exception */
1142 rb_exc_raise(err);
1143 }
1144 }
1145 return target_th->self;
1146}
1147
1148/*
1149 * call-seq:
1150 * thr.join -> thr
1151 * thr.join(limit) -> thr
1152 *
1153 * The calling thread will suspend execution and run this +thr+.
1154 *
1155 * Does not return until +thr+ exits or until the given +limit+ seconds have
1156 * passed.
1157 *
1158 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1159 * returned.
1160 *
1161 * Any threads not joined will be killed when the main program exits.
1162 *
1163 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1164 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1165 * will be processed at this time.
1166 *
1167 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1168 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1169 * x.join # Let thread x finish, thread a will be killed on exit.
1170 * #=> "axyz"
1171 *
1172 * The following example illustrates the +limit+ parameter.
1173 *
1174 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1175 * puts "Waiting" until y.join(0.15)
1176 *
1177 * This will produce:
1178 *
1179 * tick...
1180 * Waiting
1181 * tick...
1182 * Waiting
1183 * tick...
1184 * tick...
1185 */
1186
1187static VALUE
1188thread_join_m(int argc, VALUE *argv, VALUE self)
1189{
1190 VALUE timeout = Qnil;
1191 rb_hrtime_t rel = 0, *limit = 0;
1192
1193 if (rb_check_arity(argc, 0, 1)) {
1194 timeout = argv[0];
1195 }
1196
1197 // Convert the timeout eagerly, so it's always converted and deterministic
1198 /*
1199 * This supports INFINITY and negative values, so we can't use
1200 * rb_time_interval right now...
1201 */
1202 if (NIL_P(timeout)) {
1203 /* unlimited */
1204 }
1205 else if (FIXNUM_P(timeout)) {
1206 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1207 limit = &rel;
1208 }
1209 else {
1210 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1211 }
1212
1213 return thread_join(rb_thread_ptr(self), timeout, limit);
1214}
1215
1216/*
1217 * call-seq:
1218 * thr.value -> obj
1219 *
1220 * Waits for +thr+ to complete, using #join, and returns its value or raises
1221 * the exception which terminated the thread.
1222 *
1223 * a = Thread.new { 2 + 2 }
1224 * a.value #=> 4
1225 *
1226 * b = Thread.new { raise 'something went wrong' }
1227 * b.value #=> RuntimeError: something went wrong
1228 */
1229
1230static VALUE
1231thread_value(VALUE self)
1232{
1233 rb_thread_t *th = rb_thread_ptr(self);
1234 thread_join(th, Qnil, 0);
1235 if (UNDEF_P(th->value)) {
1236 // If the thread is dead because we forked, th->value is still Qundef.
1237 return Qnil;
1238 }
1239 return th->value;
1240}
1241
1242/*
1243 * Thread Scheduling
1244 */
1245
1246static void
1247getclockofday(struct timespec *ts)
1248{
1249#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1250 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1251 return;
1252#endif
1253 rb_timespec_now(ts);
1254}
1255
1256/*
1257 * Don't inline this, since the library call is already time-consuming
1258 * and we don't want "struct timespec" on the stack too long, for GC's sake
1259 */
1260NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1261rb_hrtime_t
1262rb_hrtime_now(void)
1263{
1264 struct timespec ts;
1265
1266 getclockofday(&ts);
1267 return rb_timespec2hrtime(&ts);
1268}
1269
1270/*
1271 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1272 * being uninitialized, maybe other versions, too.
1273 */
1274COMPILER_WARNING_PUSH
1275#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1276COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1277#endif
1278#ifndef PRIu64
1279#define PRIu64 PRI_64_PREFIX "u"
1280#endif
1281/*
1282 * @end is the absolute time when @timeout is set to expire.
1283 * Returns true if @end has passed;
1284 * updates @timeout and returns false otherwise.
1285 */
1286static int
1287hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1288{
1289 rb_hrtime_t now = rb_hrtime_now();
1290
1291 if (now > end) return 1;
1292
1293 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1294
1295 *timeout = end - now;
1296 return 0;
1297}
1298COMPILER_WARNING_POP
1299
1300static int
1301sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1302{
1303 enum rb_thread_status prev_status = th->status;
1304 int woke;
1305 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1306
1307 th->status = THREAD_STOPPED;
1308 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1309 while (th->status == THREAD_STOPPED) {
1310 native_sleep(th, &rel);
1311 woke = vm_check_ints_blocking(th->ec);
1312 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1313 break;
1314 if (hrtime_update_expire(&rel, end))
1315 break;
1316 woke = 1;
1317 }
1318 th->status = prev_status;
1319 return woke;
1320}
1321
1322static int
1323sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1324{
1325 enum rb_thread_status prev_status = th->status;
1326 int woke;
1327 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1328
1329 th->status = THREAD_STOPPED;
1330 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1331 while (th->status == THREAD_STOPPED) {
1332 native_sleep(th, &rel);
1333 woke = vm_check_ints_blocking(th->ec);
1334 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1335 break;
1336 if (hrtime_update_expire(&rel, end))
1337 break;
1338 woke = 1;
1339 }
1340 th->status = prev_status;
1341 return woke;
1342}
1343
1344static void
1345sleep_forever(rb_thread_t *th, unsigned int fl)
1346{
1347 enum rb_thread_status prev_status = th->status;
1348 enum rb_thread_status status;
1349 int woke;
1350
1351 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1352 th->status = status;
1353
1354 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1355
1356 while (th->status == status) {
1357 if (fl & SLEEP_DEADLOCKABLE) {
1358 rb_ractor_sleeper_threads_inc(th->ractor);
1359 rb_check_deadlock(th->ractor);
1360 }
1361 {
1362 native_sleep(th, 0);
1363 }
1364 if (fl & SLEEP_DEADLOCKABLE) {
1365 rb_ractor_sleeper_threads_dec(th->ractor);
1366 }
1367 if (fl & SLEEP_ALLOW_SPURIOUS) {
1368 break;
1369 }
1370
1371 woke = vm_check_ints_blocking(th->ec);
1372
1373 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1374 break;
1375 }
1376 }
1377 th->status = prev_status;
1378}
1379
1380void
1381rb_thread_sleep_forever(void)
1382{
1383 RUBY_DEBUG_LOG("forever");
1384 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1385}
1386
1387void
1388rb_thread_sleep_deadly(void)
1389{
1390 RUBY_DEBUG_LOG("deadly");
1391 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1392}
1393
1394static void
1395rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1396{
1397 VALUE scheduler = rb_fiber_scheduler_current();
1398 if (scheduler != Qnil) {
1399 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1400 }
1401 else {
1402 RUBY_DEBUG_LOG("...");
1403 if (end) {
1404 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1405 }
1406 else {
1407 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1408 }
1409 }
1410}
1411
1412void
1413rb_thread_wait_for(struct timeval time)
1414{
1415 rb_thread_t *th = GET_THREAD();
1416
1417 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1418}
1419
1420void
1421rb_ec_check_ints(rb_execution_context_t *ec)
1422{
1423 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1424}
1425
1426/*
1427 * CAUTION: This function causes thread switching.
1428 * rb_thread_check_ints() checks Ruby's interrupts.
1429 * Some interrupts need thread switching, invoking handlers,
1430 * and so on.
1431 */
1432
1433void
1434rb_thread_check_ints(void)
1435{
1436 rb_ec_check_ints(GET_EC());
1437}
1438
1439/*
1440 * Hidden API for tcl/tk wrapper.
1441 * There is no guarantee that it will be kept.
1442 */
1443int
1444rb_thread_check_trap_pending(void)
1445{
1446 return rb_signal_buff_size() != 0;
1447}
1448
1449/* This function can be called in blocking region. */
1450int
1451rb_thread_interrupted(VALUE thval)
1452{
1453 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1454}
1455
1456void
1457rb_thread_sleep(int sec)
1458{
1459 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1460}
1461
1462static void
1463rb_thread_schedule_limits(uint32_t limits_us)
1464{
1465 if (!rb_thread_alone()) {
1466 rb_thread_t *th = GET_THREAD();
1467 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1468
1469 if (th->running_time_us >= limits_us) {
1470 RUBY_DEBUG_LOG("switch %s", "start");
1471
1472 RB_VM_SAVE_MACHINE_CONTEXT(th);
1473 thread_sched_yield(TH_SCHED(th), th);
1474 rb_ractor_thread_switch(th->ractor, th);
1475
1476 RUBY_DEBUG_LOG("switch %s", "done");
1477 }
1478 }
1479}
1480
1481void
1482rb_thread_schedule(void)
1483{
1484 rb_thread_schedule_limits(0);
1485 RUBY_VM_CHECK_INTS(GET_EC());
1486}
1487
1488/* blocking region */
1489
1490static inline int
1491blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1492 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1493{
1494#ifdef RUBY_ASSERT_CRITICAL_SECTION
1495 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1496#endif
1497 VM_ASSERT(th == GET_THREAD());
1498
1499 region->prev_status = th->status;
1500 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1501 th->blocking_region_buffer = region;
1502 th->status = THREAD_STOPPED;
1503 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1504
1505 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1506 return TRUE;
1507 }
1508 else {
1509 return FALSE;
1510 }
1511}
1512
1513static inline void
1514blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1515{
1516 /* entry to ubf_list still permitted at this point, make it impossible: */
1517 unblock_function_clear(th);
1518 /* entry to ubf_list impossible at this point, so unregister is safe: */
1519 unregister_ubf_list(th);
1520
1521 thread_sched_to_running(TH_SCHED(th), th);
1522 rb_ractor_thread_switch(th->ractor, th);
1523
1524 th->blocking_region_buffer = 0;
1525 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1526 if (th->status == THREAD_STOPPED) {
1527 th->status = region->prev_status;
1528 }
1529
1530 RUBY_DEBUG_LOG("end");
1531
1532#ifndef _WIN32
1533 // GET_THREAD() clears WSAGetLastError()
1534 VM_ASSERT(th == GET_THREAD());
1535#endif
1536}
1537
1538void *
1539rb_nogvl(void *(*func)(void *), void *data1,
1540 rb_unblock_function_t *ubf, void *data2,
1541 int flags)
1542{
1543 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1544 VALUE scheduler = rb_fiber_scheduler_current();
1545 if (scheduler != Qnil) {
1546 struct rb_fiber_scheduler_blocking_operation_state state = {0};
1547
1548 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1549
1550 if (!UNDEF_P(result)) {
1551 rb_errno_set(state.saved_errno);
1552 return state.result;
1553 }
1554 }
1555 }
1556
1557 void *val = 0;
1558 rb_execution_context_t *ec = GET_EC();
1559 rb_thread_t *th = rb_ec_thread_ptr(ec);
1560 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1561 bool is_main_thread = vm->ractor.main_thread == th;
1562 int saved_errno = 0;
1563
1564 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1565 ubf = ubf_select;
1566 data2 = th;
1567 }
1568 else if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1569 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1570 vm->ubf_async_safe = 1;
1571 }
1572 }
1573
1574 rb_vm_t *volatile saved_vm = vm;
1575 BLOCKING_REGION(th, {
1576 val = func(data1);
1577 saved_errno = rb_errno();
1578 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1579 vm = saved_vm;
1580
1581 if (is_main_thread) vm->ubf_async_safe = 0;
1582
1583 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1584 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1585 }
1586
1587 rb_errno_set(saved_errno);
1588
1589 return val;
1590}
1591
1592/*
1593 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1594 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1595 * without interrupt process.
1596 *
1597 * rb_thread_call_without_gvl() does:
1598 * (1) Check interrupts.
1599 * (2) release GVL.
1600 * Other Ruby threads may run in parallel.
1601 * (3) call func with data1
1602 * (4) acquire GVL.
1603 * Other Ruby threads can not run in parallel any more.
1604 * (5) Check interrupts.
1605 *
1606 * rb_thread_call_without_gvl2() does:
1607 * (1) Check interrupt and return if interrupted.
1608 * (2) release GVL.
1609 * (3) call func with data1 and a pointer to the flags.
1610 * (4) acquire GVL.
1611 *
1612 * If another thread interrupts this thread (Thread#kill, signal delivery,
1613 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1614 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1615 * toggling a cancellation flag, canceling the invocation of a call inside
1616 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1617 *
1618 * There are built-in ubfs and you can specify these ubfs:
1619 *
1620 * * RUBY_UBF_IO: ubf for IO operation
1621 * * RUBY_UBF_PROCESS: ubf for process operation
1622 *
1623 * However, we can not guarantee that our built-in ubfs interrupt your `func()'
1624 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1625 * provide a proper ubf(), your program will not stop for Control+C or other
1626 * shutdown events.
1627 *
1628 * "Check interrupts" on above list means checking asynchronous
1629 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1630 * request, and so on) and calling corresponding procedures
1631 * (such as `trap' for signals, or raising an exception for Thread#raise).
1632 * If `func()' has already finished when the interrupt arrives, you may want to
1633 * skip the interrupt check. For example, assume the following func(), which reads data from a file.
1634 *
1635 * read_func(...) {
1636 * // (a) before read
1637 * read(buffer); // (b) reading
1638 * // (c) after read
1639 * }
1640 *
1641 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1642 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1643 * at (c), after the *read* operation is completed, checking interrupts is harmful
1644 * because it causes an irrevocable side effect: the read data will vanish. To
1645 * avoid this problem, `read_func()' should be used with
1646 * `rb_thread_call_without_gvl2()'.
1647 *
1648 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1649 * immediately. This function does not indicate when the execution was interrupted.
1650 * For example, there are 4 possible timings: (a), (b), (c), and before calling
1651 * read_func(). You need to record the progress of read_func() and check
1652 * it after `rb_thread_call_without_gvl2()'. You may also need to call
1653 * `rb_thread_check_ints()' correctly, or your program will not run handlers
1654 * such as `trap' and so on.
1655 *
1656 * NOTE: You can not use most of the Ruby C API or touch Ruby
1657 *       objects in `func()' and `ubf()', including raising an
1658 *       exception, because the current thread does not hold the GVL
1659 *       (doing so causes synchronization problems). If you need to
1660 *       call Ruby functions, either use rb_thread_call_with_gvl()
1661 *       or read the source code of the C APIs and confirm their safety
1662 *       yourself.
1663 *
1664 * NOTE: In short, this API is difficult to use safely. We recommend
1665 *       other approaches if you have them; we lack experience with this API.
1666 *       Please report any problems related to it.
1667 *
1668 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
1669 *       for a short-running `func()'. Be sure to benchmark, and use this
1670 *       mechanism only when `func()' consumes enough time.
1671 *
1672 * Safe C API:
1673 * * rb_thread_interrupted() - check interrupt flag
1674 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1675 * they will work without GVL, and may acquire GVL when GC is needed.
1676 */
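/*
 * A minimal usage sketch of rb_thread_call_without_gvl(), kept out of
 * compilation.  The nogvl_read_* helpers and their argument struct are
 * hypothetical; only rb_thread_call_without_gvl() itself is the documented API.
 */
#if 0
#include <unistd.h>

struct nogvl_read_args {
    int fd;
    void *buf;
    size_t capacity;
    ssize_t result;
};

static void *
nogvl_read_func(void *ptr)
{
    struct nogvl_read_args *args = ptr;
    /* Runs with the GVL released: must not touch Ruby objects here. */
    args->result = read(args->fd, args->buf, args->capacity);
    return NULL;
}

static void
nogvl_read_ubf(void *ptr)
{
    struct nogvl_read_args *args = ptr;
    /* Un-blocking function: make the pending read(2) return, for example
     * by closing the descriptor or writing to a self-pipe. */
    close(args->fd);
}

static ssize_t
read_without_gvl(int fd, void *buf, size_t capacity)
{
    struct nogvl_read_args args = {fd, buf, capacity, -1};

    /* (1) checks interrupts, (2) releases the GVL, (3) calls nogvl_read_func(),
     * (4) re-acquires the GVL, (5) checks interrupts again */
    rb_thread_call_without_gvl(nogvl_read_func, &args, nogvl_read_ubf, &args);
    return args.result;
}
#endif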
1677void *
1678rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1679 rb_unblock_function_t *ubf, void *data2)
1680{
1681 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1682}
1683
1684void *
1685rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1686 rb_unblock_function_t *ubf, void *data2)
1687{
1688 return rb_nogvl(func, data1, ubf, data2, 0);
1689}
1690
1691static int
1692waitfd_to_waiting_flag(int wfd_event)
1693{
1694 return wfd_event << 1;
1695}
1696
1697static void
1698thread_io_setup_wfd(rb_thread_t *th, int fd, struct waiting_fd *wfd)
1699{
1700 wfd->fd = fd;
1701 wfd->th = th;
1702 wfd->busy = NULL;
1703
1704 RB_VM_LOCK_ENTER();
1705 {
1706 ccan_list_add(&th->vm->waiting_fds, &wfd->wfd_node);
1707 }
1708 RB_VM_LOCK_LEAVE();
1709}
1710
1711static void
1712thread_io_wake_pending_closer(struct waiting_fd *wfd)
1713{
1714 bool has_waiter = wfd->busy && RB_TEST(wfd->busy->wakeup_mutex);
1715 if (has_waiter) {
1716 rb_mutex_lock(wfd->busy->wakeup_mutex);
1717 }
1718
1719 /* Needs to be protected with RB_VM_LOCK because we don't know if
1720 wfd is on the global list of pending FD ops or if it's on a
1721 struct rb_io_close_wait_list close-waiter. */
1722 RB_VM_LOCK_ENTER();
1723 ccan_list_del(&wfd->wfd_node);
1724 RB_VM_LOCK_LEAVE();
1725
1726 if (has_waiter) {
1727 rb_thread_t *th = rb_thread_ptr(wfd->busy->closing_thread);
1728 if (th->scheduler != Qnil) {
1729 rb_fiber_scheduler_unblock(th->scheduler, wfd->busy->closing_thread, wfd->busy->closing_fiber);
1730 }
1731 else {
1732 rb_thread_wakeup(wfd->busy->closing_thread);
1733 }
1734 rb_mutex_unlock(wfd->busy->wakeup_mutex);
1735 }
1736}
1737
1738static bool
1739thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1740{
1741#if defined(USE_MN_THREADS) && USE_MN_THREADS
1742 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1743#else
1744 return false;
1745#endif
1746}
1747
1748// true if need retry
1749static bool
1750thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1751{
1752#if defined(USE_MN_THREADS) && USE_MN_THREADS
1753 if (thread_io_mn_schedulable(th, events, timeout)) {
1754 rb_hrtime_t rel, *prel;
1755
1756 if (timeout) {
1757 rel = rb_timeval2hrtime(timeout);
1758 prel = &rel;
1759 }
1760 else {
1761 prel = NULL;
1762 }
1763
1764 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1765
1766 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1767 // timeout
1768 return false;
1769 }
1770 else {
1771 return true;
1772 }
1773 }
1774#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1775 return false;
1776}
1777
1778// assume read/write
1779static bool
1780blocking_call_retryable_p(int r, int eno)
1781{
1782 if (r != -1) return false;
1783
1784 switch (eno) {
1785 case EAGAIN:
1786#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1787 case EWOULDBLOCK:
1788#endif
1789 return true;
1790 default:
1791 return false;
1792 }
1793}
1794
1795bool
1796rb_thread_mn_schedulable(VALUE thval)
1797{
1798 rb_thread_t *th = rb_thread_ptr(thval);
1799 return th->mn_schedulable;
1800}
1801
1802VALUE
1803rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
1804{
1805 rb_execution_context_t *volatile ec = GET_EC();
1806 rb_thread_t *volatile th = rb_ec_thread_ptr(ec);
1807
1808 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
1809
1810 struct waiting_fd waiting_fd;
1811 volatile VALUE val = Qundef; /* shouldn't be used */
1812 volatile int saved_errno = 0;
1813 enum ruby_tag_type state;
1814 volatile bool prev_mn_schedulable = th->mn_schedulable;
1815 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1816
1817 int fd = io->fd;
1818
1819 // `errno` is only valid when there is an actual error - but we can't
1820 // extract that from the return value of `func` alone, so we clear any
1821 // prior `errno` value here so that we can later check if it was set by
1822 // `func` or not (as opposed to some previously set value).
1823 errno = 0;
1824
1825 thread_io_setup_wfd(th, fd, &waiting_fd);
1826 {
1827 EC_PUSH_TAG(ec);
1828 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1829 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1830 retry:
1831 BLOCKING_REGION(th, {
1832 val = func(data1);
1833 saved_errno = errno;
1834 }, ubf_select, th, FALSE);
1835
1836 th = rb_ec_thread_ptr(ec);
1837 if (events &&
1838 blocking_call_retryable_p((int)val, saved_errno) &&
1839 thread_io_wait_events(th, fd, events, NULL)) {
1840 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1841 goto retry;
1842 }
1843 state = saved_state;
1844 }
1845 EC_POP_TAG();
1846
1847 th = rb_ec_thread_ptr(ec);
1848 th->mn_schedulable = prev_mn_schedulable;
1849 }
1850 /*
1851 * must be deleted before jump
1852 * this will delete either from waiting_fds or on-stack struct rb_io_close_wait_list
1853 */
1854 thread_io_wake_pending_closer(&waiting_fd);
1855
1856 if (state) {
1857 EC_JUMP_TAG(ec, state);
1858 }
1859 /* TODO: check func() */
1860 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1861
1862 // If the error was a timeout, we raise a specific exception for that:
1863 if (saved_errno == ETIMEDOUT) {
1864 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1865 }
1866
1867 errno = saved_errno;
1868
1869 return val;
1870}
1871
1872VALUE
1873rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
1874{
1875 return rb_thread_io_blocking_call(io, func, data1, 0);
1876}
1877
1878/*
1879 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
1880 *
1881 * After releasing GVL using
1882 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
1883 * methods. If you need to access Ruby you must use this function
1884 * rb_thread_call_with_gvl().
1885 *
1886 * This function rb_thread_call_with_gvl() does:
1887 * (1) acquire GVL.
1888 * (2) call passed function `func'.
1889 * (3) release GVL.
1890 * (4) return a value which is returned at (2).
1891 *
1892 * NOTE: You should not return a Ruby object at (2) because such an object
1893 *       will not be marked.
1894 *
1895 * NOTE: If an exception is raised in `func', this function DOES NOT
1896 *       protect (catch) the exception. If you have any resources
1897 *       which should be freed before throwing an exception, you need to use
1898 *       rb_protect() in `func' and return a value which indicates that an
1899 *       exception was raised.
1900 *
1901 * NOTE: This function should not be called by a thread which was not
1902 *       created as a Ruby thread (created by Thread.new or the like). In other
1903 * words, this function *DOES NOT* associate or convert a NON-Ruby
1904 * thread to a Ruby thread.
1905 */
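/*
 * A minimal usage sketch of rb_thread_call_with_gvl(), kept out of
 * compilation.  The with_gvl_warn()/nogvl_worker() helpers are hypothetical;
 * only rb_thread_call_with_gvl() itself is the documented API.
 */
#if 0
static void *
with_gvl_warn(void *message)
{
    /* The GVL is held here, so touching Ruby (e.g. rb_warn) is allowed. */
    rb_warn("%s", (const char *)message);
    return NULL;
}

static void *
nogvl_worker(void *arg)
{
    /* ... running without the GVL, e.g. inside rb_thread_call_without_gvl ... */
    rb_thread_call_with_gvl(with_gvl_warn, (void *)"worker made progress");
    /* ... back to running without the GVL ... */
    return arg;
}
#endif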
1906void *
1907rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
1908{
1909 rb_thread_t *th = ruby_thread_from_native();
1910 struct rb_blocking_region_buffer *brb;
1911 struct rb_unblock_callback prev_unblock;
1912 void *r;
1913
1914 if (th == 0) {
1915 /* An error has occurred, but we can't use rb_bug()
1916 * because this thread is not Ruby's thread.
1917 * What should we do?
1918 */
1919 bp();
1920 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
1921 exit(EXIT_FAILURE);
1922 }
1923
1924 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
1925 prev_unblock = th->unblock;
1926
1927 if (brb == 0) {
1928 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
1929 }
1930
1931 blocking_region_end(th, brb);
1932 /* enter to Ruby world: You can access Ruby values, methods and so on. */
1933 r = (*func)(data1);
1934 /* leave from Ruby world: You can not access Ruby values, etc. */
1935 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
1936 RUBY_ASSERT_ALWAYS(released);
1937 RB_VM_SAVE_MACHINE_CONTEXT(th);
1938 thread_sched_to_waiting(TH_SCHED(th), th);
1939 return r;
1940}
1941
1942/*
1943 * ruby_thread_has_gvl_p - check if current native thread has GVL.
1944 */
1945
1946int
1947ruby_thread_has_gvl_p(void)
1948{
1949 rb_thread_t *th = ruby_thread_from_native();
1950
1951 if (th && th->blocking_region_buffer == 0) {
1952 return 1;
1953 }
1954 else {
1955 return 0;
1956 }
1957}
1958
1959/*
1960 * call-seq:
1961 * Thread.pass -> nil
1962 *
1963 * Give the thread scheduler a hint to pass execution to another thread.
1964 * A running thread may or may not switch; it depends on the OS and processor.
1965 */
1966
1967static VALUE
1968thread_s_pass(VALUE klass)
1969{
1970 rb_thread_schedule();
1971 return Qnil;
1972}
1973
1974/*****************************************************/
1975
1976/*
1977 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
1978 *
1979 * Async events such as an exception thrown by Thread#raise,
1980 * Thread#kill and thread termination (after main thread termination)
1981 * will be queued to th->pending_interrupt_queue.
1982 * - clear: clear the queue.
1983 * - enque: enqueue err object into queue.
1984 * - deque: dequeue err object from queue.
1985 * - active_p: return 1 if the queue should be checked.
1986 *
1987  * All rb_threadptr_pending_interrupt_* functions are called by
1988  * a thread which has acquired the GVL, of course.
1989  * Note that all "rb_"-prefixed APIs require the GVL.
1990 */
1991
1992void
1993rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
1994{
1995 rb_ary_clear(th->pending_interrupt_queue);
1996}
1997
1998void
1999rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2000{
2001 rb_ary_push(th->pending_interrupt_queue, v);
2002 th->pending_interrupt_queue_checked = 0;
2003}
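
/*
 * Typical producer-side pattern (see rb_threadptr_raise() and
 * rb_thread_kill() below): enqueue the error object for the target thread,
 * then interrupt it so that it re-checks its pending interrupt queue.
 *
 *   rb_threadptr_pending_interrupt_enque(target_th, err);
 *   rb_threadptr_interrupt(target_th);
 */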
2004
2005static void
2006threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2007{
2008 if (!th->pending_interrupt_queue) {
2009 rb_raise(rb_eThreadError, "uninitialized thread");
2010 }
2011}
2012
2013enum handle_interrupt_timing {
2014 INTERRUPT_NONE,
2015 INTERRUPT_IMMEDIATE,
2016 INTERRUPT_ON_BLOCKING,
2017 INTERRUPT_NEVER
2018};
2019
2020static enum handle_interrupt_timing
2021rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2022{
2023 if (sym == sym_immediate) {
2024 return INTERRUPT_IMMEDIATE;
2025 }
2026 else if (sym == sym_on_blocking) {
2027 return INTERRUPT_ON_BLOCKING;
2028 }
2029 else if (sym == sym_never) {
2030 return INTERRUPT_NEVER;
2031 }
2032 else {
2033 rb_raise(rb_eThreadError, "unknown mask signature");
2034 }
2035}
2036
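/* Walks th->pending_interrupt_mask_stack from the innermost
 * Thread.handle_interrupt block outward and returns the first timing that
 * applies to an exception of class `err'. A bare Symbol mask applies to any
 * exception class (the fatal Integer sentinels are skipped), while a Hash
 * mask is looked up along err's ancestry. Returns INTERRUPT_NONE when no
 * mask matches. */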
2037static enum handle_interrupt_timing
2038rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2039{
2040 VALUE mask;
2041 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2042 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2043 VALUE mod;
2044 long i;
2045
2046 for (i=0; i<mask_stack_len; i++) {
2047 mask = mask_stack[mask_stack_len-(i+1)];
2048
2049 if (SYMBOL_P(mask)) {
2050 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2051 if (err != rb_cInteger) {
2052 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2053 }
2054 else {
2055 continue;
2056 }
2057 }
2058
2059 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2060 VALUE klass = mod;
2061 VALUE sym;
2062
2063 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2064 klass = RBASIC(mod)->klass;
2065 }
2066 else if (mod != RCLASS_ORIGIN(mod)) {
2067 continue;
2068 }
2069
2070 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2071 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2072 }
2073 }
2074 /* try to next mask */
2075 }
2076 return INTERRUPT_NONE;
2077}
2078
2079static int
2080rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2081{
2082 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2083}
2084
2085static int
2086rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2087{
2088 int i;
2089 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2090 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2091 if (rb_obj_is_kind_of(e, err)) {
2092 return TRUE;
2093 }
2094 }
2095 return FALSE;
2096}
2097
2098static VALUE
2099rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2100{
2101#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2102 int i;
2103
2104 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2105 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2106
2107 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2108
2109 switch (mask_timing) {
2110 case INTERRUPT_ON_BLOCKING:
2111 if (timing != INTERRUPT_ON_BLOCKING) {
2112 break;
2113 }
2114 /* fall through */
2115 case INTERRUPT_NONE: /* default: IMMEDIATE */
2116 case INTERRUPT_IMMEDIATE:
2117 rb_ary_delete_at(th->pending_interrupt_queue, i);
2118 return err;
2119 case INTERRUPT_NEVER:
2120 break;
2121 }
2122 }
2123
2124 th->pending_interrupt_queue_checked = 1;
2125 return Qundef;
2126#else
2127 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2128 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2129 th->pending_interrupt_queue_checked = 1;
2130 }
2131 return err;
2132#endif
2133}
2134
2135static int
2136threadptr_pending_interrupt_active_p(rb_thread_t *th)
2137{
2138 /*
2139 * For optimization, we don't check async errinfo queue
2140 * if the queue and the thread interrupt mask were not changed
2141 * since last check.
2142 */
2143 if (th->pending_interrupt_queue_checked) {
2144 return 0;
2145 }
2146
2147 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2148 return 0;
2149 }
2150
2151 return 1;
2152}
2153
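/* rb_hash_foreach() callback for Thread.handle_interrupt: validates every
 * timing value and builds the mask that will be pushed onto the mask stack.
 * A lone `Exception => timing' pair is collapsed into a bare Symbol; any
 * other pairs are copied into an identity hash keyed by exception class. */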
2154static int
2155handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2156{
2157 VALUE *maskp = (VALUE *)args;
2158
2159 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2160 rb_raise(rb_eArgError, "unknown mask signature");
2161 }
2162
2163 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2164 *maskp = val;
2165 return ST_CONTINUE;
2166 }
2167
2168 if (RTEST(*maskp)) {
2169 if (!RB_TYPE_P(*maskp, T_HASH)) {
2170 VALUE prev = *maskp;
2171 *maskp = rb_ident_hash_new();
2172 if (SYMBOL_P(prev)) {
2173 rb_hash_aset(*maskp, rb_eException, prev);
2174 }
2175 }
2176 rb_hash_aset(*maskp, key, val);
2177 }
2178 else {
2179 *maskp = Qfalse;
2180 }
2181
2182 return ST_CONTINUE;
2183}
2184
2185/*
2186 * call-seq:
2187 * Thread.handle_interrupt(hash) { ... } -> result of the block
2188 *
2189 * Changes asynchronous interrupt timing.
2190 *
2191  * _interrupt_ means an asynchronous event and its corresponding procedure,
2192  * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2193  * and main thread termination (if the main thread terminates, then all
2194  * other threads will be killed).
2195 *
2196 * The given +hash+ has pairs like <code>ExceptionClass =>
2197  * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2198 * the given block. The TimingSymbol can be one of the following symbols:
2199 *
2200 * [+:immediate+] Invoke interrupts immediately.
2201 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2202  * [+:never+] Never invoke any interrupts.
2203 *
2204 * _BlockingOperation_ means that the operation will block the calling thread,
2205  * such as read and write. On the CRuby implementation, _BlockingOperation_ is any
2206 * operation executed without GVL.
2207 *
2208 * Masked asynchronous interrupts are delayed until they are enabled.
2209 * This method is similar to sigprocmask(3).
2210 *
2211 * === NOTE
2212 *
2213 * Asynchronous interrupts are difficult to use.
2214 *
2215  * If you need to communicate between threads, consider using another mechanism such as Queue.
2216  *
2217  * Otherwise, make sure you thoroughly understand how this method works before using it.
2218 *
2219 * === Usage
2220 *
2221  * In this example, we can guard against Thread#raise exceptions.
2222 *
2223 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2224 * ignored in the first block of the main thread. In the second
2225 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2226 *
2227 * th = Thread.new do
2228 * Thread.handle_interrupt(RuntimeError => :never) {
2229 * begin
2230 * # You can write resource allocation code safely.
2231 * Thread.handle_interrupt(RuntimeError => :immediate) {
2232 * # ...
2233 * }
2234 * ensure
2235 * # You can write resource deallocation code safely.
2236 * end
2237 * }
2238 * end
2239 * Thread.pass
2240 * # ...
2241 * th.raise "stop"
2242 *
2243 * While we are ignoring the RuntimeError exception, it's safe to write our
2244 * resource allocation code. Then, the ensure block is where we can safely
2245  * deallocate those resources.
2246 *
2247 * ==== Stack control settings
2248 *
2249 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2250 * to control more than one ExceptionClass and TimingSymbol at a time.
2251 *
2252 * Thread.handle_interrupt(FooError => :never) {
2253 * Thread.handle_interrupt(BarError => :never) {
2254 * # FooError and BarError are prohibited.
2255 * }
2256 * }
2257 *
2258 * ==== Inheritance with ExceptionClass
2259 *
2260 * All exceptions inherited from the ExceptionClass parameter will be considered.
2261 *
2262 * Thread.handle_interrupt(Exception => :never) {
2263 * # all exceptions inherited from Exception are prohibited.
2264 * }
2265 *
2266 * For handling all interrupts, use +Object+ and not +Exception+
2267 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2268 */
2269static VALUE
2270rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2271{
2272 VALUE mask = Qundef;
2273 rb_execution_context_t * volatile ec = GET_EC();
2274 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2275 volatile VALUE r = Qnil;
2276 enum ruby_tag_type state;
2277
2278 if (!rb_block_given_p()) {
2279 rb_raise(rb_eArgError, "block is needed.");
2280 }
2281
2282 mask_arg = rb_to_hash_type(mask_arg);
2283
2284 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2285 mask = Qnil;
2286 }
2287
2288 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2289
2290 if (UNDEF_P(mask)) {
2291 return rb_yield(Qnil);
2292 }
2293
2294 if (!RTEST(mask)) {
2295 mask = mask_arg;
2296 }
2297 else if (RB_TYPE_P(mask, T_HASH)) {
2298 OBJ_FREEZE(mask);
2299 }
2300
2301 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2302 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2303 th->pending_interrupt_queue_checked = 0;
2304 RUBY_VM_SET_INTERRUPT(th->ec);
2305 }
2306
2307 EC_PUSH_TAG(th->ec);
2308 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2309 r = rb_yield(Qnil);
2310 }
2311 EC_POP_TAG();
2312
2313 rb_ary_pop(th->pending_interrupt_mask_stack);
2314 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2315 th->pending_interrupt_queue_checked = 0;
2316 RUBY_VM_SET_INTERRUPT(th->ec);
2317 }
2318
2319 RUBY_VM_CHECK_INTS(th->ec);
2320
2321 if (state) {
2322 EC_JUMP_TAG(th->ec, state);
2323 }
2324
2325 return r;
2326}
2327
2328/*
2329 * call-seq:
2330 * target_thread.pending_interrupt?(error = nil) -> true/false
2331 *
2332  * Returns whether the target thread has any deferred asynchronous events queued.
2333 *
2334 * If +error+ is given, then check only for +error+ type deferred events.
2335 *
2336 * See ::pending_interrupt? for more information.
2337 */
2338static VALUE
2339rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2340{
2341 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2342
2343 if (!target_th->pending_interrupt_queue) {
2344 return Qfalse;
2345 }
2346 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2347 return Qfalse;
2348 }
2349 if (rb_check_arity(argc, 0, 1)) {
2350 VALUE err = argv[0];
2351 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2352 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2353 }
2354 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2355 }
2356 else {
2357 return Qtrue;
2358 }
2359}
2360
2361/*
2362 * call-seq:
2363 * Thread.pending_interrupt?(error = nil) -> true/false
2364 *
2365  * Returns whether the current thread has any deferred asynchronous events queued.
2366 *
2367 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2368 * this method can be used to determine if there are any deferred events.
2369 *
2370  * If this method returns +true+, you may finish the +:never+ block.
2371 *
2372 * For example, the following method processes deferred asynchronous events
2373 * immediately.
2374 *
2375 * def Thread.kick_interrupt_immediately
2376 * Thread.handle_interrupt(Object => :immediate) {
2377 * Thread.pass
2378 * }
2379 * end
2380 *
2381 * If +error+ is given, then check only for +error+ type deferred events.
2382 *
2383 * === Usage
2384 *
2385 * th = Thread.new{
2386 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2387 * while true
2388 * ...
2389 * # reach safe point to invoke interrupt
2390 * if Thread.pending_interrupt?
2391 * Thread.handle_interrupt(Object => :immediate){}
2392 * end
2393 * ...
2394 * end
2395 * }
2396 * }
2397 * ...
2398 * th.raise # stop thread
2399 *
2400 * This example can also be written as the following, which you should use to
2401 * avoid asynchronous interrupts.
2402 *
2403 * flag = true
2404 * th = Thread.new{
2405 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2406 * while true
2407 * ...
2408 * # reach safe point to invoke interrupt
2409 * break if flag == false
2410 * ...
2411 * end
2412 * }
2413 * }
2414 * ...
2415 * flag = false # stop thread
2416 */
2417
2418static VALUE
2419rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2420{
2421 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2422}
2423
2424NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2425
2426static void
2427rb_threadptr_to_kill(rb_thread_t *th)
2428{
2429 rb_threadptr_pending_interrupt_clear(th);
2430 th->status = THREAD_RUNNABLE;
2431 th->to_kill = 1;
2432 th->ec->errinfo = INT2FIX(TAG_FATAL);
2433 EC_JUMP_TAG(th->ec, TAG_FATAL);
2434}
2435
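/* Atomically fetches the interrupt bits of th->ec that are not blocked by
 * ec->interrupt_mask and clears them from ec->interrupt_flag; masked bits
 * are left pending for a later check. */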
2436static inline rb_atomic_t
2437threadptr_get_interrupts(rb_thread_t *th)
2438{
2439 rb_execution_context_t *ec = th->ec;
2440 rb_atomic_t interrupt;
2441 rb_atomic_t old;
2442
2443 do {
2444 interrupt = ec->interrupt_flag;
2445 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2446 } while (old != interrupt);
2447 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2448}
2449
2450static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2451
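/* Drains and handles the interrupts pending on `th': VM barrier
 * synchronization, postponed jobs, trap handlers and interrupt_exec tasks,
 * exceptions queued by other threads, termination requests, and finally the
 * timer interrupt that drives time-slice scheduling. `blocking_timing'
 * selects whether :on_blocking masks are honored. Returns TRUE if a signal
 * handler ran or a queued interrupt was delivered. */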
2452int
2453rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2454{
2455 rb_atomic_t interrupt;
2456 int postponed_job_interrupt = 0;
2457 int ret = FALSE;
2458
2459 if (th->ec->raised_flag) return ret;
2460
2461 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2462 int sig;
2463 int timer_interrupt;
2464 int pending_interrupt;
2465 int trap_interrupt;
2466 int terminate_interrupt;
2467
2468 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2469 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2470 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2471 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2472 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2473
2474 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2475 RB_VM_LOCK_ENTER();
2476 RB_VM_LOCK_LEAVE();
2477 }
2478
2479 if (postponed_job_interrupt) {
2480 rb_postponed_job_flush(th->vm);
2481 }
2482
2483 if (trap_interrupt) {
2484 /* signal handling */
2485 if (th == th->vm->ractor.main_thread) {
2486 enum rb_thread_status prev_status = th->status;
2487
2488 th->status = THREAD_RUNNABLE;
2489 {
2490 while ((sig = rb_get_next_signal()) != 0) {
2491 ret |= rb_signal_exec(th, sig);
2492 }
2493 }
2494 th->status = prev_status;
2495 }
2496
2497 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2498 enum rb_thread_status prev_status = th->status;
2499
2500 th->status = THREAD_RUNNABLE;
2501 {
2502 threadptr_interrupt_exec_exec(th);
2503 }
2504 th->status = prev_status;
2505 }
2506 }
2507
2508 /* exception from another thread */
2509 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2510 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2511 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2512 ret = TRUE;
2513
2514 if (UNDEF_P(err)) {
2515 /* no error */
2516 }
2517 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2518 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2519 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2520 terminate_interrupt = 1;
2521 }
2522 else {
2523 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2524                 /* the only special exception to be queued across threads */
2525 err = ruby_vm_special_exception_copy(err);
2526 }
2527 /* set runnable if th was slept. */
2528 if (th->status == THREAD_STOPPED ||
2529 th->status == THREAD_STOPPED_FOREVER)
2530 th->status = THREAD_RUNNABLE;
2531 rb_exc_raise(err);
2532 }
2533 }
2534
2535 if (terminate_interrupt) {
2536 rb_threadptr_to_kill(th);
2537 }
2538
2539 if (timer_interrupt) {
2540 uint32_t limits_us = thread_default_quantum_ms * 1000;
2541
2542 if (th->priority > 0)
2543 limits_us <<= th->priority;
2544 else
2545 limits_us >>= -th->priority;
2546
2547 if (th->status == THREAD_RUNNABLE)
2548 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2549
2550 VM_ASSERT(th->ec->cfp);
2551 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2552 0, 0, 0, Qundef);
2553
2554 rb_thread_schedule_limits(limits_us);
2555 }
2556 }
2557 return ret;
2558}
2559
2560void
2561rb_thread_execute_interrupts(VALUE thval)
2562{
2563 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2564}
2565
2566static void
2567rb_threadptr_ready(rb_thread_t *th)
2568{
2569 rb_threadptr_interrupt(th);
2570}
2571
2572static VALUE
2573rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2574{
2575 VALUE exc;
2576
2577 if (rb_threadptr_dead(target_th)) {
2578 return Qnil;
2579 }
2580
2581 if (argc == 0) {
2582 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2583 }
2584 else {
2585 exc = rb_make_exception(argc, argv);
2586 }
2587
2588 /* making an exception object can switch thread,
2589 so we need to check thread deadness again */
2590 if (rb_threadptr_dead(target_th)) {
2591 return Qnil;
2592 }
2593
2594 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2595 rb_threadptr_pending_interrupt_enque(target_th, exc);
2596 rb_threadptr_interrupt(target_th);
2597 return Qnil;
2598}
2599
2600void
2601rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2602{
2603 VALUE argv[2];
2604
2605 argv[0] = rb_eSignal;
2606 argv[1] = INT2FIX(sig);
2607 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2608}
2609
2610void
2611rb_threadptr_signal_exit(rb_thread_t *th)
2612{
2613 VALUE argv[2];
2614
2615 argv[0] = rb_eSystemExit;
2616 argv[1] = rb_str_new2("exit");
2617
2618     // TODO: check signal raise delivery
2619 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2620}
2621
2622int
2623rb_ec_set_raised(rb_execution_context_t *ec)
2624{
2625 if (ec->raised_flag & RAISED_EXCEPTION) {
2626 return 1;
2627 }
2628 ec->raised_flag |= RAISED_EXCEPTION;
2629 return 0;
2630}
2631
2632int
2633rb_ec_reset_raised(rb_execution_context_t *ec)
2634{
2635 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2636 return 0;
2637 }
2638 ec->raised_flag &= ~RAISED_EXCEPTION;
2639 return 1;
2640}
2641
2642int
2643rb_notify_fd_close(int fd, struct rb_io_close_wait_list *busy)
2644{
2645 rb_vm_t *vm = GET_THREAD()->vm;
2646 struct waiting_fd *wfd = 0, *next;
2647 ccan_list_head_init(&busy->pending_fd_users);
2648 int has_any;
2649 VALUE wakeup_mutex;
2650
2651 RB_VM_LOCK_ENTER();
2652 {
2653 ccan_list_for_each_safe(&vm->waiting_fds, wfd, next, wfd_node) {
2654 if (wfd->fd == fd) {
2655 rb_thread_t *th = wfd->th;
2656 VALUE err;
2657
2658 ccan_list_del(&wfd->wfd_node);
2659 ccan_list_add(&busy->pending_fd_users, &wfd->wfd_node);
2660
2661 wfd->busy = busy;
2662 err = th->vm->special_exceptions[ruby_error_stream_closed];
2663 rb_threadptr_pending_interrupt_enque(th, err);
2664 rb_threadptr_interrupt(th);
2665 }
2666 }
2667 }
2668
2669 has_any = !ccan_list_empty(&busy->pending_fd_users);
2670 busy->closing_thread = rb_thread_current();
2671 busy->closing_fiber = rb_fiber_current();
2672 wakeup_mutex = Qnil;
2673 if (has_any) {
2674 wakeup_mutex = rb_mutex_new();
2675 RBASIC_CLEAR_CLASS(wakeup_mutex); /* hide from ObjectSpace */
2676 }
2677 busy->wakeup_mutex = wakeup_mutex;
2678
2679 RB_VM_LOCK_LEAVE();
2680
2681 /* If the caller didn't pass *busy as a pointer to something on the stack,
2682 we need to guard this mutex object on _our_ C stack for the duration
2683 of this function. */
2684 RB_GC_GUARD(wakeup_mutex);
2685 return has_any;
2686}
2687
2688void
2689rb_notify_fd_close_wait(struct rb_io_close_wait_list *busy)
2690{
2691 if (!RB_TEST(busy->wakeup_mutex)) {
2692 /* There was nobody else using this file when we closed it, so we
2693            never bothered to allocate a mutex. */
2694 return;
2695 }
2696
2697 rb_mutex_lock(busy->wakeup_mutex);
2698 while (!ccan_list_empty(&busy->pending_fd_users)) {
2699 rb_mutex_sleep(busy->wakeup_mutex, Qnil);
2700 }
2701 rb_mutex_unlock(busy->wakeup_mutex);
2702}
2703
2704void
2705rb_thread_fd_close(int fd)
2706{
2707 struct rb_io_close_wait_list busy;
2708
2709 if (rb_notify_fd_close(fd, &busy)) {
2710 rb_notify_fd_close_wait(&busy);
2711 }
2712}
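
/*
 * Usage sketch (illustrative only): before closing a descriptor that other
 * Ruby threads may be blocked on, notify them so their pending operations
 * are interrupted with the "stream closed" error rather than acting on a
 * recycled descriptor.
 *
 *   rb_thread_fd_close(fd);  // wake the waiters and wait for them to leave
 *   close(fd);
 */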
2713
2714/*
2715 * call-seq:
2716 * thr.raise
2717 * thr.raise(string)
2718 * thr.raise(exception [, string [, array]])
2719 *
2720 * Raises an exception from the given thread. The caller does not have to be
2721 * +thr+. See Kernel#raise for more information.
2722 *
2723 * Thread.abort_on_exception = true
2724 * a = Thread.new { sleep(200) }
2725 * a.raise("Gotcha")
2726 *
2727 * This will produce:
2728 *
2729 * prog.rb:3: Gotcha (RuntimeError)
2730 * from prog.rb:2:in `initialize'
2731 * from prog.rb:2:in `new'
2732 * from prog.rb:2
2733 */
2734
2735static VALUE
2736thread_raise_m(int argc, VALUE *argv, VALUE self)
2737{
2738 rb_thread_t *target_th = rb_thread_ptr(self);
2739 const rb_thread_t *current_th = GET_THREAD();
2740
2741 threadptr_check_pending_interrupt_queue(target_th);
2742 rb_threadptr_raise(target_th, argc, argv);
2743
2744 /* To perform Thread.current.raise as Kernel.raise */
2745 if (current_th == target_th) {
2746 RUBY_VM_CHECK_INTS(target_th->ec);
2747 }
2748 return Qnil;
2749}
2750
2751
2752/*
2753 * call-seq:
2754 * thr.exit -> thr
2755 * thr.kill -> thr
2756 * thr.terminate -> thr
2757 *
2758 * Terminates +thr+ and schedules another thread to be run, returning
2759 * the terminated Thread. If this is the main thread, or the last
2760 * thread, exits the process.
2761 */
2762
2763 VALUE
2764 rb_thread_kill(VALUE thread)
2765{
2766 rb_thread_t *target_th = rb_thread_ptr(thread);
2767
2768 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2769 return thread;
2770 }
2771 if (target_th == target_th->vm->ractor.main_thread) {
2772 rb_exit(EXIT_SUCCESS);
2773 }
2774
2775 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2776
2777 if (target_th == GET_THREAD()) {
2778 /* kill myself immediately */
2779 rb_threadptr_to_kill(target_th);
2780 }
2781 else {
2782 threadptr_check_pending_interrupt_queue(target_th);
2783 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2784 rb_threadptr_interrupt(target_th);
2785 }
2786
2787 return thread;
2788}
2789
2790int
2791rb_thread_to_be_killed(VALUE thread)
2792{
2793 rb_thread_t *target_th = rb_thread_ptr(thread);
2794
2795 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2796 return TRUE;
2797 }
2798 return FALSE;
2799}
2800
2801/*
2802 * call-seq:
2803 * Thread.kill(thread) -> thread
2804 *
2805  * Causes the given +thread+ to exit; see also Thread::exit.
2806 *
2807 * count = 0
2808 * a = Thread.new { loop { count += 1 } }
2809 * sleep(0.1) #=> 0
2810 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2811 * count #=> 93947
2812 * a.alive? #=> false
2813 */
2814
2815static VALUE
2816rb_thread_s_kill(VALUE obj, VALUE th)
2817{
2818 return rb_thread_kill(th);
2819}
2820
2821
2822/*
2823 * call-seq:
2824 * Thread.exit -> thread
2825 *
2826 * Terminates the currently running thread and schedules another thread to be
2827 * run.
2828 *
2829 * If this thread is already marked to be killed, ::exit returns the Thread.
2830 *
2831 * If this is the main thread, or the last thread, exit the process.
2832 */
2833
2834static VALUE
2835rb_thread_exit(VALUE _)
2836{
2837 rb_thread_t *th = GET_THREAD();
2838 return rb_thread_kill(th->self);
2839}
2840
2841
2842/*
2843 * call-seq:
2844 * thr.wakeup -> thr
2845 *
2846  * Marks a given thread as eligible for scheduling; however, it may still
2847 * remain blocked on I/O.
2848 *
2849 * *Note:* This does not invoke the scheduler, see #run for more information.
2850 *
2851 * c = Thread.new { Thread.stop; puts "hey!" }
2852 * sleep 0.1 while c.status!='sleep'
2853 * c.wakeup
2854 * c.join
2855 * #=> "hey!"
2856 */
2857
2858 VALUE
2859 rb_thread_wakeup(VALUE thread)
2860{
2861 if (!RTEST(rb_thread_wakeup_alive(thread))) {
2862 rb_raise(rb_eThreadError, "killed thread");
2863 }
2864 return thread;
2865}
2866
2867 VALUE
2868 rb_thread_wakeup_alive(VALUE thread)
2869{
2870 rb_thread_t *target_th = rb_thread_ptr(thread);
2871 if (target_th->status == THREAD_KILLED) return Qnil;
2872
2873 rb_threadptr_ready(target_th);
2874
2875 if (target_th->status == THREAD_STOPPED ||
2876 target_th->status == THREAD_STOPPED_FOREVER) {
2877 target_th->status = THREAD_RUNNABLE;
2878 }
2879
2880 return thread;
2881}
2882
2883
2884/*
2885 * call-seq:
2886 * thr.run -> thr
2887 *
2888 * Wakes up +thr+, making it eligible for scheduling.
2889 *
2890 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
2891 * sleep 0.1 while a.status!='sleep'
2892 * puts "Got here"
2893 * a.run
2894 * a.join
2895 *
2896 * This will produce:
2897 *
2898 * a
2899 * Got here
2900 * c
2901 *
2902 * See also the instance method #wakeup.
2903 */
2904
2905 VALUE
2906 rb_thread_run(VALUE thread)
2907{
2908 rb_thread_wakeup(thread);
2909     rb_thread_schedule();
2910     return thread;
2911}
2912
2913
2914 VALUE
2915 rb_thread_stop(void)
2916{
2917 if (rb_thread_alone()) {
2918 rb_raise(rb_eThreadError,
2919 "stopping only thread\n\tnote: use sleep to stop forever");
2920 }
2921     rb_thread_sleep_deadly();
2922     return Qnil;
2923}
2924
2925/*
2926 * call-seq:
2927 * Thread.stop -> nil
2928 *
2929 * Stops execution of the current thread, putting it into a ``sleep'' state,
2930 * and schedules execution of another thread.
2931 *
2932 * a = Thread.new { print "a"; Thread.stop; print "c" }
2933 * sleep 0.1 while a.status!='sleep'
2934 * print "b"
2935 * a.run
2936 * a.join
2937 * #=> "abc"
2938 */
2939
2940static VALUE
2941thread_stop(VALUE _)
2942{
2943 return rb_thread_stop();
2944}
2945
2946/********************************************************************/
2947
2948VALUE
2949rb_thread_list(void)
2950{
2951 // TODO
2952 return rb_ractor_thread_list();
2953}
2954
2955/*
2956 * call-seq:
2957 * Thread.list -> array
2958 *
2959 * Returns an array of Thread objects for all threads that are either runnable
2960 * or stopped.
2961 *
2962 * Thread.new { sleep(200) }
2963 * Thread.new { 1000000.times {|i| i*i } }
2964 * Thread.new { Thread.stop }
2965 * Thread.list.each {|t| p t}
2966 *
2967 * This will produce:
2968 *
2969 * #<Thread:0x401b3e84 sleep>
2970 * #<Thread:0x401b3f38 run>
2971 * #<Thread:0x401b3fb0 sleep>
2972 * #<Thread:0x401bdf4c run>
2973 */
2974
2975static VALUE
2976thread_list(VALUE _)
2977{
2978 return rb_thread_list();
2979}
2980
2981 VALUE
2982 rb_thread_current(void)
2983{
2984 return GET_THREAD()->self;
2985}
2986
2987/*
2988 * call-seq:
2989 * Thread.current -> thread
2990 *
2991 * Returns the currently executing thread.
2992 *
2993 * Thread.current #=> #<Thread:0x401bdf4c run>
2994 */
2995
2996static VALUE
2997thread_s_current(VALUE klass)
2998{
2999 return rb_thread_current();
3000}
3001
3002 VALUE
3003 rb_thread_main(void)
3004{
3005 return GET_RACTOR()->threads.main->self;
3006}
3007
3008/*
3009 * call-seq:
3010 * Thread.main -> thread
3011 *
3012 * Returns the main thread.
3013 */
3014
3015static VALUE
3016rb_thread_s_main(VALUE klass)
3017{
3018 return rb_thread_main();
3019}
3020
3021
3022/*
3023 * call-seq:
3024 * Thread.abort_on_exception -> true or false
3025 *
3026 * Returns the status of the global ``abort on exception'' condition.
3027 *
3028 * The default is +false+.
3029 *
3030 * When set to +true+, if any thread is aborted by an exception, the
3031 * raised exception will be re-raised in the main thread.
3032 *
3033 * Can also be specified by the global $DEBUG flag or command line option
3034 * +-d+.
3035 *
3036 * See also ::abort_on_exception=.
3037 *
3038 * There is also an instance level method to set this for a specific thread,
3039 * see #abort_on_exception.
3040 */
3041
3042static VALUE
3043rb_thread_s_abort_exc(VALUE _)
3044{
3045 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3046}
3047
3048
3049/*
3050 * call-seq:
3051 * Thread.abort_on_exception= boolean -> true or false
3052 *
3053 * When set to +true+, if any thread is aborted by an exception, the
3054 * raised exception will be re-raised in the main thread.
3055 * Returns the new state.
3056 *
3057 * Thread.abort_on_exception = true
3058 * t1 = Thread.new do
3059 * puts "In new thread"
3060 * raise "Exception from thread"
3061 * end
3062 * sleep(1)
3063 * puts "not reached"
3064 *
3065 * This will produce:
3066 *
3067 * In new thread
3068 * prog.rb:4: Exception from thread (RuntimeError)
3069 * from prog.rb:2:in `initialize'
3070 * from prog.rb:2:in `new'
3071 * from prog.rb:2
3072 *
3073 * See also ::abort_on_exception.
3074 *
3075 * There is also an instance level method to set this for a specific thread,
3076 * see #abort_on_exception=.
3077 */
3078
3079static VALUE
3080rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3081{
3082 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3083 return val;
3084}
3085
3086
3087/*
3088 * call-seq:
3089 * thr.abort_on_exception -> true or false
3090 *
3091 * Returns the status of the thread-local ``abort on exception'' condition for
3092 * this +thr+.
3093 *
3094 * The default is +false+.
3095 *
3096 * See also #abort_on_exception=.
3097 *
3098 * There is also a class level method to set this for all threads, see
3099 * ::abort_on_exception.
3100 */
3101
3102static VALUE
3103rb_thread_abort_exc(VALUE thread)
3104{
3105 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3106}
3107
3108
3109/*
3110 * call-seq:
3111 * thr.abort_on_exception= boolean -> true or false
3112 *
3113 * When set to +true+, if this +thr+ is aborted by an exception, the
3114 * raised exception will be re-raised in the main thread.
3115 *
3116 * See also #abort_on_exception.
3117 *
3118 * There is also a class level method to set this for all threads, see
3119 * ::abort_on_exception=.
3120 */
3121
3122static VALUE
3123rb_thread_abort_exc_set(VALUE thread, VALUE val)
3124{
3125 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3126 return val;
3127}
3128
3129
3130/*
3131 * call-seq:
3132 * Thread.report_on_exception -> true or false
3133 *
3134 * Returns the status of the global ``report on exception'' condition.
3135 *
3136 * The default is +true+ since Ruby 2.5.
3137 *
3138 * All threads created when this flag is true will report
3139 * a message on $stderr if an exception kills the thread.
3140 *
3141 * Thread.new { 1.times { raise } }
3142 *
3143 * will produce this output on $stderr:
3144 *
3145 * #<Thread:...> terminated with exception (report_on_exception is true):
3146 * Traceback (most recent call last):
3147 * 2: from -e:1:in `block in <main>'
3148 * 1: from -e:1:in `times'
3149 *
3150 * This is done to catch errors in threads early.
3151 * In some cases, you might not want this output.
3152 * There are multiple ways to avoid the extra output:
3153 *
3154 * * If the exception is not intended, the best is to fix the cause of
3155 * the exception so it does not happen anymore.
3156 * * If the exception is intended, it might be better to rescue it closer to
3157  *   where it is raised rather than letting it kill the Thread.
3158 * * If it is guaranteed the Thread will be joined with Thread#join or
3159 * Thread#value, then it is safe to disable this report with
3160 * <code>Thread.current.report_on_exception = false</code>
3161 * when starting the Thread.
3162 * However, this might handle the exception much later, or not at all
3163 * if the Thread is never joined due to the parent thread being blocked, etc.
3164 *
3165 * See also ::report_on_exception=.
3166 *
3167 * There is also an instance level method to set this for a specific thread,
3168 * see #report_on_exception=.
3169 *
3170 */
3171
3172static VALUE
3173rb_thread_s_report_exc(VALUE _)
3174{
3175 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3176}
3177
3178
3179/*
3180 * call-seq:
3181 * Thread.report_on_exception= boolean -> true or false
3182 *
3183 * Returns the new state.
3184 * When set to +true+, all threads created afterwards will inherit the
3185 * condition and report a message on $stderr if an exception kills a thread:
3186 *
3187 * Thread.report_on_exception = true
3188 * t1 = Thread.new do
3189 * puts "In new thread"
3190 * raise "Exception from thread"
3191 * end
3192 * sleep(1)
3193 * puts "In the main thread"
3194 *
3195 * This will produce:
3196 *
3197 * In new thread
3198 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3199 * Traceback (most recent call last):
3200 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3201 * In the main thread
3202 *
3203 * See also ::report_on_exception.
3204 *
3205 * There is also an instance level method to set this for a specific thread,
3206 * see #report_on_exception=.
3207 */
3208
3209static VALUE
3210rb_thread_s_report_exc_set(VALUE self, VALUE val)
3211{
3212 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3213 return val;
3214}
3215
3216
3217/*
3218 * call-seq:
3219 * Thread.ignore_deadlock -> true or false
3220 *
3221 * Returns the status of the global ``ignore deadlock'' condition.
3222 * The default is +false+, so that deadlock conditions are not ignored.
3223 *
3224 * See also ::ignore_deadlock=.
3225 *
3226 */
3227
3228static VALUE
3229rb_thread_s_ignore_deadlock(VALUE _)
3230{
3231 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3232}
3233
3234
3235/*
3236 * call-seq:
3237 * Thread.ignore_deadlock = boolean -> true or false
3238 *
3239 * Returns the new state.
3240 * When set to +true+, the VM will not check for deadlock conditions.
3241 * It is only useful to set this if your application can break a
3242 * deadlock condition via some other means, such as a signal.
3243 *
3244 * Thread.ignore_deadlock = true
3245 * queue = Thread::Queue.new
3246 *
3247 * trap(:SIGUSR1){queue.push "Received signal"}
3248 *
3249 * # raises fatal error unless ignoring deadlock
3250 * puts queue.pop
3251 *
3252 * See also ::ignore_deadlock.
3253 */
3254
3255static VALUE
3256rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3257{
3258 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3259 return val;
3260}
3261
3262
3263/*
3264 * call-seq:
3265 * thr.report_on_exception -> true or false
3266 *
3267 * Returns the status of the thread-local ``report on exception'' condition for
3268 * this +thr+.
3269 *
3270 * The default value when creating a Thread is the value of
3271 * the global flag Thread.report_on_exception.
3272 *
3273 * See also #report_on_exception=.
3274 *
3275 * There is also a class level method to set this for all new threads, see
3276 * ::report_on_exception=.
3277 */
3278
3279static VALUE
3280rb_thread_report_exc(VALUE thread)
3281{
3282 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3283}
3284
3285
3286/*
3287 * call-seq:
3288 * thr.report_on_exception= boolean -> true or false
3289 *
3290 * When set to +true+, a message is printed on $stderr if an exception
3291 * kills this +thr+. See ::report_on_exception for details.
3292 *
3293 * See also #report_on_exception.
3294 *
3295 * There is also a class level method to set this for all new threads, see
3296 * ::report_on_exception=.
3297 */
3298
3299static VALUE
3300rb_thread_report_exc_set(VALUE thread, VALUE val)
3301{
3302 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3303 return val;
3304}
3305
3306
3307/*
3308 * call-seq:
3309 * thr.group -> thgrp or nil
3310 *
3311 * Returns the ThreadGroup which contains the given thread.
3312 *
3313 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3314 */
3315
3316VALUE
3317rb_thread_group(VALUE thread)
3318{
3319 return rb_thread_ptr(thread)->thgroup;
3320}
3321
3322static const char *
3323thread_status_name(rb_thread_t *th, int detail)
3324{
3325 switch (th->status) {
3326 case THREAD_RUNNABLE:
3327 return th->to_kill ? "aborting" : "run";
3328 case THREAD_STOPPED_FOREVER:
3329 if (detail) return "sleep_forever";
3330 case THREAD_STOPPED:
3331 return "sleep";
3332 case THREAD_KILLED:
3333 return "dead";
3334 default:
3335 return "unknown";
3336 }
3337}
3338
3339static int
3340rb_threadptr_dead(rb_thread_t *th)
3341{
3342 return th->status == THREAD_KILLED;
3343}
3344
3345
3346/*
3347 * call-seq:
3348 * thr.status -> string, false or nil
3349 *
3350 * Returns the status of +thr+.
3351 *
3352 * [<tt>"sleep"</tt>]
3353 * Returned if this thread is sleeping or waiting on I/O
3354 * [<tt>"run"</tt>]
3355 * When this thread is executing
3356 * [<tt>"aborting"</tt>]
3357 * If this thread is aborting
3358 * [+false+]
3359 * When this thread is terminated normally
3360 * [+nil+]
3361 * If terminated with an exception.
3362 *
3363 * a = Thread.new { raise("die now") }
3364 * b = Thread.new { Thread.stop }
3365 * c = Thread.new { Thread.exit }
3366 * d = Thread.new { sleep }
3367 * d.kill #=> #<Thread:0x401b3678 aborting>
3368 * a.status #=> nil
3369 * b.status #=> "sleep"
3370 * c.status #=> false
3371 * d.status #=> "aborting"
3372 * Thread.current.status #=> "run"
3373 *
3374 * See also the instance methods #alive? and #stop?
3375 */
3376
3377static VALUE
3378rb_thread_status(VALUE thread)
3379{
3380 rb_thread_t *target_th = rb_thread_ptr(thread);
3381
3382 if (rb_threadptr_dead(target_th)) {
3383 if (!NIL_P(target_th->ec->errinfo) &&
3384 !FIXNUM_P(target_th->ec->errinfo)) {
3385 return Qnil;
3386 }
3387 else {
3388 return Qfalse;
3389 }
3390 }
3391 else {
3392 return rb_str_new2(thread_status_name(target_th, FALSE));
3393 }
3394}
3395
3396
3397/*
3398 * call-seq:
3399 * thr.alive? -> true or false
3400 *
3401 * Returns +true+ if +thr+ is running or sleeping.
3402 *
3403 * thr = Thread.new { }
3404 * thr.join #=> #<Thread:0x401b3fb0 dead>
3405 * Thread.current.alive? #=> true
3406 * thr.alive? #=> false
3407 *
3408 * See also #stop? and #status.
3409 */
3410
3411static VALUE
3412rb_thread_alive_p(VALUE thread)
3413{
3414 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3415}
3416
3417/*
3418 * call-seq:
3419 * thr.stop? -> true or false
3420 *
3421 * Returns +true+ if +thr+ is dead or sleeping.
3422 *
3423 * a = Thread.new { Thread.stop }
3424 * b = Thread.current
3425 * a.stop? #=> true
3426 * b.stop? #=> false
3427 *
3428 * See also #alive? and #status.
3429 */
3430
3431static VALUE
3432rb_thread_stop_p(VALUE thread)
3433{
3434 rb_thread_t *th = rb_thread_ptr(thread);
3435
3436 if (rb_threadptr_dead(th)) {
3437 return Qtrue;
3438 }
3439 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3440}
3441
3442/*
3443 * call-seq:
3444 * thr.name -> string
3445 *
3446  * Returns the name of the thread.
3447 */
3448
3449static VALUE
3450rb_thread_getname(VALUE thread)
3451{
3452 return rb_thread_ptr(thread)->name;
3453}
3454
3455/*
3456 * call-seq:
3457 * thr.name=(name) -> string
3458 *
3459  * Sets the given name on the Ruby thread.
3460  * On some platforms, the name may also be applied to the underlying pthread and/or reported to the kernel.
3461 */
3462
3463static VALUE
3464rb_thread_setname(VALUE thread, VALUE name)
3465{
3466 rb_thread_t *target_th = rb_thread_ptr(thread);
3467
3468 if (!NIL_P(name)) {
3469 rb_encoding *enc;
3470 StringValueCStr(name);
3471 enc = rb_enc_get(name);
3472 if (!rb_enc_asciicompat(enc)) {
3473 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3474 rb_enc_name(enc));
3475 }
3476 name = rb_str_new_frozen(name);
3477 }
3478 target_th->name = name;
3479 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3480 native_set_another_thread_name(target_th->nt->thread_id, name);
3481 }
3482 return name;
3483}
3484
3485#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3486/*
3487 * call-seq:
3488 * thr.native_thread_id -> integer
3489 *
3490 * Return the native thread ID which is used by the Ruby thread.
3491 *
3492 * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3493 * * On Linux it is TID returned by gettid(2).
3494 * * On macOS it is the system-wide unique integral ID of thread returned
3495 * by pthread_threadid_np(3).
3496 * * On FreeBSD it is the unique integral ID of the thread returned by
3497 * pthread_getthreadid_np(3).
3498 * * On Windows it is the thread identifier returned by GetThreadId().
3499 * * On other platforms, it raises NotImplementedError.
3500 *
3501 * NOTE:
3502  * If the thread is not yet associated with, or has already been dissociated
3503  * from, a native thread, it returns _nil_.
3504  * If the Ruby implementation uses an M:N thread model, the ID may change
3505  * depending on the timing.
3506 */
3507
3508static VALUE
3509rb_thread_native_thread_id(VALUE thread)
3510{
3511 rb_thread_t *target_th = rb_thread_ptr(thread);
3512 if (rb_threadptr_dead(target_th)) return Qnil;
3513 return native_thread_native_thread_id(target_th);
3514}
3515#else
3516# define rb_thread_native_thread_id rb_f_notimplement
3517#endif
3518
3519/*
3520 * call-seq:
3521 * thr.to_s -> string
3522 *
3523 * Dump the name, id, and status of _thr_ to a string.
3524 */
3525
3526static VALUE
3527rb_thread_to_s(VALUE thread)
3528{
3529 VALUE cname = rb_class_path(rb_obj_class(thread));
3530 rb_thread_t *target_th = rb_thread_ptr(thread);
3531 const char *status;
3532 VALUE str, loc;
3533
3534 status = thread_status_name(target_th, TRUE);
3535 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3536 if (!NIL_P(target_th->name)) {
3537 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3538 }
3539 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3540 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3541 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3542 }
3543 rb_str_catf(str, " %s>", status);
3544
3545 return str;
3546}
3547
3548/* variables for recursive traversals */
3549#define recursive_key id__recursive_key__
3550
3551static VALUE
3552threadptr_local_aref(rb_thread_t *th, ID id)
3553{
3554 if (id == recursive_key) {
3555 return th->ec->local_storage_recursive_hash;
3556 }
3557 else {
3558 VALUE val;
3559 struct rb_id_table *local_storage = th->ec->local_storage;
3560
3561 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3562 return val;
3563 }
3564 else {
3565 return Qnil;
3566 }
3567 }
3568}
3569
3570 VALUE
3571 rb_thread_local_aref(VALUE thread, ID id)
3572{
3573 return threadptr_local_aref(rb_thread_ptr(thread), id);
3574}
3575
3576/*
3577 * call-seq:
3578 * thr[sym] -> obj or nil
3579 *
3580 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3581 * if not explicitly inside a Fiber), using either a symbol or a string name.
3582 * If the specified variable does not exist, returns +nil+.
3583 *
3584 * [
3585 * Thread.new { Thread.current["name"] = "A" },
3586 * Thread.new { Thread.current[:name] = "B" },
3587 * Thread.new { Thread.current["name"] = "C" }
3588 * ].each do |th|
3589 * th.join
3590 * puts "#{th.inspect}: #{th[:name]}"
3591 * end
3592 *
3593 * This will produce:
3594 *
3595 * #<Thread:0x00000002a54220 dead>: A
3596 * #<Thread:0x00000002a541a8 dead>: B
3597 * #<Thread:0x00000002a54130 dead>: C
3598 *
3599 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3600 * This confusion did not exist in Ruby 1.8 because
3601  * fibers were only introduced in Ruby 1.9.
3602  * Ruby 1.9 made these methods fiber-local so that the
3603  * following idiom for dynamic scope keeps working.
3604 *
3605 * def meth(newvalue)
3606 * begin
3607 * oldvalue = Thread.current[:name]
3608 * Thread.current[:name] = newvalue
3609 * yield
3610 * ensure
3611 * Thread.current[:name] = oldvalue
3612 * end
3613 * end
3614 *
3615 * The idiom may not work as dynamic scope if the methods are thread-local
3616 * and a given block switches fiber.
3617 *
3618 * f = Fiber.new {
3619 * meth(1) {
3620 * Fiber.yield
3621 * }
3622 * }
3623 * meth(2) {
3624 * f.resume
3625 * }
3626 * f.resume
3627 * p Thread.current[:name]
3628 * #=> nil if fiber-local
3629 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3630 *
3631 * For thread-local variables, please see #thread_variable_get and
3632 * #thread_variable_set.
3633 *
3634 */
3635
3636static VALUE
3637rb_thread_aref(VALUE thread, VALUE key)
3638{
3639 ID id = rb_check_id(&key);
3640 if (!id) return Qnil;
3641 return rb_thread_local_aref(thread, id);
3642}
3643
3644/*
3645 * call-seq:
3646 * thr.fetch(sym) -> obj
3647 * thr.fetch(sym) { } -> obj
3648 * thr.fetch(sym, default) -> obj
3649 *
3650 * Returns a fiber-local for the given key. If the key can't be
3651 * found, there are several options: With no other arguments, it will
3652 * raise a KeyError exception; if <i>default</i> is given, then that
3653 * will be returned; if the optional code block is specified, then
3654 * that will be run and its result returned. See Thread#[] and
3655 * Hash#fetch.
3656 */
3657static VALUE
3658rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3659{
3660 VALUE key, val;
3661 ID id;
3662 rb_thread_t *target_th = rb_thread_ptr(self);
3663 int block_given;
3664
3665 rb_check_arity(argc, 1, 2);
3666 key = argv[0];
3667
3668 block_given = rb_block_given_p();
3669 if (block_given && argc == 2) {
3670 rb_warn("block supersedes default value argument");
3671 }
3672
3673 id = rb_check_id(&key);
3674
3675 if (id == recursive_key) {
3676 return target_th->ec->local_storage_recursive_hash;
3677 }
3678 else if (id && target_th->ec->local_storage &&
3679 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3680 return val;
3681 }
3682 else if (block_given) {
3683 return rb_yield(key);
3684 }
3685 else if (argc == 1) {
3686 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3687 }
3688 else {
3689 return argv[1];
3690 }
3691}
3692
3693static VALUE
3694threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3695{
3696 if (id == recursive_key) {
3697 th->ec->local_storage_recursive_hash = val;
3698 return val;
3699 }
3700 else {
3701 struct rb_id_table *local_storage = th->ec->local_storage;
3702
3703 if (NIL_P(val)) {
3704 if (!local_storage) return Qnil;
3705 rb_id_table_delete(local_storage, id);
3706 return Qnil;
3707 }
3708 else {
3709 if (local_storage == NULL) {
3710 th->ec->local_storage = local_storage = rb_id_table_create(0);
3711 }
3712 rb_id_table_insert(local_storage, id, val);
3713 return val;
3714 }
3715 }
3716}
3717
3718 VALUE
3719 rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3720{
3721 if (OBJ_FROZEN(thread)) {
3722 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3723 }
3724
3725 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3726}
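
/*
 * Usage sketch (illustrative only): reading and writing a fiber-local value
 * from C. These correspond to Thread#[] and Thread#[]=, so the value is
 * fiber-local, not thread-local.
 *
 *   VALUE th = rb_thread_current();
 *   rb_thread_local_aset(th, rb_intern("name"), rb_str_new_cstr("worker-1"));
 *   VALUE name = rb_thread_local_aref(th, rb_intern("name"));
 */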
3727
3728/*
3729 * call-seq:
3730 * thr[sym] = obj -> obj
3731 *
3732 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3733 * using either a symbol or a string.
3734 *
3735 * See also Thread#[].
3736 *
3737 * For thread-local variables, please see #thread_variable_set and
3738 * #thread_variable_get.
3739 */
3740
3741static VALUE
3742rb_thread_aset(VALUE self, VALUE id, VALUE val)
3743{
3744 return rb_thread_local_aset(self, rb_to_id(id), val);
3745}
3746
3747/*
3748 * call-seq:
3749 * thr.thread_variable_get(key) -> obj or nil
3750 *
3751 * Returns the value of a thread local variable that has been set. Note that
3752 * these are different than fiber local values. For fiber local values,
3753 * please see Thread#[] and Thread#[]=.
3754 *
3755 * Thread local values are carried along with threads, and do not respect
3756 * fibers. For example:
3757 *
3758 * Thread.new {
3759 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3760 * Thread.current["foo"] = "bar" # set a fiber local
3761 *
3762 * Fiber.new {
3763 * Fiber.yield [
3764 * Thread.current.thread_variable_get("foo"), # get the thread local
3765 * Thread.current["foo"], # get the fiber local
3766 * ]
3767 * }.resume
3768 * }.join.value # => ['bar', nil]
3769 *
3770 * The value "bar" is returned for the thread local, where nil is returned
3771 * for the fiber local. The fiber is executed in the same thread, so the
3772 * thread local values are available.
3773 */
3774
3775static VALUE
3776rb_thread_variable_get(VALUE thread, VALUE key)
3777{
3778 VALUE locals;
3779 VALUE symbol = rb_to_symbol(key);
3780
3781 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3782 return Qnil;
3783 }
3784 locals = rb_thread_local_storage(thread);
3785 return rb_hash_aref(locals, symbol);
3786}
3787
3788/*
3789 * call-seq:
3790 * thr.thread_variable_set(key, value)
3791 *
3792 * Sets a thread local with +key+ to +value+. Note that these are local to
3793 * threads, and not to fibers. Please see Thread#thread_variable_get and
3794 * Thread#[] for more information.
3795 */
3796
3797static VALUE
3798rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3799{
3800 VALUE locals;
3801
3802 if (OBJ_FROZEN(thread)) {
3803 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3804 }
3805
3806 locals = rb_thread_local_storage(thread);
3807 return rb_hash_aset(locals, rb_to_symbol(key), val);
3808}
3809
3810/*
3811 * call-seq:
3812 * thr.key?(sym) -> true or false
3813 *
3814 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3815 * variable.
3816 *
3817 * me = Thread.current
3818 * me[:oliver] = "a"
3819 * me.key?(:oliver) #=> true
3820 * me.key?(:stanley) #=> false
3821 */
3822
3823static VALUE
3824rb_thread_key_p(VALUE self, VALUE key)
3825{
3826 VALUE val;
3827 ID id = rb_check_id(&key);
3828 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3829
3830 if (!id || local_storage == NULL) {
3831 return Qfalse;
3832 }
3833 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
3834}
3835
3836static enum rb_id_table_iterator_result
3837thread_keys_i(ID key, VALUE value, void *ary)
3838{
3839 rb_ary_push((VALUE)ary, ID2SYM(key));
3840 return ID_TABLE_CONTINUE;
3841}
3842
3843 int
3844 rb_thread_alone(void)
3845{
3846 // TODO
3847 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
3848}
3849
3850/*
3851 * call-seq:
3852 * thr.keys -> array
3853 *
3854 * Returns an array of the names of the fiber-local variables (as Symbols).
3855 *
3856 * thr = Thread.new do
3857 * Thread.current[:cat] = 'meow'
3858 * Thread.current["dog"] = 'woof'
3859 * end
3860 * thr.join #=> #<Thread:0x401b3f10 dead>
3861 * thr.keys #=> [:dog, :cat]
3862 */
3863
3864static VALUE
3865rb_thread_keys(VALUE self)
3866{
3867 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3868 VALUE ary = rb_ary_new();
3869
3870 if (local_storage) {
3871 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
3872 }
3873 return ary;
3874}
3875
3876static int
3877keys_i(VALUE key, VALUE value, VALUE ary)
3878{
3879 rb_ary_push(ary, key);
3880 return ST_CONTINUE;
3881}
3882
3883/*
3884 * call-seq:
3885 * thr.thread_variables -> array
3886 *
3887 * Returns an array of the names of the thread-local variables (as Symbols).
3888 *
3889 * thr = Thread.new do
3890 * Thread.current.thread_variable_set(:cat, 'meow')
3891 * Thread.current.thread_variable_set("dog", 'woof')
3892 * end
3893 * thr.join #=> #<Thread:0x401b3f10 dead>
3894 * thr.thread_variables #=> [:dog, :cat]
3895 *
3896 * Note that these are not fiber local variables. Please see Thread#[] and
3897 * Thread#thread_variable_get for more details.
3898 */
3899
3900static VALUE
3901rb_thread_variables(VALUE thread)
3902{
3903 VALUE locals;
3904 VALUE ary;
3905
3906 ary = rb_ary_new();
3907 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3908 return ary;
3909 }
3910 locals = rb_thread_local_storage(thread);
3911 rb_hash_foreach(locals, keys_i, ary);
3912
3913 return ary;
3914}
3915
3916/*
3917 * call-seq:
3918 * thr.thread_variable?(key) -> true or false
3919 *
3920 * Returns +true+ if the given string (or symbol) exists as a thread-local
3921 * variable.
3922 *
3923 * me = Thread.current
3924 * me.thread_variable_set(:oliver, "a")
3925 * me.thread_variable?(:oliver) #=> true
3926 * me.thread_variable?(:stanley) #=> false
3927 *
3928 * Note that these are not fiber local variables. Please see Thread#[] and
3929 * Thread#thread_variable_get for more details.
3930 */
3931
3932static VALUE
3933rb_thread_variable_p(VALUE thread, VALUE key)
3934{
3935 VALUE locals;
3936 VALUE symbol = rb_to_symbol(key);
3937
3938 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3939 return Qfalse;
3940 }
3941 locals = rb_thread_local_storage(thread);
3942
3943 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
3944}
3945
3946/*
3947 * call-seq:
3948 * thr.priority -> integer
3949 *
3950  * Returns the priority of <i>thr</i>. The default is inherited from the
3951  * thread which created the new thread, or zero for the
3952  * initial main thread; higher-priority threads will run more frequently
3953  * than lower-priority threads (but lower-priority threads can also run).
3954  *
3955  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3956  * platforms.
3957 *
3958 * Thread.current.priority #=> 0
3959 */
3960
3961static VALUE
3962rb_thread_priority(VALUE thread)
3963{
3964 return INT2NUM(rb_thread_ptr(thread)->priority);
3965}
3966
3967
3968/*
3969 * call-seq:
3970 * thr.priority= integer -> thr
3971 *
3972 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
3973 * will run more frequently than lower-priority threads (but lower-priority
3974 * threads can also run).
3975 *
3976  * This is just a hint for the Ruby thread scheduler. It may be ignored on some
3977  * platforms.
3978 *
3979 * count1 = count2 = 0
3980 * a = Thread.new do
3981 * loop { count1 += 1 }
3982 * end
3983 * a.priority = -1
3984 *
3985 * b = Thread.new do
3986 * loop { count2 += 1 }
3987 * end
3988 * b.priority = -2
3989 * sleep 1 #=> 1
3990 * count1 #=> 622504
3991 * count2 #=> 5832
3992 */
3993
3994static VALUE
3995rb_thread_priority_set(VALUE thread, VALUE prio)
3996{
3997 rb_thread_t *target_th = rb_thread_ptr(thread);
3998 int priority;
3999
4000#if USE_NATIVE_THREAD_PRIORITY
4001 target_th->priority = NUM2INT(prio);
4002 native_thread_apply_priority(th);
4003#else
4004 priority = NUM2INT(prio);
4005 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4006 priority = RUBY_THREAD_PRIORITY_MAX;
4007 }
4008 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4009 priority = RUBY_THREAD_PRIORITY_MIN;
4010 }
4011 target_th->priority = (int8_t)priority;
4012#endif
4013 return INT2NUM(target_th->priority);
4014}
4015
4016/* for IO */
4017
4018#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4019
4020/*
4021 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4022 * in select(2) system call.
4023 *
4024 * - Linux 2.2.12 (?)
4025 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4026 * select(2) documents how to allocate fd_set dynamically.
4027 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4028 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4029 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4030 * select(2) documents how to allocate fd_set dynamically.
4031 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4032 * - Solaris 8 has select_large_fdset
4033 * - Mac OS X 10.7 (Lion)
4034  *   select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4035 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4036 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4037 *
4038 * When fd_set is not big enough to hold big file descriptors,
4039 * it should be allocated dynamically.
4040  * Note that this assumes fd_set is structured as a bitmap.
4041 *
4042 * rb_fd_init allocates the memory.
4043  * rb_fd_term frees the memory.
4044  * rb_fd_set may re-allocate the bitmap.
4045 *
4046 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4047 */
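
/*
 * Usage sketch (illustrative only): waiting for readability of a descriptor
 * that may be larger than FD_SETSIZE, using the dynamically sized rb_fdset_t
 * API defined below.
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = {1, 0};          // wait at most one second
 *
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);                // grows the bitmap as needed
 *   if (rb_fd_select(fd + 1, &rfds, NULL, NULL, &tv) > 0 &&
 *       rb_fd_isset(fd, &rfds)) {
 *       // fd is readable
 *   }
 *   rb_fd_term(&rfds);                   // releases the allocated bitmap
 */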
4048
4049void
4050 rb_fd_init(rb_fdset_t *fds)
4051 {
4052 fds->maxfd = 0;
4053 fds->fdset = ALLOC(fd_set);
4054 FD_ZERO(fds->fdset);
4055}
4056
4057void
4058rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4059{
4060 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4061
4062 if (size < sizeof(fd_set))
4063 size = sizeof(fd_set);
4064 dst->maxfd = src->maxfd;
4065 dst->fdset = xmalloc(size);
4066 memcpy(dst->fdset, src->fdset, size);
4067}
4068
4069void
4070 rb_fd_term(rb_fdset_t *fds)
4071 {
4072 xfree(fds->fdset);
4073 fds->maxfd = 0;
4074 fds->fdset = 0;
4075}
4076
4077void
4078 rb_fd_zero(rb_fdset_t *fds)
4079 {
4080 if (fds->fdset)
4081 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4082}
4083
4084static void
4085rb_fd_resize(int n, rb_fdset_t *fds)
4086{
4087 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4088 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4089
4090 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4091 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4092
4093 if (m > o) {
4094 fds->fdset = xrealloc(fds->fdset, m);
4095 memset((char *)fds->fdset + o, 0, m - o);
4096 }
4097 if (n >= fds->maxfd) fds->maxfd = n + 1;
4098}
4099
4100void
4101rb_fd_set(int n, rb_fdset_t *fds)
4102{
4103 rb_fd_resize(n, fds);
4104 FD_SET(n, fds->fdset);
4105}
4106
4107void
4108rb_fd_clr(int n, rb_fdset_t *fds)
4109{
4110 if (n >= fds->maxfd) return;
4111 FD_CLR(n, fds->fdset);
4112}
4113
4114int
4115rb_fd_isset(int n, const rb_fdset_t *fds)
4116{
4117 if (n >= fds->maxfd) return 0;
4118 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4119}
4120
4121void
4122rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4123{
4124 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4125
4126 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4127 dst->maxfd = max;
4128 dst->fdset = xrealloc(dst->fdset, size);
4129 memcpy(dst->fdset, src, size);
4130}
4131
4132void
4133rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4134{
4135 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4136
4137 if (size < sizeof(fd_set))
4138 size = sizeof(fd_set);
4139 dst->maxfd = src->maxfd;
4140 dst->fdset = xrealloc(dst->fdset, size);
4141 memcpy(dst->fdset, src->fdset, size);
4142}
4143
4144int
4145rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4146{
4147 fd_set *r = NULL, *w = NULL, *e = NULL;
4148 if (readfds) {
4149 rb_fd_resize(n - 1, readfds);
4150 r = rb_fd_ptr(readfds);
4151 }
4152 if (writefds) {
4153 rb_fd_resize(n - 1, writefds);
4154 w = rb_fd_ptr(writefds);
4155 }
4156 if (exceptfds) {
4157 rb_fd_resize(n - 1, exceptfds);
4158 e = rb_fd_ptr(exceptfds);
4159 }
4160 return select(n, r, w, e, timeout);
4161}
4162
4163#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4164
4165#undef FD_ZERO
4166#undef FD_SET
4167#undef FD_CLR
4168#undef FD_ISSET
4169
4170#define FD_ZERO(f) rb_fd_zero(f)
4171#define FD_SET(i, f) rb_fd_set((i), (f))
4172#define FD_CLR(i, f) rb_fd_clr((i), (f))
4173#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4174
4175#elif defined(_WIN32)
4176
4177void
4178rb_fd_init(rb_fdset_t *set)
4179{
4180 set->capa = FD_SETSIZE;
4181 set->fdset = ALLOC(fd_set);
4182 FD_ZERO(set->fdset);
4183}
4184
4185void
4186rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4187{
4188 rb_fd_init(dst);
4189 rb_fd_dup(dst, src);
4190}
4191
4192void
4193rb_fd_term(rb_fdset_t *set)
4194{
4195 xfree(set->fdset);
4196 set->fdset = NULL;
4197 set->capa = 0;
4198}
4199
4200void
4201rb_fd_set(int fd, rb_fdset_t *set)
4202{
4203 unsigned int i;
4204 SOCKET s = rb_w32_get_osfhandle(fd);
4205
4206 for (i = 0; i < set->fdset->fd_count; i++) {
4207 if (set->fdset->fd_array[i] == s) {
4208 return;
4209 }
4210 }
4211 if (set->fdset->fd_count >= (unsigned)set->capa) {
4212 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4213 set->fdset =
4214 rb_xrealloc_mul_add(
4215 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4216 }
4217 set->fdset->fd_array[set->fdset->fd_count++] = s;
4218}
4219
4220#undef FD_ZERO
4221#undef FD_SET
4222#undef FD_CLR
4223#undef FD_ISSET
4224
4225#define FD_ZERO(f) rb_fd_zero(f)
4226#define FD_SET(i, f) rb_fd_set((i), (f))
4227#define FD_CLR(i, f) rb_fd_clr((i), (f))
4228#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4229
4230#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4231
4232#endif
4233
4234#ifndef rb_fd_no_init
4235#define rb_fd_no_init(fds) (void)(fds)
4236#endif
4237
4238static int
4239wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4240{
4241 int r = *result;
4242 if (r < 0) {
4243 switch (errnum) {
4244 case EINTR:
4245#ifdef ERESTART
4246 case ERESTART:
4247#endif
4248 *result = 0;
4249 if (rel && hrtime_update_expire(rel, end)) {
4250 *rel = 0;
4251 }
4252 return TRUE;
4253 }
4254 return FALSE;
4255 }
4256 else if (r == 0) {
4257 /* check for spurious wakeup */
4258 if (rel) {
4259 return !hrtime_update_expire(rel, end);
4260 }
4261 return TRUE;
4262 }
4263 return FALSE;
4264}
4266struct select_set {
4267 int max;
4268 rb_thread_t *th;
4269 rb_fdset_t *rset;
4270 rb_fdset_t *wset;
4271 rb_fdset_t *eset;
4272 rb_fdset_t orig_rset;
4273 rb_fdset_t orig_wset;
4274 rb_fdset_t orig_eset;
4275 struct timeval *timeout;
4276};
4277
4278static VALUE
4279select_set_free(VALUE p)
4280{
4281 struct select_set *set = (struct select_set *)p;
4282
4283 rb_fd_term(&set->orig_rset);
4284 rb_fd_term(&set->orig_wset);
4285 rb_fd_term(&set->orig_eset);
4286
4287 return Qfalse;
4288}
4289
4290static VALUE
4291do_select(VALUE p)
4292{
4293 struct select_set *set = (struct select_set *)p;
4294 volatile int result = 0;
4295 int lerrno;
4296 rb_hrtime_t *to, rel, end = 0;
4297
4298 timeout_prepare(&to, &rel, &end, set->timeout);
4299 volatile rb_hrtime_t endtime = end;
4300#define restore_fdset(dst, src) \
4301 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4302#define do_select_update() \
4303 (restore_fdset(set->rset, &set->orig_rset), \
4304 restore_fdset(set->wset, &set->orig_wset), \
4305 restore_fdset(set->eset, &set->orig_eset), \
4306 TRUE)
4307
4308 do {
4309 lerrno = 0;
4310
4311 BLOCKING_REGION(set->th, {
4312 struct timeval tv;
4313
4314 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4315 result = native_fd_select(set->max,
4316 set->rset, set->wset, set->eset,
4317 rb_hrtime2timeval(&tv, to), set->th);
4318 if (result < 0) lerrno = errno;
4319 }
4320 }, ubf_select, set->th, TRUE);
4321
4322 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4323 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4324
4325 if (result < 0) {
4326 errno = lerrno;
4327 }
4328
4329 return (VALUE)result;
4330}
4331
4332int
4333rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4334 struct timeval *timeout)
4335{
4336 struct select_set set;
4337
4338 set.th = GET_THREAD();
4339 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4340 set.max = max;
4341 set.rset = read;
4342 set.wset = write;
4343 set.eset = except;
4344 set.timeout = timeout;
4345
4346 if (!set.rset && !set.wset && !set.eset) {
4347 if (!timeout) {
4348 rb_thread_sleep_forever();
4349 return 0;
4350 }
4351 rb_thread_wait_for(*timeout);
4352 return 0;
4353 }
4354
4355#define fd_init_copy(f) do { \
4356 if (set.f) { \
4357 rb_fd_resize(set.max - 1, set.f); \
4358 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4359 rb_fd_init_copy(&set.orig_##f, set.f); \
4360 } \
4361 } \
4362 else { \
4363 rb_fd_no_init(&set.orig_##f); \
4364 } \
4365 } while (0)
4366 fd_init_copy(rset);
4367 fd_init_copy(wset);
4368 fd_init_copy(eset);
4369#undef fd_init_copy
4370
4371 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4372}
4373
4374#ifdef USE_POLL
4375
4376/* Same as the Linux kernel. TODO: make a platform-independent definition. */
4377#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4378#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4379#define POLLEX_SET (POLLPRI)
4380
4381#ifndef POLLERR_SET /* defined for FreeBSD for now */
4382# define POLLERR_SET (0)
4383#endif
4384
4385static int
4386wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4387 rb_hrtime_t *const to, volatile int *lerrno)
4388{
4389 struct timespec ts;
4390 volatile int result = 0;
4391
4392 *lerrno = 0;
4393 BLOCKING_REGION(th, {
4394 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4395 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4396 if (result < 0) *lerrno = errno;
4397 }
4398 }, ubf_select, th, TRUE);
4399 return result;
4400}
4401
4402/*
4403 * returns a mask of events
4404 */
4405static int
4406thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4407{
4408 struct pollfd fds[1] = {{
4409 .fd = fd,
4410 .events = (short)events,
4411 .revents = 0,
4412 }};
4413 volatile int result = 0;
4414 nfds_t nfds;
4415 struct waiting_fd wfd;
4416 enum ruby_tag_type state;
4417 volatile int lerrno;
4418
4419 rb_execution_context_t *ec = GET_EC();
4420 rb_thread_t *th = rb_ec_thread_ptr(ec);
4421
4422 thread_io_setup_wfd(th, fd, &wfd);
4423
4424 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4425 // fd is readable
4426 state = 0;
4427 fds[0].revents = events;
4428 errno = 0;
4429 }
4430 else {
4431 EC_PUSH_TAG(wfd.th->ec);
4432 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4433 rb_hrtime_t *to, rel, end = 0;
4434 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4435 timeout_prepare(&to, &rel, &end, timeout);
4436 do {
4437 nfds = numberof(fds);
4438 result = wait_for_single_fd_blocking_region(wfd.th, fds, nfds, to, &lerrno);
4439
4440 RUBY_VM_CHECK_INTS_BLOCKING(wfd.th->ec);
4441 } while (wait_retryable(&result, lerrno, to, end));
4442 }
4443 EC_POP_TAG();
4444 }
4445
4446 thread_io_wake_pending_closer(&wfd);
4447
4448 if (state) {
4449 EC_JUMP_TAG(wfd.th->ec, state);
4450 }
4451
4452 if (result < 0) {
4453 errno = lerrno;
4454 return -1;
4455 }
4456
4457 if (fds[0].revents & POLLNVAL) {
4458 errno = EBADF;
4459 return -1;
4460 }
4461
4462 /*
4463 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4464 * Therefore we need to fix it up.
4465 */
4466 result = 0;
4467 if (fds[0].revents & POLLIN_SET)
4468 result |= RB_WAITFD_IN;
4469 if (fds[0].revents & POLLOUT_SET)
4470 result |= RB_WAITFD_OUT;
4471 if (fds[0].revents & POLLEX_SET)
4472 result |= RB_WAITFD_PRI;
4473
4474 /* all requested events are ready if there is an error */
4475 if (fds[0].revents & POLLERR_SET)
4476 result |= events;
4477
4478 return result;
4479}
4480#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4481struct select_args {
4482 union {
4483 int fd;
4484 int error;
4485 } as;
4486 rb_fdset_t *read;
4487 rb_fdset_t *write;
4488 rb_fdset_t *except;
4489 struct waiting_fd wfd;
4490 struct timeval *tv;
4491};
4492
4493static VALUE
4494select_single(VALUE ptr)
4495{
4496 struct select_args *args = (struct select_args *)ptr;
4497 int r;
4498
4499 r = rb_thread_fd_select(args->as.fd + 1,
4500 args->read, args->write, args->except, args->tv);
4501 if (r == -1)
4502 args->as.error = errno;
4503 if (r > 0) {
4504 r = 0;
4505 if (args->read && rb_fd_isset(args->as.fd, args->read))
4506 r |= RB_WAITFD_IN;
4507 if (args->write && rb_fd_isset(args->as.fd, args->write))
4508 r |= RB_WAITFD_OUT;
4509 if (args->except && rb_fd_isset(args->as.fd, args->except))
4510 r |= RB_WAITFD_PRI;
4511 }
4512 return (VALUE)r;
4513}
4514
4515static VALUE
4516select_single_cleanup(VALUE ptr)
4517{
4518 struct select_args *args = (struct select_args *)ptr;
4519
4520 thread_io_wake_pending_closer(&args->wfd);
4521 if (args->read) rb_fd_term(args->read);
4522 if (args->write) rb_fd_term(args->write);
4523 if (args->except) rb_fd_term(args->except);
4524
4525 return (VALUE)-1;
4526}
4527
4528static rb_fdset_t *
4529init_set_fd(int fd, rb_fdset_t *fds)
4530{
4531 if (fd < 0) {
4532 return 0;
4533 }
4534 rb_fd_init(fds);
4535 rb_fd_set(fd, fds);
4536
4537 return fds;
4538}
4539
4540static int
4541thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4542{
4543 rb_fdset_t rfds, wfds, efds;
4544 struct select_args args;
4545 int r;
4546 VALUE ptr = (VALUE)&args;
4547 rb_thread_t *th = GET_THREAD();
4548
4549 args.as.fd = fd;
4550 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4551 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4552 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4553 args.tv = timeout;
4554 thread_io_setup_wfd(th, fd, &args.wfd);
4555
4556 r = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4557 if (r == -1)
4558 errno = args.as.error;
4559
4560 return r;
4561}
4562#endif /* ! USE_POLL */
4563
4564int
4565rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4566{
4567 return thread_io_wait(NULL, fd, events, timeout);
4568}
4569
4570int
4571rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
4572{
4573 return thread_io_wait(io, io->fd, events, timeout);
4574}
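
/*
 * Illustrative sketch: rb_thread_wait_for_single_fd() is the public entry
 * point matching thread_io_wait() above.  It releases the GVL while waiting,
 * so other Ruby threads keep running; the return value is a mask of the
 * RB_WAITFD_* events that became ready, or -1 with errno set on error.
 * "sock" is a hypothetical descriptor owned by a C extension.
 *
 *     int ready = rb_thread_wait_for_single_fd(sock, RB_WAITFD_IN, NULL);
 *     if (ready < 0) rb_sys_fail("wait");
 *     if (ready & RB_WAITFD_IN) {
 *         // sock can now be read without blocking
 *     }
 */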
4575
4576/*
4577 * for GC
4578 */
4579
4580#ifdef USE_CONSERVATIVE_STACK_END
4581void
4582rb_gc_set_stack_end(VALUE **stack_end_p)
4583{
4584 VALUE stack_end;
4585COMPILER_WARNING_PUSH
4586#if __has_warning("-Wdangling-pointer")
4587COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4588#endif
4589 *stack_end_p = &stack_end;
4590COMPILER_WARNING_POP
4591}
4592#endif
4593
4594/*
4595 *
4596 */
4597
4598void
4599rb_threadptr_check_signal(rb_thread_t *mth)
4600{
4601 /* mth must be main_thread */
4602 if (rb_signal_buff_size() > 0) {
4603 /* wakeup main thread */
4604 threadptr_trap_interrupt(mth);
4605 }
4606}
4607
4608static void
4609async_bug_fd(const char *mesg, int errno_arg, int fd)
4610{
4611 char buff[64];
4612 size_t n = strlcpy(buff, mesg, sizeof(buff));
4613 if (n < sizeof(buff)-3) {
4614 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4615 }
4616 rb_async_bug_errno(buff, errno_arg);
4617}
4618
4619/* VM-dependent API is not available for this function */
4620static int
4621consume_communication_pipe(int fd)
4622{
4623#if USE_EVENTFD
4624 uint64_t buff[1];
4625#else
4626 /* the buffer can be shared because no one else refers to it. */
4627 static char buff[1024];
4628#endif
4629 ssize_t result;
4630 int ret = FALSE; /* for rb_sigwait_sleep */
4631
4632 while (1) {
4633 result = read(fd, buff, sizeof(buff));
4634#if USE_EVENTFD
4635 RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4636#else
4637 RUBY_DEBUG_LOG("result:%d", (int)result);
4638#endif
4639 if (result > 0) {
4640 ret = TRUE;
4641 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4642 return ret;
4643 }
4644 }
4645 else if (result == 0) {
4646 return ret;
4647 }
4648 else if (result < 0) {
4649 int e = errno;
4650 switch (e) {
4651 case EINTR:
4652 continue; /* retry */
4653 case EAGAIN:
4654#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4655 case EWOULDBLOCK:
4656#endif
4657 return ret;
4658 default:
4659 async_bug_fd("consume_communication_pipe: read", e, fd);
4660 }
4661 }
4662 }
4663}
4664
4665void
4666rb_thread_stop_timer_thread(void)
4667{
4668 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4669 native_reset_timer_thread();
4670 }
4671}
4672
4673void
4674rb_thread_reset_timer_thread(void)
4675{
4676 native_reset_timer_thread();
4677}
4678
4679void
4680rb_thread_start_timer_thread(void)
4681{
4682 system_working = 1;
4683 rb_thread_create_timer_thread();
4684}
4685
4686static int
4687clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4688{
4689 int i;
4690 VALUE coverage = (VALUE)val;
4691 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4692 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4693
4694 if (lines) {
4695 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4696 rb_ary_clear(lines);
4697 }
4698 else {
4699 int i;
4700 for (i = 0; i < RARRAY_LEN(lines); i++) {
4701 if (RARRAY_AREF(lines, i) != Qnil)
4702 RARRAY_ASET(lines, i, INT2FIX(0));
4703 }
4704 }
4705 }
4706 if (branches) {
4707 VALUE counters = RARRAY_AREF(branches, 1);
4708 for (i = 0; i < RARRAY_LEN(counters); i++) {
4709 RARRAY_ASET(counters, i, INT2FIX(0));
4710 }
4711 }
4712
4713 return ST_CONTINUE;
4714}
4715
4716void
4717rb_clear_coverages(void)
4718{
4719 VALUE coverages = rb_get_coverages();
4720 if (RTEST(coverages)) {
4721 rb_hash_foreach(coverages, clear_coverage_i, 0);
4722 }
4723}
4724
4725#if defined(HAVE_WORKING_FORK)
4726
4727static void
4728rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4729{
4730 rb_thread_t *i = 0;
4731 rb_vm_t *vm = th->vm;
4732 rb_ractor_t *r = th->ractor;
4733 vm->ractor.main_ractor = r;
4734 vm->ractor.main_thread = th;
4735 r->threads.main = th;
4736 r->status_ = ractor_created;
4737
4738 thread_sched_atfork(TH_SCHED(th));
4739 ubf_list_atfork();
4740
4741 // OK. Only this thread accesses:
4742 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4743 ccan_list_for_each(&r->threads.set, i, lt_node) {
4744 atfork(i, th);
4745 }
4746 }
4747 rb_vm_living_threads_init(vm);
4748
4749 rb_ractor_atfork(vm, th);
4750 rb_vm_postponed_job_atfork();
4751
4752 /* may be held by any thread in parent */
4753 rb_native_mutex_initialize(&th->interrupt_lock);
4754 ccan_list_head_init(&th->interrupt_exec_tasks);
4755
4756 vm->fork_gen++;
4757 rb_ractor_sleeper_threads_clear(th->ractor);
4758 rb_clear_coverages();
4759
4760 // restart the timer thread (timer threads access `vm->waitpid_lock` and so on).
4761 rb_thread_reset_timer_thread();
4762 rb_thread_start_timer_thread();
4763
4764 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4765 VM_ASSERT(vm->ractor.cnt == 1);
4766}
4767
4768static void
4769terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4770{
4771 if (th != current_th) {
4772 rb_native_mutex_initialize(&th->interrupt_lock);
4773 rb_mutex_abandon_keeping_mutexes(th);
4774 rb_mutex_abandon_locking_mutex(th);
4775 thread_cleanup_func(th, TRUE);
4776 }
4777}
4778
4779void rb_fiber_atfork(rb_thread_t *);
4780void
4781rb_thread_atfork(void)
4782{
4783 rb_thread_t *th = GET_THREAD();
4784 rb_threadptr_pending_interrupt_clear(th);
4785 rb_thread_atfork_internal(th, terminate_atfork_i);
4786 th->join_list = NULL;
4787 rb_fiber_atfork(th);
4788
4789 /* We don't want to reproduce CVE-2003-0900. */
4790 rb_reset_random_seed();
4791}
4792
4793static void
4794terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4795{
4796 if (th != current_th) {
4797 thread_cleanup_func_before_exec(th);
4798 }
4799}
4800
4801void
4802rb_thread_atfork_before_exec(void)
4803{
4804 rb_thread_t *th = GET_THREAD();
4805 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
4806}
4807#else
4808void
4809rb_thread_atfork(void)
4810{
4811}
4812
4813void
4814rb_thread_atfork_before_exec(void)
4815{
4816}
4817#endif
4819struct thgroup {
4820 int enclosed;
4821};
4822
4823static const rb_data_type_t thgroup_data_type = {
4824 "thgroup",
4825 {
4826 0,
4827 RUBY_TYPED_DEFAULT_FREE,
4828 NULL, // No external memory to report
4829 },
4830 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
4831};
4832
4833/*
4834 * Document-class: ThreadGroup
4835 *
4836 * ThreadGroup provides a means of keeping track of a number of threads as a
4837 * group.
4838 *
4839 * A given Thread object can only belong to one ThreadGroup at a time; adding
4840 * a thread to a new group will remove it from any previous group.
4841 *
4842 * Newly created threads belong to the same group as the thread from which they
4843 * were created.
4844 */
4845
4846/*
4847 * Document-const: Default
4848 *
4849 * The default ThreadGroup created when Ruby starts; all Threads belong to it
4850 * by default.
4851 */
4852static VALUE
4853thgroup_s_alloc(VALUE klass)
4854{
4855 VALUE group;
4856 struct thgroup *data;
4857
4858 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
4859 data->enclosed = 0;
4860
4861 return group;
4862}
4863
4864/*
4865 * call-seq:
4866 * thgrp.list -> array
4867 *
4868 * Returns an array of all existing Thread objects that belong to this group.
4869 *
4870 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
4871 */
4872
4873static VALUE
4874thgroup_list(VALUE group)
4875{
4876 VALUE ary = rb_ary_new();
4877 rb_thread_t *th = 0;
4878 rb_ractor_t *r = GET_RACTOR();
4879
4880 ccan_list_for_each(&r->threads.set, th, lt_node) {
4881 if (th->thgroup == group) {
4882 rb_ary_push(ary, th->self);
4883 }
4884 }
4885 return ary;
4886}
4887
4888
4889/*
4890 * call-seq:
4891 * thgrp.enclose -> thgrp
4892 *
4893 * Prevents threads from being added to or removed from the receiving
4894 * ThreadGroup.
4895 *
4896 * New threads can still be started in an enclosed ThreadGroup.
4897 *
4898 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
4899 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
4900 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
4901 * tg.add thr
4902 * #=> ThreadError: can't move from the enclosed thread group
4903 */
4904
4905static VALUE
4906thgroup_enclose(VALUE group)
4907{
4908 struct thgroup *data;
4909
4910 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4911 data->enclosed = 1;
4912
4913 return group;
4914}
4915
4916
4917/*
4918 * call-seq:
4919 * thgrp.enclosed? -> true or false
4920 *
4921 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
4922 */
4923
4924static VALUE
4925thgroup_enclosed_p(VALUE group)
4926{
4927 struct thgroup *data;
4928
4929 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4930 return RBOOL(data->enclosed);
4931}
4932
4933
4934/*
4935 * call-seq:
4936 * thgrp.add(thread) -> thgrp
4937 *
4938 * Adds the given +thread+ to this group, removing it from any other
4939 * group of which it may previously have been a member.
4940 *
4941 * puts "Initial group is #{ThreadGroup::Default.list}"
4942 * tg = ThreadGroup.new
4943 * t1 = Thread.new { sleep }
4944 * t2 = Thread.new { sleep }
4945 * puts "t1 is #{t1}"
4946 * puts "t2 is #{t2}"
4947 * tg.add(t1)
4948 * puts "Initial group now #{ThreadGroup::Default.list}"
4949 * puts "tg group now #{tg.list}"
4950 *
4951 * This will produce:
4952 *
4953 * Initial group is #<Thread:0x401bdf4c>
4954 * t1 is #<Thread:0x401b3c90>
4955 * t2 is #<Thread:0x401b3c18>
4956 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
4957 * tg group now #<Thread:0x401b3c90>
4958 */
4959
4960static VALUE
4961thgroup_add(VALUE group, VALUE thread)
4962{
4963 rb_thread_t *target_th = rb_thread_ptr(thread);
4964 struct thgroup *data;
4965
4966 if (OBJ_FROZEN(group)) {
4967 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
4968 }
4969 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
4970 if (data->enclosed) {
4971 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
4972 }
4973
4974 if (OBJ_FROZEN(target_th->thgroup)) {
4975 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
4976 }
4977 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
4978 if (data->enclosed) {
4979 rb_raise(rb_eThreadError,
4980 "can't move from the enclosed thread group");
4981 }
4982
4983 target_th->thgroup = group;
4984 return group;
4985}
4986
4987/*
4988 * Document-class: ThreadShield
4989 */
4990static void
4991thread_shield_mark(void *ptr)
4992{
4993 rb_gc_mark((VALUE)ptr);
4994}
4995
4996static const rb_data_type_t thread_shield_data_type = {
4997 "thread_shield",
4998 {thread_shield_mark, 0, 0,},
4999 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5000};
5001
5002static VALUE
5003thread_shield_alloc(VALUE klass)
5004{
5005 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5006}
5007
5008#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5009#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5010#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5011#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5012STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
5013static inline unsigned int
5014rb_thread_shield_waiting(VALUE b)
5015{
5016 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5017}
5018
5019static inline void
5020rb_thread_shield_waiting_inc(VALUE b)
5021{
5022 unsigned int w = rb_thread_shield_waiting(b);
5023 w++;
5024 if (w > THREAD_SHIELD_WAITING_MAX)
5025 rb_raise(rb_eRuntimeError, "waiting count overflow");
5026 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5027 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5028}
5029
5030static inline void
5031rb_thread_shield_waiting_dec(VALUE b)
5032{
5033 unsigned int w = rb_thread_shield_waiting(b);
5034 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5035 w--;
5036 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5037 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5038}
5039
5040VALUE
5041rb_thread_shield_new(void)
5042{
5043 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5044 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5045 return thread_shield;
5046}
5047
5048bool
5049rb_thread_shield_owned(VALUE self)
5050{
5051 VALUE mutex = GetThreadShieldPtr(self);
5052 if (!mutex) return false;
5053
5054 rb_mutex_t *m = mutex_ptr(mutex);
5055
5056 return m->fiber == GET_EC()->fiber_ptr;
5057}
5058
5059/*
5060 * Wait on a thread shield.
5061 *
5062 * Returns
5063 *  true:  acquired the thread shield
5064 *  false: the thread shield was destroyed and no other threads are waiting
5065 *  nil:   the thread shield was destroyed but is still in use
5066 */
5067VALUE
5068rb_thread_shield_wait(VALUE self)
5069{
5070 VALUE mutex = GetThreadShieldPtr(self);
5071 rb_mutex_t *m;
5072
5073 if (!mutex) return Qfalse;
5074 m = mutex_ptr(mutex);
5075 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5076 rb_thread_shield_waiting_inc(self);
5077 rb_mutex_lock(mutex);
5078 rb_thread_shield_waiting_dec(self);
5079 if (DATA_PTR(self)) return Qtrue;
5080 rb_mutex_unlock(mutex);
5081 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5082}
5083
5084static VALUE
5085thread_shield_get_mutex(VALUE self)
5086{
5087 VALUE mutex = GetThreadShieldPtr(self);
5088 if (!mutex)
5089 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5090 return mutex;
5091}
5092
5093/*
5094 * Release a thread shield, and return true if it has waiting threads.
5095 */
5096VALUE
5097rb_thread_shield_release(VALUE self)
5098{
5099 VALUE mutex = thread_shield_get_mutex(self);
5100 rb_mutex_unlock(mutex);
5101 return RBOOL(rb_thread_shield_waiting(self) > 0);
5102}
5103
5104/*
5105 * Release and destroy a thread shield, and return true if it has waiting threads.
5106 */
5107VALUE
5108rb_thread_shield_destroy(VALUE self)
5109{
5110 VALUE mutex = thread_shield_get_mutex(self);
5111 DATA_PTR(self) = 0;
5112 rb_mutex_unlock(mutex);
5113 return RBOOL(rb_thread_shield_waiting(self) > 0);
5114}
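
/*
 * Illustrative sketch of the shield protocol implemented above (load.c uses
 * it to serialize concurrent `require` of the same file).  The creating
 * thread owns the shield; other threads block in rb_thread_shield_wait()
 * until the owner releases or destroys it.
 *
 *     // owning thread
 *     VALUE shield = rb_thread_shield_new();    // created in the locked state
 *     // ... perform the work the shield protects ...
 *     rb_thread_shield_destroy(shield);         // wakes every waiter
 *
 *     // any other thread
 *     VALUE r = rb_thread_shield_wait(shield);
 *     if (r == Qtrue) {
 *         // acquired the shield (it was released, not destroyed)
 *     }
 *     else if (r == Qnil) {
 *         // the shield was destroyed but is still in use by other waiters
 *     }
 *     else {
 *         // the shield was destroyed and no other thread is waiting
 *     }
 */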
5115
5116static VALUE
5117threadptr_recursive_hash(rb_thread_t *th)
5118{
5119 return th->ec->local_storage_recursive_hash;
5120}
5121
5122static void
5123threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5124{
5125 th->ec->local_storage_recursive_hash = hash;
5126}
5127
5129
5130/*
5131 * Returns the current "recursive list" used to detect recursion.
5132 * This list is a hash table, unique for the current thread and for
5133 * the current __callee__.
5134 */
5135
5136static VALUE
5137recursive_list_access(VALUE sym)
5138{
5139 rb_thread_t *th = GET_THREAD();
5140 VALUE hash = threadptr_recursive_hash(th);
5141 VALUE list;
5142 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5143 hash = rb_ident_hash_new();
5144 threadptr_recursive_hash_set(th, hash);
5145 list = Qnil;
5146 }
5147 else {
5148 list = rb_hash_aref(hash, sym);
5149 }
5150 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5151 list = rb_ident_hash_new();
5152 rb_hash_aset(hash, sym, list);
5153 }
5154 return list;
5155}
5156
5157/*
5158 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5159 * in the recursion list.
5160 * Assumes the recursion list is valid.
5161 */
5162
5163static bool
5164recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5165{
5166#if SIZEOF_LONG == SIZEOF_VOIDP
5167 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5168#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5169 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5170 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5171#endif
5172
5173 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5174 if (UNDEF_P(pair_list))
5175 return false;
5176 if (paired_obj_id) {
5177 if (!RB_TYPE_P(pair_list, T_HASH)) {
5178 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5179 return false;
5180 }
5181 else {
5182 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5183 return false;
5184 }
5185 }
5186 return true;
5187}
5188
5189/*
5190 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5191 * For a single obj, it sets list[obj] to Qtrue.
5192 * For a pair, it sets list[obj] to paired_obj_id if possible,
5193 * otherwise list[obj] becomes a hash like:
5194 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5195 * Assumes the recursion list is valid.
5196 */
5197
5198static void
5199recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5200{
5201 VALUE pair_list;
5202
5203 if (!paired_obj) {
5204 rb_hash_aset(list, obj, Qtrue);
5205 }
5206 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5207 rb_hash_aset(list, obj, paired_obj);
5208 }
5209 else {
5210 if (!RB_TYPE_P(pair_list, T_HASH)){
5211 VALUE other_paired_obj = pair_list;
5212 pair_list = rb_hash_new();
5213 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5214 rb_hash_aset(list, obj, pair_list);
5215 }
5216 rb_hash_aset(pair_list, paired_obj, Qtrue);
5217 }
5218}
5219
5220/*
5221 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5222 * For a pair, if list[obj] is a hash, then paired_obj_id is
5223 * removed from the hash and no attempt is made to simplify
5224 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5225 * Assumes the recursion list is valid.
5226 */
5227
5228static int
5229recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5230{
5231 if (paired_obj) {
5232 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5233 if (UNDEF_P(pair_list)) {
5234 return 0;
5235 }
5236 if (RB_TYPE_P(pair_list, T_HASH)) {
5237 rb_hash_delete_entry(pair_list, paired_obj);
5238 if (!RHASH_EMPTY_P(pair_list)) {
5239 return 1; /* keep hash until it is empty */
5240 }
5241 }
5242 }
5243 rb_hash_delete_entry(list, obj);
5244 return 1;
5245}
5247struct exec_recursive_params {
5248 VALUE (*func) (VALUE, VALUE, int);
5249 VALUE list;
5250 VALUE obj;
5251 VALUE pairid;
5252 VALUE arg;
5253};
5254
5255static VALUE
5256exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5257{
5258 struct exec_recursive_params *p = (void *)data;
5259 return (*p->func)(p->obj, p->arg, FALSE);
5260}
5261
5262/*
5263 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5264 * current method is called recursively on obj, or on the pair <obj, pairid>
5265 * If outer is 0, then the innermost func will be called with recursive set
5266 * to true; otherwise the outermost func will be called. In the latter case,
5267 * all inner calls to func are short-circuited by throw.
5268 * Implementation details: the value thrown is the recursive list which is
5269 * proper to the current method and unlikely to be caught anywhere else.
5270 * list[recursive_key] is used as a flag for the outermost call.
5271 */
5272
5273static VALUE
5274exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5275{
5276 VALUE result = Qundef;
5277 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5278 struct exec_recursive_params p;
5279 int outermost;
5280 p.list = recursive_list_access(sym);
5281 p.obj = obj;
5282 p.pairid = pairid;
5283 p.arg = arg;
5284 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5285
5286 if (recursive_check(p.list, p.obj, pairid)) {
5287 if (outer && !outermost) {
5288 rb_throw_obj(p.list, p.list);
5289 }
5290 return (*func)(obj, arg, TRUE);
5291 }
5292 else {
5293 enum ruby_tag_type state;
5294
5295 p.func = func;
5296
5297 if (outermost) {
5298 recursive_push(p.list, ID2SYM(recursive_key), 0);
5299 recursive_push(p.list, p.obj, p.pairid);
5300 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5301 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5302 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5303 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5304 if (result == p.list) {
5305 result = (*func)(obj, arg, TRUE);
5306 }
5307 }
5308 else {
5309 volatile VALUE ret = Qundef;
5310 recursive_push(p.list, p.obj, p.pairid);
5311 EC_PUSH_TAG(GET_EC());
5312 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5313 ret = (*func)(obj, arg, FALSE);
5314 }
5315 EC_POP_TAG();
5316 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5317 goto invalid;
5318 }
5319 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5320 result = ret;
5321 }
5322 }
5323 *(volatile struct exec_recursive_params *)&p;
5324 return result;
5325
5326 invalid:
5327 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5328 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5329 sym, rb_thread_current());
5330 UNREACHABLE_RETURN(Qundef);
5331}
5332
5333/*
5334 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5335 * current method is called recursively on obj
5336 */
5337
5338VALUE
5339rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5340{
5341 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5342}
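
/*
 * Illustrative sketch (hypothetical callback): the typical pattern for the
 * rb_exec_recursive() family is a callback whose third argument reports
 * whether obj is already being processed on the current thread, as
 * Array#inspect does when it prints "[...]" for a self-referencing array.
 *
 *     static VALUE
 *     join_body(VALUE ary, VALUE sep, int recur)
 *     {
 *         if (recur) return rb_str_new_cstr("[...]");
 *         return rb_ary_join(ary, sep);
 *     }
 *
 *     // caller:
 *     VALUE str = rb_exec_recursive(join_body, ary, rb_str_new_cstr(", "));
 */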
5343
5344/*
5345 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5346 * current method is called recursively on the ordered pair <obj, paired_obj>
5347 */
5348
5349VALUE
5350rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5351{
5352 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5353}
5354
5355/*
5356 * If recursion is detected on the current method and obj, the outermost
5357 * func will be called with (obj, arg, true). All inner func will be
5358 * short-circuited using throw.
5359 */
5360
5361VALUE
5362rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5363{
5364 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5365}
5366
5367VALUE
5368rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5369{
5370 return exec_recursive(func, obj, 0, arg, 1, mid);
5371}
5372
5373/*
5374 * If recursion is detected on the current method, obj and paired_obj,
5375 * the outermost func will be called with (obj, arg, true). All inner
5376 * func will be short-circuited using throw.
5377 */
5378
5379VALUE
5380rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5381{
5382 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5383}
5384
5385/*
5386 * call-seq:
5387 * thread.backtrace -> array or nil
5388 *
5389 * Returns the current backtrace of the target thread.
5390 *
5391 */
5392
5393static VALUE
5394rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5395{
5396 return rb_vm_thread_backtrace(argc, argv, thval);
5397}
5398
5399/* call-seq:
5400 * thread.backtrace_locations(*args) -> array or nil
5401 *
5402 * Returns the execution stack for the target thread---an array containing
5403 * backtrace location objects.
5404 *
5405 * See Thread::Backtrace::Location for more information.
5406 *
5407 * This method behaves similarly to Kernel#caller_locations except it applies
5408 * to a specific thread.
5409 */
5410static VALUE
5411rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5412{
5413 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5414}
5415
5416void
5417Init_Thread_Mutex(void)
5418{
5419 rb_thread_t *th = GET_THREAD();
5420
5421 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5422 rb_native_mutex_initialize(&th->interrupt_lock);
5423}
5424
5425/*
5426 * Document-class: ThreadError
5427 *
5428 * Raised when an invalid operation is attempted on a thread.
5429 *
5430 * For example, when no other thread has been started:
5431 *
5432 * Thread.stop
5433 *
5434 * This will raise the following exception:
5435 *
5436 * ThreadError: stopping only thread
5437 * note: use sleep to stop forever
5438 */
5439
5440void
5441Init_Thread(void)
5442{
5443 rb_thread_t *th = GET_THREAD();
5444
5445 sym_never = ID2SYM(rb_intern_const("never"));
5446 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5447 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5448
5449 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5450 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5451 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5452 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5453 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5454 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5455 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5456 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5457 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5458 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5459 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5460 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5461 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5462 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5463 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5464 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5465 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5466 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5467 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5468
5469 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5470 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5471 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5472 rb_define_method(rb_cThread, "value", thread_value, 0);
5473 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5474 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5475 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5476 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5477 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5478 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5479 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5480 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5481 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5482 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5483 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5484 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5485 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5486 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5487 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5488 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5489 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5490 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5491 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5492 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5493 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5494 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5495 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5496 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5497 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5498 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5499
5500 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5501 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5502 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5503 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5504 rb_define_alias(rb_cThread, "inspect", "to_s");
5505
5506 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5507 "stream closed in another thread");
5508
5509 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5510 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5511 rb_define_method(cThGroup, "list", thgroup_list, 0);
5512 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5513 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5514 rb_define_method(cThGroup, "add", thgroup_add, 1);
5515
5516 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5517
5518 if (ptr) {
5519 long quantum = strtol(ptr, NULL, 0);
5520 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5521 thread_default_quantum_ms = (uint32_t)quantum;
5522 }
5523 else if (0) {
5524 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5525 }
5526 }
5527
5528 {
5529 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5530 rb_define_const(cThGroup, "Default", th->thgroup);
5531 }
5532
5534
5535 /* init thread core */
5536 {
5537 /* main thread setting */
5538 {
5539 /* acquire global vm lock */
5540#ifdef HAVE_PTHREAD_NP_H
5541 VM_ASSERT(TH_SCHED(th)->running == th);
5542#endif
5543 // thread_sched_to_running() should not be called because
5544 // it assumes blocked by thread_sched_to_waiting().
5545 // thread_sched_to_running(sched, th);
5546
5547 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5548 th->pending_interrupt_queue_checked = 0;
5549 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5550 }
5551 }
5552
5553 rb_thread_create_timer_thread();
5554
5555 Init_thread_sync();
5556
5557 // TODO: Suppress unused function warning for now
5558 // if (0) rb_thread_sched_destroy(NULL);
5559}
5560
5561int
5562ruby_native_thread_p(void)
5563{
5564 rb_thread_t *th = ruby_thread_from_native();
5565
5566 return th != 0;
5567}
5568
5569#ifdef NON_SCALAR_THREAD_ID
5570 #define thread_id_str(th) (NULL)
5571#else
5572 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5573#endif
5574
5575static void
5576debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5577{
5578 rb_thread_t *th = 0;
5579 VALUE sep = rb_str_new_cstr("\n ");
5580
5581 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5582 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5583 (void *)GET_THREAD(), (void *)r->threads.main);
5584
5585 ccan_list_for_each(&r->threads.set, th, lt_node) {
5586 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5587 "native:%p int:%u",
5588 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5589
5590 if (th->locking_mutex) {
5591 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5592 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5593 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5594 }
5595
5596 {
5597 struct rb_waiting_list *list = th->join_list;
5598 while (list) {
5599 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
5600 list = list->next;
5601 }
5602 }
5603 rb_str_catf(msg, "\n ");
5604 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5605 rb_str_catf(msg, "\n");
5606 }
5607}
5608
5609static void
5610rb_check_deadlock(rb_ractor_t *r)
5611{
5612 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5613
5614#ifdef RUBY_THREAD_PTHREAD_H
5615 if (r->threads.sched.readyq_cnt > 0) return;
5616#endif
5617
5618 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5619 int ltnum = rb_ractor_living_thread_num(r);
5620
5621 if (ltnum > sleeper_num) return;
5622 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5623
5624 int found = 0;
5625 rb_thread_t *th = NULL;
5626
5627 ccan_list_for_each(&r->threads.set, th, lt_node) {
5628 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5629 found = 1;
5630 }
5631 else if (th->locking_mutex) {
5632 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5633 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5634 found = 1;
5635 }
5636 }
5637 if (found)
5638 break;
5639 }
5640
5641 if (!found) {
5642 VALUE argv[2];
5643 argv[0] = rb_eFatal;
5644 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5645 debug_deadlock_check(r, argv[1]);
5646 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5647 rb_threadptr_raise(r->threads.main, 2, argv);
5648 }
5649}
5650
5651// Used for VM memsize reporting. Returns the size of a list of waiting_fd
5652// structs. Defined here because the struct definition lives here as well.
5653size_t
5654rb_vm_memsize_waiting_fds(struct ccan_list_head *waiting_fds)
5655{
5656 struct waiting_fd *waitfd = 0;
5657 size_t size = 0;
5658
5659 ccan_list_for_each(waiting_fds, waitfd, wfd_node) {
5660 size += sizeof(struct waiting_fd);
5661 }
5662
5663 return size;
5664}
5665
5666static void
5667update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5668{
5669 const rb_control_frame_t *cfp = GET_EC()->cfp;
5670 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5671 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5672 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5673 if (lines) {
5674 long line = rb_sourceline() - 1;
5675 VM_ASSERT(line >= 0);
5676 long count;
5677 VALUE num;
5678 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5679 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5680 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5681 rb_ary_push(lines, LONG2FIX(line + 1));
5682 return;
5683 }
5684 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5685 return;
5686 }
5687 num = RARRAY_AREF(lines, line);
5688 if (!FIXNUM_P(num)) return;
5689 count = FIX2LONG(num) + 1;
5690 if (POSFIXABLE(count)) {
5691 RARRAY_ASET(lines, line, LONG2FIX(count));
5692 }
5693 }
5694 }
5695}
5696
5697static void
5698update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5699{
5700 const rb_control_frame_t *cfp = GET_EC()->cfp;
5701 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5702 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5703 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5704 if (branches) {
5705 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5706 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5707 VALUE counters = RARRAY_AREF(branches, 1);
5708 VALUE num = RARRAY_AREF(counters, idx);
5709 count = FIX2LONG(num) + 1;
5710 if (POSFIXABLE(count)) {
5711 RARRAY_ASET(counters, idx, LONG2FIX(count));
5712 }
5713 }
5714 }
5715}
5716
5717const rb_method_entry_t *
5718rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5719{
5720 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5721
5722 if (!me->def) return NULL; // negative cme
5723
5724 retry:
5725 switch (me->def->type) {
5726 case VM_METHOD_TYPE_ISEQ: {
5727 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5728 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5729 path = rb_iseq_path(iseq);
5730 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5731 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5732 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5733 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5734 break;
5735 }
5736 case VM_METHOD_TYPE_BMETHOD: {
5737 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5738 if (iseq) {
5739 rb_iseq_location_t *loc;
5740 rb_iseq_check(iseq);
5741 path = rb_iseq_path(iseq);
5742 loc = &ISEQ_BODY(iseq)->location;
5743 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5744 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5745 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5746 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5747 break;
5748 }
5749 return NULL;
5750 }
5751 case VM_METHOD_TYPE_ALIAS:
5752 me = me->def->body.alias.original_me;
5753 goto retry;
5754 case VM_METHOD_TYPE_REFINED:
5755 me = me->def->body.refined.orig_me;
5756 if (!me) return NULL;
5757 goto retry;
5758 default:
5759 return NULL;
5760 }
5761
5762 /* found */
5763 if (RB_TYPE_P(path, T_ARRAY)) {
5764 path = rb_ary_entry(path, 1);
5765 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5766 }
5767 if (resolved_location) {
5768 resolved_location[0] = path;
5769 resolved_location[1] = beg_pos_lineno;
5770 resolved_location[2] = beg_pos_column;
5771 resolved_location[3] = end_pos_lineno;
5772 resolved_location[4] = end_pos_column;
5773 }
5774 return me;
5775}
5776
5777static void
5778update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5779{
5780 const rb_control_frame_t *cfp = GET_EC()->cfp;
5781 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5782 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5783 VALUE rcount;
5784 long count;
5785
5786 me = rb_resolve_me_location(me, 0);
5787 if (!me) return;
5788
5789 rcount = rb_hash_aref(me2counter, (VALUE) me);
5790 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5791 if (POSFIXABLE(count)) {
5792 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5793 }
5794}
5795
5796VALUE
5797rb_get_coverages(void)
5798{
5799 return GET_VM()->coverages;
5800}
5801
5802int
5803rb_get_coverage_mode(void)
5804{
5805 return GET_VM()->coverage_mode;
5806}
5807
5808void
5809rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5810{
5811 GET_VM()->coverages = coverages;
5812 GET_VM()->me2counter = me2counter;
5813 GET_VM()->coverage_mode = mode;
5814}
5815
5816void
5817rb_resume_coverages(void)
5818{
5819 int mode = GET_VM()->coverage_mode;
5820 VALUE me2counter = GET_VM()->me2counter;
5821 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5822 if (mode & COVERAGE_TARGET_BRANCHES) {
5823 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5824 }
5825 if (mode & COVERAGE_TARGET_METHODS) {
5826 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
5827 }
5828}
5829
5830void
5831rb_suspend_coverages(void)
5832{
5833 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
5834 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
5835 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
5836 }
5837 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
5838 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
5839 }
5840}
5841
5842/* Make coverage arrays empty so old covered files are no longer tracked. */
5843void
5844rb_reset_coverages(void)
5845{
5846 rb_clear_coverages();
5847 rb_iseq_remove_coverage_all();
5848 GET_VM()->coverages = Qfalse;
5849}
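
/*
 * Illustrative sketch of the coverage lifecycle these helpers implement (the
 * real consumer is ext/coverage; the mode value below is an assumption made
 * for the example):
 *
 *     int mode = COVERAGE_TARGET_LINES | COVERAGE_TARGET_BRANCHES;
 *     rb_set_coverages(rb_hash_new(), mode, Qnil);   // install bookkeeping
 *     rb_resume_coverages();                         // add the event hooks
 *     // ... load and run Ruby code ...
 *     rb_suspend_coverages();                        // remove the event hooks
 *     VALUE result = rb_get_coverages();             // path => coverage data
 *     rb_reset_coverages();                          // drop all tracking
 */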
5850
5851VALUE
5852rb_default_coverage(int n)
5853{
5854 VALUE coverage = rb_ary_hidden_new_fill(3);
5855 VALUE lines = Qfalse, branches = Qfalse;
5856 int mode = GET_VM()->coverage_mode;
5857
5858 if (mode & COVERAGE_TARGET_LINES) {
5859 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
5860 }
5861 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
5862
5863 if (mode & COVERAGE_TARGET_BRANCHES) {
5864 branches = rb_ary_hidden_new_fill(2);
5865 /* internal data structures for branch coverage:
5866 *
5867 * { branch base node =>
5868 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
5869 * branch target id =>
5870 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
5871 * ...
5872 * }],
5873 * ...
5874 * }
5875 *
5876 * Example:
5877 * { NODE_CASE =>
5878 * [1, 0, 4, 3, {
5879 * NODE_WHEN => [2, 8, 2, 9, 0],
5880 * NODE_WHEN => [3, 8, 3, 9, 1],
5881 * ...
5882 * }],
5883 * ...
5884 * }
5885 */
5886 VALUE structure = rb_hash_new();
5887 rb_obj_hide(structure);
5888 RARRAY_ASET(branches, 0, structure);
5889 /* branch execution counters */
5890 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
5891 }
5892 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
5893
5894 return coverage;
5895}
5896
5897static VALUE
5898uninterruptible_exit(VALUE v)
5899{
5900 rb_thread_t *cur_th = GET_THREAD();
5901 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
5902
5903 cur_th->pending_interrupt_queue_checked = 0;
5904 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
5905 RUBY_VM_SET_INTERRUPT(cur_th->ec);
5906 }
5907 return Qnil;
5908}
5909
5910VALUE
5911rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
5912{
5913 VALUE interrupt_mask = rb_ident_hash_new();
5914 rb_thread_t *cur_th = GET_THREAD();
5915
5916 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
5917 OBJ_FREEZE(interrupt_mask);
5918 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
5919
5920 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
5921
5922 RUBY_VM_CHECK_INTS(cur_th->ec);
5923 return ret;
5924}
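
/*
 * Illustrative sketch (hypothetical callback and receiver): inside
 * rb_uninterruptible() every asynchronous interrupt (Thread#raise,
 * Thread#kill and friends) is deferred; anything that arrived while the
 * callback ran is delivered right after it returns.
 *
 *     static VALUE
 *     critical_body(VALUE io_obj)
 *     {
 *         // no async interrupt can fire while this runs
 *         return rb_funcall(io_obj, rb_intern("flush"), 0);
 *     }
 *
 *     VALUE ret = rb_uninterruptible(critical_body, io_obj);
 */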
5925
5926static void
5927thread_specific_storage_alloc(rb_thread_t *th)
5928{
5929 VM_ASSERT(th->specific_storage == NULL);
5930
5931 if (UNLIKELY(specific_key_count > 0)) {
5932 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5933 }
5934}
5935
5936rb_internal_thread_specific_key_t
5937rb_internal_thread_specific_key_create(void)
5938{
5939 rb_vm_t *vm = GET_VM();
5940
5941 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
5942 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
5943 }
5944 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
5945 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5946 }
5947 else {
5948 rb_internal_thread_specific_key_t key = specific_key_count++;
5949
5950 if (key == 0) {
5951 // allocate
5952 rb_ractor_t *cr = GET_RACTOR();
5953 rb_thread_t *th;
5954
5955 ccan_list_for_each(&cr->threads.set, th, lt_node) {
5956 thread_specific_storage_alloc(th);
5957 }
5958 }
5959 return key;
5960 }
5961}
5962
5963// async and native thread safe.
5964void *
5965rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
5966{
5967 rb_thread_t *th = DATA_PTR(thread_val);
5968
5969 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5970 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5971 VM_ASSERT(th->specific_storage);
5972
5973 return th->specific_storage[key];
5974}
5975
5976// async and native thread safe.
5977void
5978rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
5979{
5980 rb_thread_t *th = DATA_PTR(thread_val);
5981
5982 VM_ASSERT(rb_thread_ptr(thread_val) == th);
5983 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
5984 VM_ASSERT(th->specific_storage);
5985
5986 th->specific_storage[key] = data;
5987}
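
/*
 * Illustrative sketch (hypothetical extension state): a profiler-style
 * extension creates one key while the VM is still single-ractor and then
 * attaches its own per-thread record to each Ruby thread it observes.  The
 * get/set accessors are async and native thread safe, as noted above.
 *
 *     static rb_internal_thread_specific_key_t profiling_key;
 *
 *     void
 *     Init_my_profiler(void)   // hypothetical extension init
 *     {
 *         profiling_key = rb_internal_thread_specific_key_create();
 *     }
 *
 *     static void
 *     attach_record(VALUE thread, struct my_record *rec)   // hypothetical type
 *     {
 *         rb_internal_thread_specific_set(thread, profiling_key, rec);
 *     }
 */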
5988
5989// interrupt_exec
5990
5991struct rb_interrupt_exec_task {
5992 struct ccan_list_node node;
5993
5994 rb_interrupt_exec_func_t *func;
5995 void *data;
5996 enum rb_interrupt_exec_flag flags;
5997};
5998
5999void
6000rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6001{
6002 struct rb_interrupt_exec_task *task;
6003
6004 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6005 if (task->flags & rb_interrupt_exec_flag_value_data) {
6006 rb_gc_mark((VALUE)task->data);
6007 }
6008 }
6009}
6010
6011// native thread safe
6012// th should be available
6013void
6014rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6015{
6016 // should not use ALLOC
6017 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6018 *task = (struct rb_interrupt_exec_task) {
6019 .flags = flags,
6020 .func = func,
6021 .data = data,
6022 };
6023
6024 rb_native_mutex_lock(&th->interrupt_lock);
6025 {
6026 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6027 threadptr_interrupt_locked(th, true);
6028 }
6029 rb_native_mutex_unlock(&th->interrupt_lock);
6030}
6031
6032static void
6033threadptr_interrupt_exec_exec(rb_thread_t *th)
6034{
6035 while (1) {
6036 struct rb_interrupt_exec_task *task;
6037
6038 rb_native_mutex_lock(&th->interrupt_lock);
6039 {
6040 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6041 }
6042 rb_native_mutex_unlock(&th->interrupt_lock);
6043
6044 if (task) {
6045 (*task->func)(task->data);
6046 ruby_xfree(task);
6047 }
6048 else {
6049 break;
6050 }
6051 }
6052}
6053
6054static void
6055threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6056{
6057 rb_native_mutex_lock(&th->interrupt_lock);
6058 {
6059 struct rb_interrupt_exec_task *task;
6060
6061 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6062 ruby_xfree(task);
6063 }
6064 }
6065 rb_native_mutex_unlock(&th->interrupt_lock);
6066}
6067
6068struct interrupt_ractor_new_thread_func_data {
6069    rb_interrupt_exec_func_t *func;
6070 void *data;
6071};
6072
6073static VALUE
6074interrupt_ractor_new_thread_func(void *data)
6075{
6076    struct interrupt_ractor_new_thread_func_data d = *(struct interrupt_ractor_new_thread_func_data *)data;
6077    ruby_xfree(data);
6078
6079 d.func(d.data);
6080 return Qnil;
6081}
6082
6083static VALUE
6084interrupt_ractor_func(void *data)
6085{
6086 rb_thread_create(interrupt_ractor_new_thread_func, data);
6087 return Qnil;
6088}
6089
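// Flow summary: rb_ractor_interrupt_exec() below packs func/data into a
// heap-allocated struct interrupt_ractor_new_thread_func_data and queues
// interrupt_ractor_func() as an interrupt task on the target ractor's main
// thread. When that thread executes the task it creates a new Ruby thread in
// the target ractor; the new thread copies the packed arguments, frees the
// allocation, and finally calls func(data).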
6090// native thread safe
6091// func/data should be native thread safe
6092void
6093rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6094 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6095{
6096    struct interrupt_ractor_new_thread_func_data *d = ALLOC(struct interrupt_ractor_new_thread_func_data);
6097
6098 d->func = func;
6099 d->data = data;
6100 rb_thread_t *main_th = target_r->threads.main;
6101 rb_threadptr_interrupt_exec(main_th, interrupt_ractor_func, d, flags);
6102
6103 // TODO MEMO: we can create a new thread in a ractor, but not sure how to do that now.
6104}
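
/*
 * Usage sketch (illustrative only; my_callback and run_in_target_ractor are
 * hypothetical names, my_callback is assumed to match rb_interrupt_exec_func_t
 * in taking a single void * argument, and a real caller would pass an
 * appropriate enum rb_interrupt_exec_flag value rather than a bare 0):
 * asking another ractor to run a native-thread-safe callback on a fresh
 * thread of its own.
 *
 *   static void
 *   my_callback(void *arg)
 *   {
 *       // runs inside a new thread created in the target ractor
 *   }
 *
 *   static void
 *   run_in_target_ractor(rb_ractor_t *target_r)
 *   {
 *       rb_ractor_interrupt_exec(target_r, my_callback, NULL,
 *                                (enum rb_interrupt_exec_flag)0);
 *   }
 */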