Ruby 4.0.0dev (2025-12-15 revision e7cf07ba3e6758e3e6d34754422505c75fdf9c80)
thread.c (e7cf07ba3e6758e3e6d34754422505c75fdf9c80)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
 18 Using pthread (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
 21 Using pthread; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of model 1 and 2
25
26 model 5: M:N User:Native thread with fine grain lock
27 Combination of model 1 and 3
28
29------------------------------------------------------------------------
30
31 model 2:
 32 Only the thread that holds the mutex (GVL: Global VM Lock, or Giant VM Lock) can run.
 33 When scheduling threads, the running thread releases the GVL.  If the running
 34 thread performs a blocking operation, it must release the GVL so that another
 35 thread can continue running.  After the blocking operation, the thread
 36 must check for interrupts (RUBY_VM_CHECK_INTS).
 37
 38 Every VM can run in parallel.
 39
 40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
 45 Every thread runs concurrently or in parallel, and exclusive access control
 46 is needed to access shared objects.  For example, to access a String or an
 47 Array object, a fine-grained lock must be taken every time.
48 */
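/*
 * For example, the model 2 rule above ("release the GVL around a blocking
 * operation, then check interrupts") is the pattern implemented by the
 * BLOCKING_REGION() macro and RUBY_VM_CHECK_INTS_BLOCKING() defined later in
 * this file.  A minimal sketch (the blocking call and its unblock function
 * are hypothetical):
 *
 *     BLOCKING_REGION(th, {
 *         result = some_blocking_call(arg);       // runs without the GVL
 *     }, some_unblock_function, arg, FALSE);
 *     RUBY_VM_CHECK_INTS_BLOCKING(th->ec);        // back under the GVL; run pending interrupts
 */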
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later and set _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd size (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). And then when enabling HAVE_RB_FD_INIT,
57 * it doesn't work correctly and makes program abort. Therefore we need to
58 * disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/eval.h"
82#include "internal/gc.h"
83#include "internal/hash.h"
84#include "internal/io.h"
85#include "internal/object.h"
86#include "internal/proc.h"
 87#include "ruby/fiber/scheduler.h"
 88#include "internal/signal.h"
89#include "internal/thread.h"
90#include "internal/time.h"
91#include "internal/warnings.h"
92#include "iseq.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#include "ccan/list/list.h"
104
105#ifndef USE_NATIVE_THREAD_PRIORITY
106#define USE_NATIVE_THREAD_PRIORITY 0
107#define RUBY_THREAD_PRIORITY_MAX 3
108#define RUBY_THREAD_PRIORITY_MIN -3
109#endif
110
111static VALUE rb_cThreadShield;
112static VALUE cThGroup;
113
114static VALUE sym_immediate;
115static VALUE sym_on_blocking;
116static VALUE sym_never;
117
118static uint32_t thread_default_quantum_ms = 100;
119
120#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
121#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
122
123static inline VALUE
124rb_thread_local_storage(VALUE thread)
125{
126 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
127 rb_ivar_set(thread, idLocals, rb_hash_new());
128 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
129 }
130 return rb_ivar_get(thread, idLocals);
131}
132
133enum SLEEP_FLAGS {
134 SLEEP_DEADLOCKABLE = 0x01,
135 SLEEP_SPURIOUS_CHECK = 0x02,
136 SLEEP_ALLOW_SPURIOUS = 0x04,
137 SLEEP_NO_CHECKINTS = 0x08,
138};
139
140static void sleep_forever(rb_thread_t *th, unsigned int fl);
141static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
142
143static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
144static int rb_threadptr_dead(rb_thread_t *th);
145static void rb_check_deadlock(rb_ractor_t *r);
146static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
147static const char *thread_status_name(rb_thread_t *th, int detail);
148static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
149NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
150MAYBE_UNUSED(static int consume_communication_pipe(int fd));
151
152static rb_atomic_t system_working = 1;
153static rb_internal_thread_specific_key_t specific_key_count;
154
155/********************************************************************************/
156
157#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
158
 159struct rb_blocking_region_buffer {
 160 enum rb_thread_status prev_status;
161};
162
163static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
164static void unblock_function_clear(rb_thread_t *th);
165
166static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
167 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
168static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
169
170#define THREAD_BLOCKING_BEGIN(th) do { \
171 struct rb_thread_sched * const sched = TH_SCHED(th); \
172 RB_VM_SAVE_MACHINE_CONTEXT(th); \
173 thread_sched_to_waiting((sched), (th));
174
175#define THREAD_BLOCKING_END(th) \
176 thread_sched_to_running((sched), (th)); \
177 rb_ractor_thread_switch(th->ractor, th, false); \
178} while(0)
179
180#ifdef __GNUC__
181#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
182#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
183#else
184#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
185#endif
186#else
187#define only_if_constant(expr, notconst) notconst
188#endif
189#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
190 struct rb_blocking_region_buffer __region; \
191 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
192 /* always return true unless fail_if_interrupted */ \
193 !only_if_constant(fail_if_interrupted, TRUE)) { \
194 /* Important that this is inlined into the macro, and not part of \
195 * blocking_region_begin - see bug #20493 */ \
196 RB_VM_SAVE_MACHINE_CONTEXT(th); \
197 thread_sched_to_waiting(TH_SCHED(th), th); \
198 exec; \
199 blocking_region_end(th, &__region); \
200 }; \
201} while(0)
202
203/*
204 * returns true if this thread was spuriously interrupted, false otherwise
205 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
206 */
207#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
208static inline int
209vm_check_ints_blocking(rb_execution_context_t *ec)
210{
211#ifdef RUBY_ASSERT_CRITICAL_SECTION
212 VM_ASSERT(ruby_assert_critical_section_entered == 0);
213#endif
214
215 rb_thread_t *th = rb_ec_thread_ptr(ec);
216
217 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
218 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
219 }
220 else {
221 th->pending_interrupt_queue_checked = 0;
222 RUBY_VM_SET_INTERRUPT(ec);
223 }
224
225 int result = rb_threadptr_execute_interrupts(th, 1);
226
227 // When a signal is received, we yield to the scheduler as soon as possible:
 228 if (result || RUBY_VM_INTERRUPTED(ec)) {
 229 VALUE scheduler = rb_fiber_scheduler_current();
 230 if (scheduler != Qnil) {
231 rb_fiber_scheduler_yield(scheduler);
232 }
233 }
234
235 return result;
236}
237
238int
239rb_vm_check_ints_blocking(rb_execution_context_t *ec)
240{
241 return vm_check_ints_blocking(ec);
242}
243
244/*
245 * poll() is supported by many OSes, but so far Linux is the only
246 * one we know of that supports using poll() in all places select()
247 * would work.
248 */
249#if defined(HAVE_POLL)
250# if defined(__linux__)
251# define USE_POLL
252# endif
253# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
254# define USE_POLL
255 /* FreeBSD does not set POLLOUT when POLLHUP happens */
256# define POLLERR_SET (POLLHUP | POLLERR)
257# endif
258#endif
259
260static void
261timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
262 const struct timeval *timeout)
263{
264 if (timeout) {
265 *rel = rb_timeval2hrtime(timeout);
266 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
267 *to = rel;
268 }
269 else {
270 *to = 0;
271 }
272}
273
274MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
275MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
276MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
277
278#include THREAD_IMPL_SRC
279
280/*
281 * TODO: somebody with win32 knowledge should be able to get rid of
282 * timer-thread by busy-waiting on signals. And it should be possible
283 * to make the GVL in thread_pthread.c be platform-independent.
284 */
285#ifndef BUSY_WAIT_SIGNALS
286# define BUSY_WAIT_SIGNALS (0)
287#endif
288
289#ifndef USE_EVENTFD
290# define USE_EVENTFD (0)
291#endif
292
293#include "thread_sync.c"
294
295void
296rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
 297{
 298 rb_native_mutex_initialize(lock);
 299}
300
301void
302rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
 303{
 304 rb_native_mutex_destroy(lock);
 305}
306
307void
308rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
 309{
 310 rb_native_mutex_lock(lock);
 311}
312
313void
314rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
 315{
 316 rb_native_mutex_unlock(lock);
 317}
318
319static int
320unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
321{
322 do {
323 if (fail_if_interrupted) {
324 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
325 return FALSE;
326 }
327 }
328 else {
329 RUBY_VM_CHECK_INTS(th->ec);
330 }
331
332 rb_native_mutex_lock(&th->interrupt_lock);
333 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
334 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
335
336 VM_ASSERT(th->unblock.func == NULL);
337
338 th->unblock.func = func;
339 th->unblock.arg = arg;
340 rb_native_mutex_unlock(&th->interrupt_lock);
341
342 return TRUE;
343}
344
345static void
346unblock_function_clear(rb_thread_t *th)
347{
348 rb_native_mutex_lock(&th->interrupt_lock);
349 th->unblock.func = 0;
350 rb_native_mutex_unlock(&th->interrupt_lock);
351}
352
353static void
354threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
355{
356 // th->interrupt_lock should be acquired here
357
358 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
359
360 if (trap) {
361 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
362 }
363 else {
364 RUBY_VM_SET_INTERRUPT(th->ec);
365 }
366
367 if (th->unblock.func != NULL) {
368 (th->unblock.func)(th->unblock.arg);
369 }
370 else {
371 /* none */
372 }
373}
374
375static void
376threadptr_set_interrupt(rb_thread_t *th, int trap)
377{
378 rb_native_mutex_lock(&th->interrupt_lock);
379 {
380 threadptr_set_interrupt_locked(th, trap);
381 }
382 rb_native_mutex_unlock(&th->interrupt_lock);
383}
384
385/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
386void
387rb_threadptr_interrupt(rb_thread_t *th)
388{
389 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
390 threadptr_set_interrupt(th, false);
391}
392
393static void
394threadptr_trap_interrupt(rb_thread_t *th)
395{
396 threadptr_set_interrupt(th, true);
397}
398
399static void
400terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
401{
402 rb_thread_t *th = 0;
403
404 ccan_list_for_each(&r->threads.set, th, lt_node) {
405 if (th != main_thread) {
406 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
407
408 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
409 rb_threadptr_interrupt(th);
410
411 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
412 }
413 else {
414 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
415 }
416 }
417}
418
419static void
420rb_threadptr_join_list_wakeup(rb_thread_t *thread)
421{
422 while (thread->join_list) {
423 struct rb_waiting_list *join_list = thread->join_list;
424
425 // Consume the entry from the join list:
426 thread->join_list = join_list->next;
427
428 rb_thread_t *target_thread = join_list->thread;
429
430 if (target_thread->scheduler != Qnil && join_list->fiber) {
431 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
432 }
433 else {
434 rb_threadptr_interrupt(target_thread);
435
436 switch (target_thread->status) {
437 case THREAD_STOPPED:
438 case THREAD_STOPPED_FOREVER:
439 target_thread->status = THREAD_RUNNABLE;
440 break;
441 default:
442 break;
443 }
444 }
445 }
446}
447
448void
449rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
450{
451 while (th->keeping_mutexes) {
452 rb_mutex_t *mutex = th->keeping_mutexes;
453 th->keeping_mutexes = mutex->next_mutex;
454
455 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
456 VM_ASSERT(mutex->fiber_serial);
457 const char *error_message = rb_mutex_unlock_th(mutex, th, NULL);
458 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
459 }
460}
461
462void
463rb_thread_terminate_all(rb_thread_t *th)
464{
465 rb_ractor_t *cr = th->ractor;
466 rb_execution_context_t * volatile ec = th->ec;
467 volatile int sleeping = 0;
468
469 if (cr->threads.main != th) {
470 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
471 (void *)cr->threads.main, (void *)th);
472 }
473
474 /* unlock all locking mutexes */
475 rb_threadptr_unlock_all_locking_mutexes(th);
476
477 EC_PUSH_TAG(ec);
478 if (EC_EXEC_TAG() == TAG_NONE) {
479 retry:
480 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
481
482 terminate_all(cr, th);
483
484 while (rb_ractor_living_thread_num(cr) > 1) {
485 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
 486 /*
 487 * The thread-exit routine in thread_start_func_2 notifies
 488 * us when the last sub-thread exits.
489 */
490 sleeping = 1;
491 native_sleep(th, &rel);
492 RUBY_VM_CHECK_INTS_BLOCKING(ec);
493 sleeping = 0;
494 }
495 }
496 else {
497 /*
498 * When caught an exception (e.g. Ctrl+C), let's broadcast
499 * kill request again to ensure killing all threads even
500 * if they are blocked on sleep, mutex, etc.
501 */
502 if (sleeping) {
503 sleeping = 0;
504 goto retry;
505 }
506 }
507 EC_POP_TAG();
508}
509
510void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
511static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
512
513static void
514thread_cleanup_func_before_exec(void *th_ptr)
515{
516 rb_thread_t *th = th_ptr;
517 th->status = THREAD_KILLED;
518
519 // The thread stack doesn't exist in the forked process:
520 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
521
522 threadptr_interrupt_exec_cleanup(th);
523 rb_threadptr_root_fiber_terminate(th);
524}
525
526static void
527thread_cleanup_func(void *th_ptr, int atfork)
528{
529 rb_thread_t *th = th_ptr;
530
531 th->locking_mutex = Qfalse;
532 thread_cleanup_func_before_exec(th_ptr);
533
534 if (atfork) {
535 native_thread_destroy_atfork(th->nt);
536 th->nt = NULL;
537 return;
538 }
539
540 rb_native_mutex_destroy(&th->interrupt_lock);
541}
542
543void
544rb_thread_free_native_thread(void *th_ptr)
545{
546 rb_thread_t *th = th_ptr;
547
548 native_thread_destroy_atfork(th->nt);
549 th->nt = NULL;
550}
551
552static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
553static VALUE rb_thread_to_s(VALUE thread);
554
555void
556ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
557{
558 native_thread_init_stack(th, local_in_parent_frame);
559}
560
561const VALUE *
562rb_vm_proc_local_ep(VALUE proc)
563{
564 const VALUE *ep = vm_proc_ep(proc);
565
566 if (ep) {
567 return rb_vm_ep_local_ep(ep);
568 }
569 else {
570 return NULL;
571 }
572}
573
574// for ractor, defined in vm.c
575VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
576 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
577
578static VALUE
579thread_do_start_proc(rb_thread_t *th)
580{
581 VALUE args = th->invoke_arg.proc.args;
582 const VALUE *args_ptr;
583 int args_len;
584 VALUE procval = th->invoke_arg.proc.proc;
585 rb_proc_t *proc;
586 GetProcPtr(procval, proc);
587
588 th->ec->errinfo = Qnil;
589 th->ec->root_lep = rb_vm_proc_local_ep(procval);
590 th->ec->root_svar = Qfalse;
591
592 vm_check_ints_blocking(th->ec);
593
594 if (th->invoke_type == thread_invoke_type_ractor_proc) {
595 VALUE self = rb_ractor_self(th->ractor);
596 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
597
598 VM_ASSERT(FIXNUM_P(args));
599 args_len = FIX2INT(args);
600 args_ptr = ALLOCA_N(VALUE, args_len);
601 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
602 vm_check_ints_blocking(th->ec);
603
604 return rb_vm_invoke_proc_with_self(
605 th->ec, proc, self,
606 args_len, args_ptr,
607 th->invoke_arg.proc.kw_splat,
608 VM_BLOCK_HANDLER_NONE
609 );
610 }
611 else {
612 args_len = RARRAY_LENINT(args);
613 if (args_len < 8) {
 614 /* free proc.args if the length is small enough */
615 args_ptr = ALLOCA_N(VALUE, args_len);
616 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
617 th->invoke_arg.proc.args = Qnil;
618 }
619 else {
620 args_ptr = RARRAY_CONST_PTR(args);
621 }
622
623 vm_check_ints_blocking(th->ec);
624
625 return rb_vm_invoke_proc(
626 th->ec, proc,
627 args_len, args_ptr,
628 th->invoke_arg.proc.kw_splat,
629 VM_BLOCK_HANDLER_NONE
630 );
631 }
632}
633
634static VALUE
635thread_do_start(rb_thread_t *th)
636{
637 native_set_thread_name(th);
638 VALUE result = Qundef;
639
640 switch (th->invoke_type) {
641 case thread_invoke_type_proc:
642 result = thread_do_start_proc(th);
643 break;
644
645 case thread_invoke_type_ractor_proc:
646 result = thread_do_start_proc(th);
647 rb_ractor_atexit(th->ec, result);
648 break;
649
650 case thread_invoke_type_func:
651 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
652 break;
653
654 case thread_invoke_type_none:
655 rb_bug("unreachable");
656 }
657
658 return result;
659}
660
661void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
662
663static int
664thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
665{
666 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
667 VM_ASSERT(th != th->vm->ractor.main_thread);
668
669 enum ruby_tag_type state;
670 VALUE errinfo = Qnil;
671 rb_thread_t *ractor_main_th = th->ractor->threads.main;
672
673 // setup ractor
674 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
675 RB_VM_LOCK();
676 {
677 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
678 rb_ractor_t *r = th->ractor;
679 r->r_stdin = rb_io_prep_stdin();
680 r->r_stdout = rb_io_prep_stdout();
681 r->r_stderr = rb_io_prep_stderr();
682 }
683 RB_VM_UNLOCK();
684 }
685
686 // Ensure that we are not joinable.
687 VM_ASSERT(UNDEF_P(th->value));
688
689 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
690 VALUE result = Qundef;
691
692 EC_PUSH_TAG(th->ec);
693
694 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
695 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
696
697 result = thread_do_start(th);
698 }
699
700 if (!fiber_scheduler_closed) {
 701 fiber_scheduler_closed = 1;
 702 rb_fiber_scheduler_set(Qnil);
 703 }
704
705 if (!event_thread_end_hooked) {
706 event_thread_end_hooked = 1;
707 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
708 }
709
710 if (state == TAG_NONE) {
711 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
712 th->value = result;
713 }
714 else {
715 errinfo = th->ec->errinfo;
716
717 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
718 if (!NIL_P(exc)) errinfo = exc;
719
720 if (state == TAG_FATAL) {
721 if (th->invoke_type == thread_invoke_type_ractor_proc) {
722 rb_ractor_atexit(th->ec, Qnil);
723 }
724 /* fatal error within this thread, need to stop whole script */
725 }
726 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
727 if (th->invoke_type == thread_invoke_type_ractor_proc) {
728 rb_ractor_atexit_exception(th->ec);
729 }
730
731 /* exit on main_thread. */
732 }
733 else {
734 if (th->report_on_exception) {
735 VALUE mesg = rb_thread_to_s(th->self);
736 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
737 rb_write_error_str(mesg);
738 rb_ec_error_print(th->ec, errinfo);
739 }
740
741 if (th->invoke_type == thread_invoke_type_ractor_proc) {
742 rb_ractor_atexit_exception(th->ec);
743 }
744
745 if (th->vm->thread_abort_on_exception ||
746 th->abort_on_exception || RTEST(ruby_debug)) {
747 /* exit on main_thread */
748 }
749 else {
750 errinfo = Qnil;
751 }
752 }
753 th->value = Qnil;
754 }
755
756 // The thread is effectively finished and can be joined.
757 VM_ASSERT(!UNDEF_P(th->value));
758
759 rb_threadptr_join_list_wakeup(th);
760 rb_threadptr_unlock_all_locking_mutexes(th);
761
762 if (th->invoke_type == thread_invoke_type_ractor_proc) {
763 rb_thread_terminate_all(th);
764 rb_ractor_teardown(th->ec);
765 }
766
767 th->status = THREAD_KILLED;
768 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
769
770 if (th->vm->ractor.main_thread == th) {
771 ruby_stop(0);
772 }
773
774 if (RB_TYPE_P(errinfo, T_OBJECT)) {
775 /* treat with normal error object */
776 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
777 }
778
779 EC_POP_TAG();
780
781 rb_ec_clear_current_thread_trace_func(th->ec);
782
783 /* locking_mutex must be Qfalse */
784 if (th->locking_mutex != Qfalse) {
785 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
786 (void *)th, th->locking_mutex);
787 }
788
789 if (ractor_main_th->status == THREAD_KILLED &&
790 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
791 /* I'm last thread. wake up main thread from rb_thread_terminate_all */
792 rb_threadptr_interrupt(ractor_main_th);
793 }
794
795 rb_check_deadlock(th->ractor);
796
797 rb_fiber_close(th->ec->fiber_ptr);
798
799 thread_cleanup_func(th, FALSE);
800 VM_ASSERT(th->ec->vm_stack == NULL);
801
802 if (th->invoke_type == thread_invoke_type_ractor_proc) {
803 // after rb_ractor_living_threads_remove()
804 // GC will happen anytime and this ractor can be collected (and destroy GVL).
805 // So gvl_release() should be before it.
806 thread_sched_to_dead(TH_SCHED(th), th);
807 rb_ractor_living_threads_remove(th->ractor, th);
808 }
809 else {
810 rb_ractor_living_threads_remove(th->ractor, th);
811 thread_sched_to_dead(TH_SCHED(th), th);
812 }
813
814 return 0;
815}
 816
 817struct thread_create_params {
 818 enum thread_invoke_type type;
819
820 // for normal proc thread
821 VALUE args;
822 VALUE proc;
823
824 // for ractor
825 rb_ractor_t *g;
826
827 // for func
828 VALUE (*fn)(void *);
829};
830
831static void thread_specific_storage_alloc(rb_thread_t *th);
832
833static VALUE
834thread_create_core(VALUE thval, struct thread_create_params *params)
835{
836 rb_execution_context_t *ec = GET_EC();
837 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
838 int err;
839
840 thread_specific_storage_alloc(th);
841
842 if (OBJ_FROZEN(current_th->thgroup)) {
843 rb_raise(rb_eThreadError,
844 "can't start a new thread (frozen ThreadGroup)");
845 }
846
847 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
848
849 switch (params->type) {
850 case thread_invoke_type_proc:
851 th->invoke_type = thread_invoke_type_proc;
852 th->invoke_arg.proc.args = params->args;
853 th->invoke_arg.proc.proc = params->proc;
854 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
855 break;
856
857 case thread_invoke_type_ractor_proc:
858#if RACTOR_CHECK_MODE > 0
859 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
860#endif
861 th->invoke_type = thread_invoke_type_ractor_proc;
862 th->ractor = params->g;
863 th->ractor->threads.main = th;
864 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc, Qnil);
865 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
866 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
867 rb_ractor_send_parameters(ec, params->g, params->args);
868 break;
869
870 case thread_invoke_type_func:
871 th->invoke_type = thread_invoke_type_func;
872 th->invoke_arg.func.func = params->fn;
873 th->invoke_arg.func.arg = (void *)params->args;
874 break;
875
876 default:
877 rb_bug("unreachable");
878 }
879
880 th->priority = current_th->priority;
881 th->thgroup = current_th->thgroup;
882
883 th->pending_interrupt_queue = rb_ary_hidden_new(0);
884 th->pending_interrupt_queue_checked = 0;
885 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
886 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
887
888 rb_native_mutex_initialize(&th->interrupt_lock);
889
890 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
891
892 rb_ractor_living_threads_insert(th->ractor, th);
893
894 /* kick thread */
895 err = native_thread_create(th);
896 if (err) {
897 th->status = THREAD_KILLED;
898 rb_ractor_living_threads_remove(th->ractor, th);
899 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
900 }
901 return thval;
902}
903
904#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
905
906/*
907 * call-seq:
908 * Thread.new { ... } -> thread
909 * Thread.new(*args, &proc) -> thread
910 * Thread.new(*args) { |args| ... } -> thread
911 *
912 * Creates a new thread executing the given block.
913 *
914 * Any +args+ given to ::new will be passed to the block:
915 *
916 * arr = []
917 * a, b, c = 1, 2, 3
918 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
919 * arr #=> [1, 2, 3]
920 *
921 * A ThreadError exception is raised if ::new is called without a block.
922 *
923 * If you're going to subclass Thread, be sure to call super in your
924 * +initialize+ method, otherwise a ThreadError will be raised.
925 */
926static VALUE
927thread_s_new(int argc, VALUE *argv, VALUE klass)
928{
929 rb_thread_t *th;
930 VALUE thread = rb_thread_alloc(klass);
931
932 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
933 rb_raise(rb_eThreadError, "can't alloc thread");
934 }
935
936 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
937 th = rb_thread_ptr(thread);
938 if (!threadptr_initialized(th)) {
939 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
940 klass);
941 }
942 return thread;
943}
944
945/*
946 * call-seq:
947 * Thread.start([args]*) {|args| block } -> thread
948 * Thread.fork([args]*) {|args| block } -> thread
949 *
950 * Basically the same as ::new. However, if class Thread is subclassed, then
951 * calling +start+ in that subclass will not invoke the subclass's
952 * +initialize+ method.
953 */
954
955static VALUE
956thread_start(VALUE klass, VALUE args)
957{
958 struct thread_create_params params = {
959 .type = thread_invoke_type_proc,
960 .args = args,
961 .proc = rb_block_proc(),
962 };
963 return thread_create_core(rb_thread_alloc(klass), &params);
964}
965
966static VALUE
967threadptr_invoke_proc_location(rb_thread_t *th)
968{
969 if (th->invoke_type == thread_invoke_type_proc) {
970 return rb_proc_location(th->invoke_arg.proc.proc);
971 }
972 else {
973 return Qnil;
974 }
975}
976
977/* :nodoc: */
978static VALUE
979thread_initialize(VALUE thread, VALUE args)
980{
981 rb_thread_t *th = rb_thread_ptr(thread);
982
983 if (!rb_block_given_p()) {
984 rb_raise(rb_eThreadError, "must be called with a block");
985 }
986 else if (th->invoke_type != thread_invoke_type_none) {
987 VALUE loc = threadptr_invoke_proc_location(th);
988 if (!NIL_P(loc)) {
989 rb_raise(rb_eThreadError,
990 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
991 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
992 }
993 else {
994 rb_raise(rb_eThreadError, "already initialized thread");
995 }
996 }
997 else {
998 struct thread_create_params params = {
999 .type = thread_invoke_type_proc,
1000 .args = args,
1001 .proc = rb_block_proc(),
1002 };
1003 return thread_create_core(thread, &params);
1004 }
1005}
1006
1007VALUE
1008rb_thread_create(VALUE (*fn)(void *), void *arg)
1009{
1010 struct thread_create_params params = {
1011 .type = thread_invoke_type_func,
1012 .fn = fn,
1013 .args = (VALUE)arg,
1014 };
1015 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1016}
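/*
 * Example use from a C extension (a minimal sketch; `worker` and its string
 * argument are hypothetical):
 *
 *     static VALUE
 *     worker(void *arg)
 *     {
 *         // Runs in the new Ruby thread, with the GVL held.
 *         return rb_str_new_cstr((const char *)arg);
 *     }
 *
 *     VALUE thread = rb_thread_create(worker, (void *)"hello");
 *     VALUE value  = rb_funcall(thread, rb_intern("value"), 0);  // joins; returns "hello"
 */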
1017
1018VALUE
1019rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
1020{
1021 struct thread_create_params params = {
1022 .type = thread_invoke_type_ractor_proc,
1023 .g = r,
1024 .args = args,
1025 .proc = proc,
1026 };
1027 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1028}
1029
1031struct join_arg {
1032 struct rb_waiting_list *waiter;
1033 rb_thread_t *target;
1034 VALUE timeout;
1035 rb_hrtime_t *limit;
1036};
1037
1038static VALUE
1039remove_from_join_list(VALUE arg)
1040{
1041 struct join_arg *p = (struct join_arg *)arg;
1042 rb_thread_t *target_thread = p->target;
1043
1044 if (target_thread->status != THREAD_KILLED) {
1045 struct rb_waiting_list **join_list = &target_thread->join_list;
1046
1047 while (*join_list) {
1048 if (*join_list == p->waiter) {
1049 *join_list = (*join_list)->next;
1050 break;
1051 }
1052
1053 join_list = &(*join_list)->next;
1054 }
1055 }
1056
1057 return Qnil;
1058}
1059
1060static int
1061thread_finished(rb_thread_t *th)
1062{
1063 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1064}
1065
1066static VALUE
1067thread_join_sleep(VALUE arg)
1068{
1069 struct join_arg *p = (struct join_arg *)arg;
1070 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1071 rb_hrtime_t end = 0, *limit = p->limit;
1072
1073 if (limit) {
1074 end = rb_hrtime_add(*limit, rb_hrtime_now());
1075 }
1076
1077 while (!thread_finished(target_th)) {
 1078 VALUE scheduler = rb_fiber_scheduler_current();
 1079
1080 if (!limit) {
1081 if (scheduler != Qnil) {
1082 rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
1083 }
1084 else {
1085 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1086 }
1087 }
1088 else {
1089 if (hrtime_update_expire(limit, end)) {
1090 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1091 return Qfalse;
1092 }
1093
1094 if (scheduler != Qnil) {
1095 VALUE timeout = rb_float_new(hrtime2double(*limit));
1096 rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
1097 }
1098 else {
1099 th->status = THREAD_STOPPED;
1100 native_sleep(th, limit);
1101 }
1102 }
1103 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1104 th->status = THREAD_RUNNABLE;
1105
1106 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1107 }
1108
1109 return Qtrue;
1110}
1111
1112static VALUE
1113thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1114{
1115 rb_execution_context_t *ec = GET_EC();
1116 rb_thread_t *th = ec->thread_ptr;
1117 rb_fiber_t *fiber = ec->fiber_ptr;
1118
1119 if (th == target_th) {
1120 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1121 }
1122
1123 if (th->ractor->threads.main == target_th) {
1124 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1125 }
1126
1127 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1128
1129 if (target_th->status != THREAD_KILLED) {
1130 struct rb_waiting_list waiter;
1131 waiter.next = target_th->join_list;
1132 waiter.thread = th;
1133 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1134 target_th->join_list = &waiter;
1135
1136 struct join_arg arg;
1137 arg.waiter = &waiter;
1138 arg.target = target_th;
1139 arg.timeout = timeout;
1140 arg.limit = limit;
1141
1142 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1143 return Qnil;
1144 }
1145 }
1146
1147 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1148
1149 if (target_th->ec->errinfo != Qnil) {
1150 VALUE err = target_th->ec->errinfo;
1151
1152 if (FIXNUM_P(err)) {
1153 switch (err) {
1154 case INT2FIX(TAG_FATAL):
1155 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1156
1157 /* OK. killed. */
1158 break;
1159 default:
1160 if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
1161 // root fiber killed in non-main thread
1162 break;
1163 }
1164 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1165 }
1166 }
1167 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1168 rb_bug("thread_join: THROW_DATA should not reach here.");
1169 }
1170 else {
1171 /* normal exception */
1172 rb_exc_raise(err);
1173 }
1174 }
1175 return target_th->self;
1176}
1177
1178/*
1179 * call-seq:
1180 * thr.join -> thr
1181 * thr.join(limit) -> thr
1182 *
1183 * The calling thread will suspend execution and run this +thr+.
1184 *
1185 * Does not return until +thr+ exits or until the given +limit+ seconds have
1186 * passed.
1187 *
1188 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1189 * returned.
1190 *
1191 * Any threads not joined will be killed when the main program exits.
1192 *
1193 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1194 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1195 * will be processed at this time.
1196 *
1197 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1198 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1199 * x.join # Let thread x finish, thread a will be killed on exit.
1200 * #=> "axyz"
1201 *
1202 * The following example illustrates the +limit+ parameter.
1203 *
1204 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1205 * puts "Waiting" until y.join(0.15)
1206 *
1207 * This will produce:
1208 *
1209 * tick...
1210 * Waiting
1211 * tick...
1212 * Waiting
1213 * tick...
1214 * tick...
1215 */
1216
1217static VALUE
1218thread_join_m(int argc, VALUE *argv, VALUE self)
1219{
1220 VALUE timeout = Qnil;
1221 rb_hrtime_t rel = 0, *limit = 0;
1222
1223 if (rb_check_arity(argc, 0, 1)) {
1224 timeout = argv[0];
1225 }
1226
1227 // Convert the timeout eagerly, so it's always converted and deterministic
1228 /*
1229 * This supports INFINITY and negative values, so we can't use
1230 * rb_time_interval right now...
1231 */
1232 if (NIL_P(timeout)) {
1233 /* unlimited */
1234 }
1235 else if (FIXNUM_P(timeout)) {
1236 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1237 limit = &rel;
1238 }
1239 else {
1240 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1241 }
1242
1243 return thread_join(rb_thread_ptr(self), timeout, limit);
1244}
1245
1246/*
1247 * call-seq:
1248 * thr.value -> obj
1249 *
1250 * Waits for +thr+ to complete, using #join, and returns its value or raises
1251 * the exception which terminated the thread.
1252 *
1253 * a = Thread.new { 2 + 2 }
1254 * a.value #=> 4
1255 *
1256 * b = Thread.new { raise 'something went wrong' }
1257 * b.value #=> RuntimeError: something went wrong
1258 */
1259
1260static VALUE
1261thread_value(VALUE self)
1262{
1263 rb_thread_t *th = rb_thread_ptr(self);
1264 thread_join(th, Qnil, 0);
1265 if (UNDEF_P(th->value)) {
1266 // If the thread is dead because we forked th->value is still Qundef.
1267 return Qnil;
1268 }
1269 return th->value;
1270}
1271
1272/*
1273 * Thread Scheduling
1274 */
1275
1276static void
1277getclockofday(struct timespec *ts)
1278{
1279#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1280 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1281 return;
1282#endif
1283 rb_timespec_now(ts);
1284}
1285
1286/*
1287 * Don't inline this, since library call is already time consuming
1288 * and we don't want "struct timespec" on stack too long for GC
1289 */
1290NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1291rb_hrtime_t
1292rb_hrtime_now(void)
1293{
1294 struct timespec ts;
1295
1296 getclockofday(&ts);
1297 return rb_timespec2hrtime(&ts);
1298}
1299
1300/*
1301 * at least gcc 7.2 and 7.3 complains about "rb_hrtime_t end"
1302 * being uninitialized, maybe other versions, too.
1303 */
1304COMPILER_WARNING_PUSH
1305#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1306COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1307#endif
1308#ifndef PRIu64
1309#define PRIu64 PRI_64_PREFIX "u"
1310#endif
1311/*
1312 * @end is the absolute time when @ts is set to expire
 1313 * Returns true if @end has passed
1314 * Updates @ts and returns false otherwise
1315 */
1316static int
1317hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1318{
1319 rb_hrtime_t now = rb_hrtime_now();
1320
1321 if (now > end) return 1;
1322
1323 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1324
1325 *timeout = end - now;
1326 return 0;
1327}
1328COMPILER_WARNING_POP
1329
1330static int
1331sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1332{
1333 enum rb_thread_status prev_status = th->status;
1334 int woke;
1335 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1336
1337 th->status = THREAD_STOPPED;
1338 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1339 while (th->status == THREAD_STOPPED) {
1340 native_sleep(th, &rel);
1341 woke = vm_check_ints_blocking(th->ec);
1342 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1343 break;
1344 if (hrtime_update_expire(&rel, end))
1345 break;
1346 woke = 1;
1347 }
1348 th->status = prev_status;
1349 return woke;
1350}
1351
1352static int
1353sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1354{
1355 enum rb_thread_status prev_status = th->status;
1356 int woke;
1357 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1358
1359 th->status = THREAD_STOPPED;
1360 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1361 while (th->status == THREAD_STOPPED) {
1362 native_sleep(th, &rel);
1363 woke = vm_check_ints_blocking(th->ec);
1364 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1365 break;
1366 if (hrtime_update_expire(&rel, end))
1367 break;
1368 woke = 1;
1369 }
1370 th->status = prev_status;
1371 return woke;
1372}
1373
1374static void
1375sleep_forever(rb_thread_t *th, unsigned int fl)
1376{
1377 enum rb_thread_status prev_status = th->status;
1378 enum rb_thread_status status;
1379 int woke;
1380
1381 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1382 th->status = status;
1383
1384 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1385
1386 while (th->status == status) {
1387 if (fl & SLEEP_DEADLOCKABLE) {
1388 rb_ractor_sleeper_threads_inc(th->ractor);
1389 rb_check_deadlock(th->ractor);
1390 }
1391 {
1392 native_sleep(th, 0);
1393 }
1394 if (fl & SLEEP_DEADLOCKABLE) {
1395 rb_ractor_sleeper_threads_dec(th->ractor);
1396 }
1397 if (fl & SLEEP_ALLOW_SPURIOUS) {
1398 break;
1399 }
1400
1401 woke = vm_check_ints_blocking(th->ec);
1402
1403 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1404 break;
1405 }
1406 }
1407 th->status = prev_status;
1408}
1409
1410void
 1411rb_thread_sleep_forever(void)
 1412{
1413 RUBY_DEBUG_LOG("forever");
1414 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1415}
1416
1417void
 1418rb_thread_sleep_deadly(void)
 1419{
1420 RUBY_DEBUG_LOG("deadly");
1421 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1422}
1423
1424static void
1425rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1426{
1427 rb_thread_t *th = GET_THREAD();
 1428 VALUE scheduler = rb_fiber_scheduler_current();
 1429 if (scheduler != Qnil) {
1430 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1431 }
1432 else {
1433 RUBY_DEBUG_LOG("...");
1434 if (end) {
1435 sleep_hrtime_until(th, end, SLEEP_SPURIOUS_CHECK);
1436 }
1437 else {
1438 sleep_forever(th, SLEEP_DEADLOCKABLE);
1439 }
1440 }
1441}
1442
1443void
1444rb_thread_wait_for(struct timeval time)
1445{
1446 rb_thread_t *th = GET_THREAD();
1447
1448 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1449}
1450
1451void
1452rb_ec_check_ints(rb_execution_context_t *ec)
1453{
1454 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1455}
1456
1457/*
1458 * CAUTION: This function causes thread switching.
 1459 * rb_thread_check_ints() checks Ruby's pending interrupts.
 1460 * Some interrupts require thread switching, invoking handlers,
 1461 * and so on.
1462 */
1463
1464void
 1465rb_thread_check_ints(void)
 1466{
1467 rb_ec_check_ints(GET_EC());
1468}
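/*
 * Example (a minimal sketch; the work loop and do_one_step() are
 * hypothetical): long-running C code that keeps the GVL should call
 * rb_thread_check_ints() periodically so Thread#kill, signals, and other
 * pending interrupts are serviced:
 *
 *     for (long i = 0; i < iterations; i++) {
 *         do_one_step(i);
 *         if ((i % 1000) == 0) {
 *             rb_thread_check_ints();  // may switch threads or raise a pending exception
 *         }
 *     }
 */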
1469
1470/*
1471 * Hidden API for tcl/tk wrapper.
1472 * There is no guarantee to perpetuate it.
1473 */
1474int
1475rb_thread_check_trap_pending(void)
1476{
1477 return rb_signal_buff_size() != 0;
1478}
1479
1480/* This function can be called in blocking region. */
 1481int
 1482rb_thread_interrupted(VALUE thval)
 1483{
1484 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1485}
1486
1487void
1488rb_thread_sleep(int sec)
1489{
 1490 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
 1491}
1492
1493static void
1494rb_thread_schedule_limits(uint32_t limits_us)
1495{
1496 if (!rb_thread_alone()) {
1497 rb_thread_t *th = GET_THREAD();
1498 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1499
1500 if (th->running_time_us >= limits_us) {
1501 RUBY_DEBUG_LOG("switch %s", "start");
1502
1503 RB_VM_SAVE_MACHINE_CONTEXT(th);
1504 thread_sched_yield(TH_SCHED(th), th);
1505 rb_ractor_thread_switch(th->ractor, th, true);
1506
1507 RUBY_DEBUG_LOG("switch %s", "done");
1508 }
1509 }
1510}
1511
1512void
 1513rb_thread_schedule(void)
 1514{
1515 rb_thread_schedule_limits(0);
1516 RUBY_VM_CHECK_INTS(GET_EC());
1517}
1518
1519/* blocking region */
1520
1521static inline int
1522blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1523 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1524{
1525#ifdef RUBY_ASSERT_CRITICAL_SECTION
1526 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1527#endif
1528 VM_ASSERT(th == GET_THREAD());
1529
1530 region->prev_status = th->status;
1531 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1532 th->blocking_region_buffer = region;
1533 th->status = THREAD_STOPPED;
1534 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1535
1536 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1537 return TRUE;
1538 }
1539 else {
1540 return FALSE;
1541 }
1542}
1543
1544static inline void
1545blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1546{
1547 /* entry to ubf_list still permitted at this point, make it impossible: */
1548 unblock_function_clear(th);
1549 /* entry to ubf_list impossible at this point, so unregister is safe: */
1550 unregister_ubf_list(th);
1551
1552 thread_sched_to_running(TH_SCHED(th), th);
1553 rb_ractor_thread_switch(th->ractor, th, false);
1554
1555 th->blocking_region_buffer = 0;
1556 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1557 if (th->status == THREAD_STOPPED) {
1558 th->status = region->prev_status;
1559 }
1560
1561 RUBY_DEBUG_LOG("end");
1562
1563#ifndef _WIN32
1564 // GET_THREAD() clears WSAGetLastError()
1565 VM_ASSERT(th == GET_THREAD());
1566#endif
1567}
1568
1569/*
1570 * Resolve sentinel unblock function values to their actual function pointers
1571 * and appropriate data2 values. This centralizes the logic for handling
1572 * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
1573 *
1574 * @param unblock_function Pointer to unblock function pointer (modified in place)
1575 * @param data2 Pointer to data2 pointer (modified in place)
1576 * @param thread Thread context for resolving data2 when needed
1577 * @return true if sentinel values were resolved, false otherwise
1578 */
1579bool
1580rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
1581{
1582 rb_unblock_function_t *ubf = *unblock_function;
1583
1584 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1585 *unblock_function = ubf_select;
1586 *data2 = thread;
1587 return true;
1588 }
1589 return false;
1590}
1591
1592void *
1593rb_nogvl(void *(*func)(void *), void *data1,
1594 rb_unblock_function_t *ubf, void *data2,
1595 int flags)
1596{
1597 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1598 VALUE scheduler = rb_fiber_scheduler_current();
1599 if (scheduler != Qnil) {
 1600 struct rb_fiber_scheduler_blocking_operation_state state = {0};
 1601
1602 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1603
1604 if (!UNDEF_P(result)) {
1605 rb_errno_set(state.saved_errno);
1606 return state.result;
1607 }
1608 }
1609 }
1610
1611 void *val = 0;
1612 rb_execution_context_t *ec = GET_EC();
1613 rb_thread_t *th = rb_ec_thread_ptr(ec);
1614 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1615 bool is_main_thread = vm->ractor.main_thread == th;
1616 int saved_errno = 0;
1617
1618 rb_thread_resolve_unblock_function(&ubf, &data2, th);
1619
1620 if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1621 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1622 vm->ubf_async_safe = 1;
1623 }
1624 }
1625
1626 rb_vm_t *volatile saved_vm = vm;
1627 BLOCKING_REGION(th, {
1628 val = func(data1);
1629 saved_errno = rb_errno();
1630 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1631 vm = saved_vm;
1632
1633 if (is_main_thread) vm->ubf_async_safe = 0;
1634
1635 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1636 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1637 }
1638
1639 rb_errno_set(saved_errno);
1640
1641 return val;
1642}
1643
1644/*
1645 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1646 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1647 * without interrupt process.
1648 *
1649 * rb_thread_call_without_gvl() does:
1650 * (1) Check interrupts.
1651 * (2) release GVL.
1652 * Other Ruby threads may run in parallel.
1653 * (3) call func with data1
1654 * (4) acquire GVL.
1655 * Other Ruby threads can not run in parallel any more.
1656 * (5) Check interrupts.
1657 *
1658 * rb_thread_call_without_gvl2() does:
1659 * (1) Check interrupt and return if interrupted.
1660 * (2) release GVL.
1661 * (3) call func with data1 and a pointer to the flags.
1662 * (4) acquire GVL.
1663 *
1664 * If another thread interrupts this thread (Thread#kill, signal delivery,
1665 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1666 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1667 * toggling a cancellation flag, canceling the invocation of a call inside
1668 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1669 *
1670 * There are built-in ubfs and you can specify these ubfs:
1671 *
1672 * * RUBY_UBF_IO: ubf for IO operation
1673 * * RUBY_UBF_PROCESS: ubf for process operation
1674 *
1675 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1676 * correctly. Be careful to use rb_thread_call_without_gvl(). If you don't
1677 * provide proper ubf(), your program will not stop for Control+C or other
1678 * shutdown events.
1679 *
1680 * "Check interrupts" on above list means checking asynchronous
1681 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1682 * request, and so on) and calling corresponding procedures
1683 * (such as `trap' for signals, raise an exception for Thread#raise).
1684 * If `func()' finished and received interrupts, you may skip interrupt
 1685 * checking.  For example, assume the following func(), which reads data from a file.
1686 *
1687 * read_func(...) {
1688 * // (a) before read
1689 * read(buffer); // (b) reading
1690 * // (c) after read
1691 * }
1692 *
1693 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1694 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1695 * at (c), after *read* operation is completed, checking interrupts is harmful
 1696 * because it causes an irrevocable side effect: the read data will vanish.  To
1697 * avoid such problem, the `read_func()' should be used with
1698 * `rb_thread_call_without_gvl2()'.
1699 *
1700 * If `rb_thread_call_without_gvl2()' detects interrupt, it returns
1701 * immediately. This function does not show when the execution was interrupted.
1702 * For example, there are 4 possible timing (a), (b), (c) and before calling
1703 * read_func(). You need to record progress of a read_func() and check
1704 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 1705 * `rb_thread_check_ints()' appropriately, or your program will not be able to
 1706 * handle events such as `trap' and so on.
1707 *
1708 * NOTE: You can not execute most of Ruby C API and touch Ruby
1709 * objects in `func()' and `ubf()', including raising an
1710 * exception, because current thread doesn't acquire GVL
1711 * (it causes synchronization problems). If you need to
1712 * call ruby functions either use rb_thread_call_with_gvl()
1713 * or read source code of C APIs and confirm safety by
1714 * yourself.
1715 *
1716 * NOTE: In short, this API is difficult to use safely. I recommend you
 1717 * use other approaches if you can.  We lack experience with this API.
1718 * Please report your problem related on it.
1719 *
1720 * NOTE: Releasing GVL and re-acquiring GVL may be expensive operations
1721 * for a short running `func()'. Be sure to benchmark and use this
1722 * mechanism when `func()' consumes enough time.
1723 *
1724 * Safe C API:
1725 * * rb_thread_interrupted() - check interrupt flag
1726 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1727 * they will work without GVL, and may acquire GVL when GC is needed.
1728 */
1729void *
1730rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1731 rb_unblock_function_t *ubf, void *data2)
1732{
1733 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1734}
1735
1736void *
1737rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1738 rb_unblock_function_t *ubf, void *data2)
1739{
1740 return rb_nogvl(func, data1, ubf, data2, 0);
1741}
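/*
 * Example use from a C extension (a minimal sketch; `blocking_read` and its
 * argument struct are hypothetical, and <unistd.h> plus <ruby/thread.h> are
 * assumed to be included):
 *
 *     struct read_args { int fd; void *buf; size_t len; ssize_t result; };
 *
 *     static void *
 *     blocking_read(void *ptr)
 *     {
 *         struct read_args *args = ptr;
 *         args->result = read(args->fd, args->buf, args->len);  // no GVL here
 *         return NULL;
 *     }
 *
 *     // Later, called with the GVL held:
 *     struct read_args args = { fd, buf, buf_len, 0 };
 *     rb_thread_call_without_gvl(blocking_read, &args, RUBY_UBF_IO, NULL);
 */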
1742
1743static int
1744waitfd_to_waiting_flag(int wfd_event)
1745{
1746 return wfd_event << 1;
1747}
1748
1749static struct ccan_list_head *
1750rb_io_blocking_operations(struct rb_io *io)
1751{
1752 rb_serial_t fork_generation = GET_VM()->fork_gen;
1753
1754 // On fork, all existing entries in this list (which are stack allocated) become invalid.
1755 // Therefore, we re-initialize the list which clears it.
1756 if (io->fork_generation != fork_generation) {
1757 ccan_list_head_init(&io->blocking_operations);
1758 io->fork_generation = fork_generation;
1759 }
1760
1761 return &io->blocking_operations;
1762}
1763
1764/*
1765 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
1766 * that are currently blocked on this IO for reading, writing or other operations.
1767 *
1768 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
1769 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
1770 *
1771 * @parameter io The IO object on which the operation will block
1772 * @parameter blocking_operation The operation details including the execution context that will be blocked
1773 */
1774static void
1775rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1776{
1777 ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
1778}
1779
1780static void
1781rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1782{
1783 ccan_list_del(&blocking_operation->list);
1784}
 1785
 1786struct io_blocking_operation_arguments {
 1787 struct rb_io *io;
1788 struct rb_io_blocking_operation *blocking_operation;
1789};
1790
1791static VALUE
1792io_blocking_operation_exit(VALUE _arguments)
1793{
1794 struct io_blocking_operation_arguments *arguments = (void*)_arguments;
1795 struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;
1796
1797 rb_io_blocking_operation_pop(arguments->io, blocking_operation);
1798
1799 rb_io_t *io = arguments->io;
1800 rb_thread_t *thread = io->closing_ec->thread_ptr;
1801 rb_fiber_t *fiber = io->closing_ec->fiber_ptr;
1802
1803 if (thread->scheduler != Qnil) {
1804 // This can cause spurious wakeups...
1805 rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
1806 }
1807 else {
1808 rb_thread_wakeup(thread->self);
1809 }
1810
1811 return Qnil;
1812}
1813
1814/*
1815 * Called when a blocking operation completes or is interrupted. Removes the operation from
1816 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
1817 *
1818 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
1819 * through that mutex to ensure proper coordination with the closing thread.
1820 *
1821 * @parameter io The IO object the operation was performed on
1822 * @parameter blocking_operation The completed operation to clean up
1823 */
1824static void
1825rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1826{
1827 VALUE wakeup_mutex = io->wakeup_mutex;
1828
1829 // Indicate that the blocking operation is no longer active:
1830 blocking_operation->ec = NULL;
1831
1832 if (RB_TEST(wakeup_mutex)) {
1833 struct io_blocking_operation_arguments arguments = {
1834 .io = io,
1835 .blocking_operation = blocking_operation
1836 };
1837
1838 rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
1839 }
1840 else {
1841 // If there's no wakeup_mutex, we can safely remove the operation directly:
1842 rb_io_blocking_operation_pop(io, blocking_operation);
1843 }
1844}
1845
1846static VALUE
1847rb_thread_io_blocking_operation_ensure(VALUE _argument)
1848{
1849 struct io_blocking_operation_arguments *arguments = (void*)_argument;
1850
1851 rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);
1852
1853 return Qnil;
1854}
1855
1856/*
1857 * Executes a function that performs a blocking IO operation, while properly tracking
1858 * the operation in the IO's blocking_operations list. This ensures proper cleanup
1859 * and interruption handling if the IO is closed while blocked.
1860 *
1861 * The operation is automatically removed from the blocking_operations list when the function
1862 * returns, whether normally or due to an exception.
1863 *
1864 * @parameter self The IO object
1865 * @parameter function The function to execute that will perform the blocking operation
1866 * @parameter argument The argument to pass to the function
1867 * @returns The result of the blocking operation function
1868 */
1869VALUE
1870rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
1871{
1872 struct rb_io *io;
1873 RB_IO_POINTER(self, io);
1874
1875 rb_execution_context_t *ec = GET_EC();
1876 struct rb_io_blocking_operation blocking_operation = {
1877 .ec = ec,
1878 };
1879 rb_io_blocking_operation_enter(io, &blocking_operation);
1880
 1881 struct io_blocking_operation_arguments io_blocking_operation_arguments = {
 1882 .io = io,
1883 .blocking_operation = &blocking_operation
1884 };
1885
1886 return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
1887}
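/*
 * Example (a minimal sketch; `blocking_step`, `do_blocking_wait` and
 * `struct my_arguments` are hypothetical): callers wrap the blocking step in
 * a VALUE(*)(VALUE) callback so it is tracked on the IO's blocking_operations
 * list and woken up if the IO is closed concurrently:
 *
 *     static VALUE
 *     blocking_step(VALUE argument)
 *     {
 *         struct my_arguments *arguments = (void *)argument;
 *         return do_blocking_wait(arguments);  // hypothetical blocking helper
 *     }
 *
 *     struct my_arguments arguments = { 0 };   // fill in as needed
 *     rb_thread_io_blocking_operation(io_value, blocking_step, (VALUE)&arguments);
 */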
1888
1889static bool
1890thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1891{
1892#if defined(USE_MN_THREADS) && USE_MN_THREADS
1893 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1894#else
1895 return false;
1896#endif
1897}
1898
1899// true if need retry
1900static bool
1901thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1902{
1903#if defined(USE_MN_THREADS) && USE_MN_THREADS
1904 if (thread_io_mn_schedulable(th, events, timeout)) {
1905 rb_hrtime_t rel, *prel;
1906
1907 if (timeout) {
1908 rel = rb_timeval2hrtime(timeout);
1909 prel = &rel;
1910 }
1911 else {
1912 prel = NULL;
1913 }
1914
1915 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1916
1917 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1918 // timeout
1919 return false;
1920 }
1921 else {
1922 return true;
1923 }
1924 }
1925#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1926 return false;
1927}
1928
1929// assume read/write
1930static bool
1931blocking_call_retryable_p(int r, int eno)
1932{
1933 if (r != -1) return false;
1934
1935 switch (eno) {
1936 case EAGAIN:
1937#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1938 case EWOULDBLOCK:
1939#endif
1940 return true;
1941 default:
1942 return false;
1943 }
1944}
1945
1946bool
1947rb_thread_mn_schedulable(VALUE thval)
1948{
1949 rb_thread_t *th = rb_thread_ptr(thval);
1950 return th->mn_schedulable;
1951}
1952
1953VALUE
1954rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
1955{
1956 rb_execution_context_t * volatile ec = GET_EC();
1957 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1958
1959 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
1960
1961 volatile VALUE val = Qundef; /* shouldn't be used */
1962 volatile int saved_errno = 0;
1963 enum ruby_tag_type state;
1964 volatile bool prev_mn_schedulable = th->mn_schedulable;
1965 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1966
1967 int fd = io->fd;
1968
1969 // `errno` is only valid when there is an actual error - but we can't
1970 // extract that from the return value of `func` alone, so we clear any
1971 // prior `errno` value here so that we can later check if it was set by
1972 // `func` or not (as opposed to some previously set value).
1973 errno = 0;
1974
1975 struct rb_io_blocking_operation blocking_operation = {
1976 .ec = ec,
1977 };
1978 rb_io_blocking_operation_enter(io, &blocking_operation);
1979
1980 {
1981 EC_PUSH_TAG(ec);
1982 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1983 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1984 retry:
1985 BLOCKING_REGION(th, {
1986 val = func(data1);
1987 saved_errno = errno;
1988 }, ubf_select, th, FALSE);
1989
1990 RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
1991 if (events &&
1992 blocking_call_retryable_p((int)val, saved_errno) &&
1993 thread_io_wait_events(th, fd, events, NULL)) {
1994 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1995 goto retry;
1996 }
1997
1998 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1999
2000 state = saved_state;
2001 }
2002 EC_POP_TAG();
2003
2004 th = rb_ec_thread_ptr(ec);
2005 th->mn_schedulable = prev_mn_schedulable;
2006 }
2007
2008 rb_io_blocking_operation_exit(io, &blocking_operation);
2009
2010 if (state) {
2011 EC_JUMP_TAG(ec, state);
2012 }
2013
2014 // If the error was a timeout, we raise a specific exception for that:
2015 if (saved_errno == ETIMEDOUT) {
2016 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
2017 }
2018
2019 errno = saved_errno;
2020
2021 return val;
2022}
2023
2024VALUE
2025rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
2026{
2027 return rb_thread_io_blocking_call(io, func, data1, 0);
2028}
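
/*
 * Illustrative sketch only (hypothetical names, based on the pattern used by
 * io.c): wrapping a raw read(2) so that the GVL is released around it, and so
 * that, with M:N threads, the call can be retried after waiting for
 * readability (RB_WAITFD_IN).
 *
 *   struct example_read_arguments { int fd; void *buffer; size_t size; };
 *
 *   static VALUE
 *   example_read(void *ptr)
 *   {
 *       struct example_read_arguments *arguments = ptr;
 *       return (VALUE)read(arguments->fd, arguments->buffer, arguments->size);
 *   }
 *
 *   // ssize_t result = (ssize_t)rb_thread_io_blocking_call(io, example_read, &arguments, RB_WAITFD_IN);
 */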
2029
2030/*
2031 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
2032 *
2033 * After releasing GVL using
2034 * rb_thread_call_without_gvl() you can not access Ruby values or invoke
2035 * methods. If you need to access Ruby you must use this function
2036 * rb_thread_call_with_gvl().
2037 *
2038 * This function rb_thread_call_with_gvl() does:
2039 * (1) acquire GVL.
2040 * (2) call passed function `func'.
2041 * (3) release GVL.
2042 * (4) return a value which is returned at (2).
2043 *
2044 * NOTE: You should not return a Ruby object from (2) because such an
2045 * object will not be marked and may be garbage collected.
2046 *
2047 * NOTE: If an exception is raised in `func', this function DOES NOT
2048 * protect (catch) the exception. If you have any resources
2049 * which should be freed before the exception propagates, you need
2050 * to use rb_protect() in `func' and return a value indicating
2051 * that an exception was raised.
2052 *
2053 * NOTE: This function should not be called by a thread which was not
2054 * created as Ruby thread (created by Thread.new or so). In other
2055 * words, this function *DOES NOT* associate or convert a NON-Ruby
2056 * thread to a Ruby thread.
2057 *
2058 * NOTE: If this thread has already acquired the GVL, then the method call
2059 * is performed without acquiring or releasing the GVL (from Ruby 4.0).
2060 */
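/*
 * Illustrative sketch only (hypothetical names, not part of this file):
 * a native extension releases the GVL for a long computation and briefly
 * re-acquires it to call back into Ruby.
 *
 *   static void *
 *   log_progress(void *message)
 *   {
 *       // The GVL is held here, so calling Ruby APIs is safe.
 *       rb_funcall(rb_stderr, rb_intern("puts"), 1, rb_str_new_cstr((const char *)message));
 *       return NULL;
 *   }
 *
 *   static void *
 *   heavy_computation(void *data)
 *   {
 *       // Called via rb_thread_call_without_gvl(), so the GVL is NOT held here.
 *       rb_thread_call_with_gvl(log_progress, (void *)"halfway there");
 *       return NULL;
 *   }
 */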
2061void *
2062rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
2063{
2064 rb_thread_t *th = ruby_thread_from_native();
2065 struct rb_blocking_region_buffer *brb;
2066 struct rb_unblock_callback prev_unblock;
2067 void *r;
2068
2069 if (th == 0) {
2070 /* Error has occurred, but we can't use rb_bug()
2071 * because this thread is not Ruby's thread.
2072 * What should we do?
2073 */
2074 bp();
2075 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2076 exit(EXIT_FAILURE);
2077 }
2078
2079 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
2080 prev_unblock = th->unblock;
2081
2082 if (brb == 0) {
2083 /* the GVL is already acquired, call method directly */
2084 return (*func)(data1);
2085 }
2086
2087 blocking_region_end(th, brb);
2088 /* enter to Ruby world: You can access Ruby values, methods and so on. */
2089 r = (*func)(data1);
2090 /* leave from Ruby world: You can not access Ruby values, etc. */
2091 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2092 RUBY_ASSERT_ALWAYS(released);
2093 RB_VM_SAVE_MACHINE_CONTEXT(th);
2094 thread_sched_to_waiting(TH_SCHED(th), th);
2095 return r;
2096}
2097
2098/*
2099 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2100 */
2101
2102int
2103ruby_thread_has_gvl_p(void)
2104{
2105 rb_thread_t *th = ruby_thread_from_native();
2106
2107 if (th && th->blocking_region_buffer == 0) {
2108 return 1;
2109 }
2110 else {
2111 return 0;
2112 }
2113}
2114
2115/*
2116 * call-seq:
2117 * Thread.pass -> nil
2118 *
2119 * Give the thread scheduler a hint to pass execution to another thread.
2120 * A running thread may or may not switch; it depends on the OS and processor.
2121 */
2122
2123static VALUE
2124thread_s_pass(VALUE klass)
2125{
2126    rb_thread_schedule();
2127    return Qnil;
2128}
2129
2130/*****************************************************/
2131
2132/*
2133 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2134 *
2135 * Async events such as an exception thrown by Thread#raise,
2136 * Thread#kill and thread termination (after main thread termination)
2137 * will be queued to th->pending_interrupt_queue.
2138 * - clear: clear the queue.
2139 * - enque: enqueue err object into queue.
2140 * - deque: dequeue err object from queue.
2141 * - active_p: return 1 if the queue should be checked.
2142 *
2143 * All rb_threadptr_pending_interrupt_* functions are called by
2144 * a thread that holds the GVL, of course.
2145 * Note that all "rb_" prefixed APIs require the GVL to be held when called.
2146 */
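/*
 * Illustrative sketch only: the typical producer-side pattern used elsewhere
 * in this file (e.g. Thread#raise and Thread#kill) is to enqueue the error
 * object and then poke the target thread so that it re-checks its interrupt
 * flags:
 *
 *   threadptr_check_pending_interrupt_queue(target_th);
 *   rb_threadptr_pending_interrupt_enque(target_th, exception);
 *   rb_threadptr_interrupt(target_th);
 */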
2147
2148void
2149rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
2150{
2151 rb_ary_clear(th->pending_interrupt_queue);
2152}
2153
2154void
2155rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2156{
2157 rb_ary_push(th->pending_interrupt_queue, v);
2158 th->pending_interrupt_queue_checked = 0;
2159}
2160
2161static void
2162threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2163{
2164 if (!th->pending_interrupt_queue) {
2165 rb_raise(rb_eThreadError, "uninitialized thread");
2166 }
2167}
2168
2169enum handle_interrupt_timing {
2170 INTERRUPT_NONE,
2171 INTERRUPT_IMMEDIATE,
2172 INTERRUPT_ON_BLOCKING,
2173 INTERRUPT_NEVER
2174};
2175
2176static enum handle_interrupt_timing
2177rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2178{
2179 if (sym == sym_immediate) {
2180 return INTERRUPT_IMMEDIATE;
2181 }
2182 else if (sym == sym_on_blocking) {
2183 return INTERRUPT_ON_BLOCKING;
2184 }
2185 else if (sym == sym_never) {
2186 return INTERRUPT_NEVER;
2187 }
2188 else {
2189 rb_raise(rb_eThreadError, "unknown mask signature");
2190 }
2191}
2192
2193static enum handle_interrupt_timing
2194rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2195{
2196 VALUE mask;
2197 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2198 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2199 VALUE mod;
2200 long i;
2201
2202 for (i=0; i<mask_stack_len; i++) {
2203 mask = mask_stack[mask_stack_len-(i+1)];
2204
2205 if (SYMBOL_P(mask)) {
2206 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2207 if (err != rb_cInteger) {
2208 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2209 }
2210 else {
2211 continue;
2212 }
2213 }
2214
2215 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2216 VALUE klass = mod;
2217 VALUE sym;
2218
2219 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2220 klass = RBASIC(mod)->klass;
2221 }
2222 else if (mod != RCLASS_ORIGIN(mod)) {
2223 continue;
2224 }
2225
2226 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2227 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2228 }
2229 }
2230 /* try to next mask */
2231 }
2232 return INTERRUPT_NONE;
2233}
2234
2235static int
2236rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2237{
2238 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2239}
2240
2241static int
2242rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2243{
2244 int i;
2245 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2246 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2247 if (rb_obj_is_kind_of(e, err)) {
2248 return TRUE;
2249 }
2250 }
2251 return FALSE;
2252}
2253
2254static VALUE
2255rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2256{
2257#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2258 int i;
2259
2260 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2261 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2262
2263 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2264
2265 switch (mask_timing) {
2266 case INTERRUPT_ON_BLOCKING:
2267 if (timing != INTERRUPT_ON_BLOCKING) {
2268 break;
2269 }
2270 /* fall through */
2271 case INTERRUPT_NONE: /* default: IMMEDIATE */
2272 case INTERRUPT_IMMEDIATE:
2273 rb_ary_delete_at(th->pending_interrupt_queue, i);
2274 return err;
2275 case INTERRUPT_NEVER:
2276 break;
2277 }
2278 }
2279
2280 th->pending_interrupt_queue_checked = 1;
2281 return Qundef;
2282#else
2283 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2284 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2285 th->pending_interrupt_queue_checked = 1;
2286 }
2287 return err;
2288#endif
2289}
2290
2291static int
2292threadptr_pending_interrupt_active_p(rb_thread_t *th)
2293{
2294 /*
2295 * For optimization, we don't check async errinfo queue
2296 * if the queue and the thread interrupt mask were not changed
2297 * since last check.
2298 */
2299 if (th->pending_interrupt_queue_checked) {
2300 return 0;
2301 }
2302
2303 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2304 return 0;
2305 }
2306
2307 return 1;
2308}
2309
2310static int
2311handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2312{
2313 VALUE *maskp = (VALUE *)args;
2314
2315 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2316 rb_raise(rb_eArgError, "unknown mask signature");
2317 }
2318
2319 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2320 *maskp = val;
2321 return ST_CONTINUE;
2322 }
2323
2324 if (RTEST(*maskp)) {
2325 if (!RB_TYPE_P(*maskp, T_HASH)) {
2326 VALUE prev = *maskp;
2327 *maskp = rb_ident_hash_new();
2328 if (SYMBOL_P(prev)) {
2329 rb_hash_aset(*maskp, rb_eException, prev);
2330 }
2331 }
2332 rb_hash_aset(*maskp, key, val);
2333 }
2334 else {
2335 *maskp = Qfalse;
2336 }
2337
2338 return ST_CONTINUE;
2339}
2340
2341/*
2342 * call-seq:
2343 * Thread.handle_interrupt(hash) { ... } -> result of the block
2344 *
2345 * Changes asynchronous interrupt timing.
2346 *
2347 * _interrupt_ means an asynchronous event and its corresponding procedure,
2348 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2349 * and main thread termination (if the main thread terminates, then all
2350 * other threads will be killed).
2351 *
2352 * The given +hash+ has pairs like <code>ExceptionClass =>
2353 * :TimingSymbol</code>. Where the ExceptionClass is the interrupt handled by
2354 * the given block. The TimingSymbol can be one of the following symbols:
2355 *
2356 * [+:immediate+] Invoke interrupts immediately.
2357 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2358 * [+:never+]       Never invoke interrupts.
2359 *
2360 * _BlockingOperation_ means that the operation will block the calling thread,
2361 * such as read and write. On the CRuby implementation, _BlockingOperation_ is
2362 * any operation executed without the GVL.
2363 *
2364 * Masked asynchronous interrupts are delayed until they are enabled.
2365 * This method is similar to sigprocmask(3).
2366 *
2367 * === NOTE
2368 *
2369 * Asynchronous interrupts are difficult to use.
2370 *
2371 * If you need to communicate between threads, please consider using another mechanism such as Queue.
2372 *
2373 * Otherwise, use them only with a deep understanding of this method.
2374 *
2375 * === Usage
2376 *
2377 * In this example, we can guard from Thread#raise exceptions.
2378 *
2379 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2380 * ignored in the first block of the main thread. In the second
2381 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2382 *
2383 * th = Thread.new do
2384 * Thread.handle_interrupt(RuntimeError => :never) {
2385 * begin
2386 * # You can write resource allocation code safely.
2387 * Thread.handle_interrupt(RuntimeError => :immediate) {
2388 * # ...
2389 * }
2390 * ensure
2391 * # You can write resource deallocation code safely.
2392 * end
2393 * }
2394 * end
2395 * Thread.pass
2396 * # ...
2397 * th.raise "stop"
2398 *
2399 * While we are ignoring the RuntimeError exception, it's safe to write our
2400 * resource allocation code. Then, the ensure block is where we can safely
2401 * deallocate our resources.
2402 *
2403 * ==== Stack control settings
2404 *
2405 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2406 * to control more than one ExceptionClass and TimingSymbol at a time.
2407 *
2408 * Thread.handle_interrupt(FooError => :never) {
2409 * Thread.handle_interrupt(BarError => :never) {
2410 * # FooError and BarError are prohibited.
2411 * }
2412 * }
2413 *
2414 * ==== Inheritance with ExceptionClass
2415 *
2416 * All exceptions inherited from the ExceptionClass parameter will be considered.
2417 *
2418 * Thread.handle_interrupt(Exception => :never) {
2419 * # all exceptions inherited from Exception are prohibited.
2420 * }
2421 *
2422 * For handling all interrupts, use +Object+ and not +Exception+
2423 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2424 */
2425static VALUE
2426rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2427{
2428 VALUE mask = Qundef;
2429 rb_execution_context_t * volatile ec = GET_EC();
2430 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2431 volatile VALUE r = Qnil;
2432 enum ruby_tag_type state;
2433
2434 if (!rb_block_given_p()) {
2435 rb_raise(rb_eArgError, "block is needed.");
2436 }
2437
2438 mask_arg = rb_to_hash_type(mask_arg);
2439
2440 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2441 mask = Qnil;
2442 }
2443
2444 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2445
2446 if (UNDEF_P(mask)) {
2447 return rb_yield(Qnil);
2448 }
2449
2450 if (!RTEST(mask)) {
2451 mask = mask_arg;
2452 }
2453 else if (RB_TYPE_P(mask, T_HASH)) {
2454 OBJ_FREEZE(mask);
2455 }
2456
2457 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2458 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2459 th->pending_interrupt_queue_checked = 0;
2460 RUBY_VM_SET_INTERRUPT(th->ec);
2461 }
2462
2463 EC_PUSH_TAG(th->ec);
2464 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2465 r = rb_yield(Qnil);
2466 }
2467 EC_POP_TAG();
2468
2469 rb_ary_pop(th->pending_interrupt_mask_stack);
2470 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2471 th->pending_interrupt_queue_checked = 0;
2472 RUBY_VM_SET_INTERRUPT(th->ec);
2473 }
2474
2475 RUBY_VM_CHECK_INTS(th->ec);
2476
2477 if (state) {
2478 EC_JUMP_TAG(th->ec, state);
2479 }
2480
2481 return r;
2482}
2483
2484/*
2485 * call-seq:
2486 * target_thread.pending_interrupt?(error = nil) -> true/false
2487 *
2488 * Returns +true+ if the target thread has any deferred asynchronous events queued, +false+ otherwise.
2489 *
2490 * If +error+ is given, then check only for +error+ type deferred events.
2491 *
2492 * See ::pending_interrupt? for more information.
2493 */
2494static VALUE
2495rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2496{
2497 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2498
2499 if (!target_th->pending_interrupt_queue) {
2500 return Qfalse;
2501 }
2502 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2503 return Qfalse;
2504 }
2505 if (rb_check_arity(argc, 0, 1)) {
2506 VALUE err = argv[0];
2507 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2508 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2509 }
2510 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2511 }
2512 else {
2513 return Qtrue;
2514 }
2515}
2516
2517/*
2518 * call-seq:
2519 * Thread.pending_interrupt?(error = nil) -> true/false
2520 *
2521 * Returns +true+ if the asynchronous event queue is not empty, +false+ otherwise.
2522 *
2523 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2524 * this method can be used to determine if there are any deferred events.
2525 *
2526 * If this method returns true, you may want to finish the +:never+ block so that the deferred events can be processed.
2527 *
2528 * For example, the following method processes deferred asynchronous events
2529 * immediately.
2530 *
2531 * def Thread.kick_interrupt_immediately
2532 * Thread.handle_interrupt(Object => :immediate) {
2533 * Thread.pass
2534 * }
2535 * end
2536 *
2537 * If +error+ is given, then check only for +error+ type deferred events.
2538 *
2539 * === Usage
2540 *
2541 * th = Thread.new{
2542 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2543 * while true
2544 * ...
2545 * # reach safe point to invoke interrupt
2546 * if Thread.pending_interrupt?
2547 * Thread.handle_interrupt(Object => :immediate){}
2548 * end
2549 * ...
2550 * end
2551 * }
2552 * }
2553 * ...
2554 * th.raise # stop thread
2555 *
2556 * This example can also be written as the following, which you should use to
2557 * avoid asynchronous interrupts.
2558 *
2559 * flag = true
2560 * th = Thread.new{
2561 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2562 * while true
2563 * ...
2564 * # reach safe point to invoke interrupt
2565 * break if flag == false
2566 * ...
2567 * end
2568 * }
2569 * }
2570 * ...
2571 * flag = false # stop thread
2572 */
2573
2574static VALUE
2575rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2576{
2577 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2578}
2579
2580NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2581
2582static void
2583rb_threadptr_to_kill(rb_thread_t *th)
2584{
2585 VM_ASSERT(GET_THREAD() == th);
2586 rb_threadptr_pending_interrupt_clear(th);
2587 th->status = THREAD_RUNNABLE;
2588 th->to_kill = 1;
2589 th->ec->errinfo = INT2FIX(TAG_FATAL);
2590 EC_JUMP_TAG(th->ec, TAG_FATAL);
2591}
2592
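// Atomically fetch the interrupt bits that are not masked by ec->interrupt_mask,
// clearing exactly those bits from ec->interrupt_flag in the same CAS step.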
2593static inline rb_atomic_t
2594threadptr_get_interrupts(rb_thread_t *th)
2595{
2596 rb_execution_context_t *ec = th->ec;
2597 rb_atomic_t interrupt;
2598 rb_atomic_t old;
2599
2600 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2601 do {
2602 interrupt = old;
2603 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2604 } while (old != interrupt);
2605 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2606}
2607
2608static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2609
2610// Execute interrupts on currently running thread
2611// In certain situations, calling this function will raise an exception. Some examples are:
2612// * during VM shutdown (`rb_ractor_terminate_all`)
2613// * Call to Thread#exit for current thread (`rb_thread_kill`)
2614// * Call to Thread#raise for current thread
2615int
2616rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2617{
2618 rb_atomic_t interrupt;
2619 int postponed_job_interrupt = 0;
2620 int ret = FALSE;
2621
2622 VM_ASSERT(GET_THREAD() == th);
2623
2624 if (th->ec->raised_flag) return ret;
2625
2626 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2627 int sig;
2628 int timer_interrupt;
2629 int pending_interrupt;
2630 int trap_interrupt;
2631 int terminate_interrupt;
2632
2633 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2634 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2635 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2636 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2637 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2638
2639 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2640 RB_VM_LOCKING();
2641 }
2642
2643 if (postponed_job_interrupt) {
2644 rb_postponed_job_flush(th->vm);
2645 }
2646
2647 if (trap_interrupt) {
2648 /* signal handling */
2649 if (th == th->vm->ractor.main_thread) {
2650 enum rb_thread_status prev_status = th->status;
2651
2652 th->status = THREAD_RUNNABLE;
2653 {
2654 while ((sig = rb_get_next_signal()) != 0) {
2655 ret |= rb_signal_exec(th, sig);
2656 }
2657 }
2658 th->status = prev_status;
2659 }
2660
2661 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2662 enum rb_thread_status prev_status = th->status;
2663
2664 th->status = THREAD_RUNNABLE;
2665 {
2666 threadptr_interrupt_exec_exec(th);
2667 }
2668 th->status = prev_status;
2669 }
2670 }
2671
2672 /* exception from another thread */
2673 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2674 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2675 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2676 ret = TRUE;
2677
2678 if (UNDEF_P(err)) {
2679 /* no error */
2680 }
2681 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2682 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2683 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2684 terminate_interrupt = 1;
2685 }
2686 else {
2687 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2688 /* the only special exception to be queued across thread */
2689 err = ruby_vm_special_exception_copy(err);
2690 }
2691 /* set runnable if th was slept. */
2692 if (th->status == THREAD_STOPPED ||
2693 th->status == THREAD_STOPPED_FOREVER)
2694 th->status = THREAD_RUNNABLE;
2695 rb_exc_raise(err);
2696 }
2697 }
2698
2699 if (terminate_interrupt) {
2700 rb_threadptr_to_kill(th);
2701 }
2702
2703 if (timer_interrupt) {
2704 uint32_t limits_us = thread_default_quantum_ms * 1000;
2705
2706 if (th->priority > 0)
2707 limits_us <<= th->priority;
2708 else
2709 limits_us >>= -th->priority;
2710
2711 if (th->status == THREAD_RUNNABLE)
2712 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2713
2714 VM_ASSERT(th->ec->cfp);
2715 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2716 0, 0, 0, Qundef);
2717
2718 rb_thread_schedule_limits(limits_us);
2719 }
2720 }
2721 return ret;
2722}
2723
2724void
2725rb_thread_execute_interrupts(VALUE thval)
2726{
2727 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2728}
2729
2730static void
2731rb_threadptr_ready(rb_thread_t *th)
2732{
2733 rb_threadptr_interrupt(th);
2734}
2735
2736static VALUE
2737rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2738{
2739 if (rb_threadptr_dead(target_th)) {
2740 return Qnil;
2741 }
2742
2743 VALUE exception = rb_exception_setup(argc, argv);
2744
2745 /* making an exception object can switch thread,
2746 so we need to check thread deadness again */
2747 if (rb_threadptr_dead(target_th)) {
2748 return Qnil;
2749 }
2750
2751 rb_threadptr_pending_interrupt_enque(target_th, exception);
2752 rb_threadptr_interrupt(target_th);
2753
2754 return Qnil;
2755}
2756
2757void
2758rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2759{
2760 VALUE argv[2];
2761
2762 argv[0] = rb_eSignal;
2763 argv[1] = INT2FIX(sig);
2764 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2765}
2766
2767void
2768rb_threadptr_signal_exit(rb_thread_t *th)
2769{
2770 VALUE argv[2];
2771
2772 argv[0] = rb_eSystemExit;
2773 argv[1] = rb_str_new2("exit");
2774
2775    // TODO: check signal raise delivery
2776 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2777}
2778
2779int
2780rb_ec_set_raised(rb_execution_context_t *ec)
2781{
2782 if (ec->raised_flag & RAISED_EXCEPTION) {
2783 return 1;
2784 }
2785 ec->raised_flag |= RAISED_EXCEPTION;
2786 return 0;
2787}
2788
2789int
2790rb_ec_reset_raised(rb_execution_context_t *ec)
2791{
2792 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2793 return 0;
2794 }
2795 ec->raised_flag &= ~RAISED_EXCEPTION;
2796 return 1;
2797}
2798
2799/*
2800 * Thread-safe IO closing mechanism.
2801 *
2802 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2803 * 1. Track and notify all blocking operations through io->blocking_operations
2804 * 2. Ensure only one thread can close at a time using io->closing_ec
2805 * 3. Synchronize cleanup using wakeup_mutex
2806 *
2807 * The close process works as follows:
2808 * - First check if any thread is already closing (io->closing_ec)
2809 * - Set up wakeup_mutex for synchronization
2810 * - Iterate through all blocking operations in io->blocking_operations
2811 * - For each blocked fiber with a scheduler:
2812 * - Notify via rb_fiber_scheduler_fiber_interrupt
2813 * - For each blocked thread without a scheduler:
2814 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2815 * - Wake via rb_threadptr_interrupt
2816 * - Wait on wakeup_mutex until all operations are cleaned up
2817 * - Only then clear closing state and allow actual close to proceed
2818 */
2819static VALUE
2820thread_io_close_notify_all(VALUE _io)
2821{
2822 struct rb_io *io = (struct rb_io *)_io;
2823
2824 size_t count = 0;
2825 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2826 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2827
2828 struct rb_io_blocking_operation *blocking_operation;
2829 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2830 rb_execution_context_t *ec = blocking_operation->ec;
2831
2832 // If the operation is in progress, we need to interrupt it:
2833 if (ec) {
2834 rb_thread_t *thread = ec->thread_ptr;
2835
2836 VALUE result = RUBY_Qundef;
2837 if (thread->scheduler != Qnil) {
2838 result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
2839 }
2840
2841 if (result == RUBY_Qundef) {
2842 // If the thread is not the current thread, we need to enqueue an error:
2843 rb_threadptr_pending_interrupt_enque(thread, error);
2844 rb_threadptr_interrupt(thread);
2845 }
2846 }
2847
2848 count += 1;
2849 }
2850
2851 return (VALUE)count;
2852}
2853
2854size_t
2855rb_thread_io_close_interrupt(struct rb_io *io)
2856{
2857 // We guard this operation based on `io->closing_ec` -> only one thread will ever enter this function.
2858 if (io->closing_ec) {
2859 return 0;
2860 }
2861
2862 // If there are no blocking operations, we are done:
2863 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2864 return 0;
2865 }
2866
2867 // Otherwise, we are now closing the IO:
2868 rb_execution_context_t *ec = GET_EC();
2869 io->closing_ec = ec;
2870
2871 // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
2872 io->wakeup_mutex = rb_mutex_new();
2873 rb_mutex_allow_trap(io->wakeup_mutex, 1);
2874
2875 // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
2876 VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);
2877
2878 return (size_t)result;
2879}
2880
2881void
2882rb_thread_io_close_wait(struct rb_io* io)
2883{
2884 VALUE wakeup_mutex = io->wakeup_mutex;
2885
2886 if (!RB_TEST(wakeup_mutex)) {
2887 // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
2888 return;
2889 }
2890
2891 rb_mutex_lock(wakeup_mutex);
2892 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2893 rb_mutex_sleep(wakeup_mutex, Qnil);
2894 }
2895 rb_mutex_unlock(wakeup_mutex);
2896
2897 // We are done closing:
2898 io->wakeup_mutex = Qnil;
2899 io->closing_ec = NULL;
2900}
2901
2902void
2903rb_thread_fd_close(int fd)
2904{
2905 rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
2906}
2907
2908/*
2909 * call-seq:
2910 * thr.raise
2911 * thr.raise(string)
2912 * thr.raise(exception [, string [, array]])
2913 *
2914 * Raises an exception from the given thread. The caller does not have to be
2915 * +thr+. See Kernel#raise for more information.
2916 *
2917 * Thread.abort_on_exception = true
2918 * a = Thread.new { sleep(200) }
2919 * a.raise("Gotcha")
2920 *
2921 * This will produce:
2922 *
2923 * prog.rb:3: Gotcha (RuntimeError)
2924 * from prog.rb:2:in `initialize'
2925 * from prog.rb:2:in `new'
2926 * from prog.rb:2
2927 */
2928
2929static VALUE
2930thread_raise_m(int argc, VALUE *argv, VALUE self)
2931{
2932 rb_thread_t *target_th = rb_thread_ptr(self);
2933 const rb_thread_t *current_th = GET_THREAD();
2934
2935 threadptr_check_pending_interrupt_queue(target_th);
2936 rb_threadptr_raise(target_th, argc, argv);
2937
2938 /* To perform Thread.current.raise as Kernel.raise */
2939 if (current_th == target_th) {
2940 RUBY_VM_CHECK_INTS(target_th->ec);
2941 }
2942 return Qnil;
2943}
2944
2945
2946/*
2947 * call-seq:
2948 * thr.exit -> thr
2949 * thr.kill -> thr
2950 * thr.terminate -> thr
2951 *
2952 * Terminates +thr+ and schedules another thread to be run, returning
2953 * the terminated Thread. If this is the main thread, or the last
2954 * thread, exits the process. Note that the caller does not wait for
2955 * the thread to terminate if the receiver is different from the currently
2956 * running thread. The termination is asynchronous, and the thread can still
2957 * run a small amount of ruby code before exiting.
2958 */
2959
2960VALUE
2961rb_thread_kill(VALUE thread)
2962{
2963 rb_thread_t *target_th = rb_thread_ptr(thread);
2964
2965 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2966 return thread;
2967 }
2968 if (target_th == target_th->vm->ractor.main_thread) {
2969 rb_exit(EXIT_SUCCESS);
2970 }
2971
2972 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2973
2974 if (target_th == GET_THREAD()) {
2975 /* kill myself immediately */
2976 rb_threadptr_to_kill(target_th);
2977 }
2978 else {
2979 threadptr_check_pending_interrupt_queue(target_th);
2980 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2981 rb_threadptr_interrupt(target_th);
2982 }
2983
2984 return thread;
2985}
2986
2987int
2988rb_thread_to_be_killed(VALUE thread)
2989{
2990 rb_thread_t *target_th = rb_thread_ptr(thread);
2991
2992 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2993 return TRUE;
2994 }
2995 return FALSE;
2996}
2997
2998/*
2999 * call-seq:
3000 * Thread.kill(thread) -> thread
3001 *
3002 * Causes the given +thread+ to exit, see also Thread::exit.
3003 *
3004 * count = 0
3005 * a = Thread.new { loop { count += 1 } }
3006 * sleep(0.1) #=> 0
3007 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
3008 * count #=> 93947
3009 * a.alive? #=> false
3010 */
3011
3012static VALUE
3013rb_thread_s_kill(VALUE obj, VALUE th)
3014{
3015 return rb_thread_kill(th);
3016}
3017
3018
3019/*
3020 * call-seq:
3021 * Thread.exit -> thread
3022 *
3023 * Terminates the currently running thread and schedules another thread to be
3024 * run.
3025 *
3026 * If this thread is already marked to be killed, ::exit returns the Thread.
3027 *
3028 * If this is the main thread, or the last thread, exit the process.
3029 */
3030
3031static VALUE
3032rb_thread_exit(VALUE _)
3033{
3034 rb_thread_t *th = GET_THREAD();
3035 return rb_thread_kill(th->self);
3036}
3037
3038
3039/*
3040 * call-seq:
3041 * thr.wakeup -> thr
3042 *
3043 * Marks a given thread as eligible for scheduling; however, it may still
3044 * remain blocked on I/O.
3045 *
3046 * *Note:* This does not invoke the scheduler, see #run for more information.
3047 *
3048 * c = Thread.new { Thread.stop; puts "hey!" }
3049 * sleep 0.1 while c.status!='sleep'
3050 * c.wakeup
3051 * c.join
3052 * #=> "hey!"
3053 */
3054
3055VALUE
3056rb_thread_wakeup(VALUE thread)
3057{
3058 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3059 rb_raise(rb_eThreadError, "killed thread");
3060 }
3061 return thread;
3062}
3063
3064VALUE
3065rb_thread_wakeup_alive(VALUE thread)
3066{
3067 rb_thread_t *target_th = rb_thread_ptr(thread);
3068 if (target_th->status == THREAD_KILLED) return Qnil;
3069
3070 rb_threadptr_ready(target_th);
3071
3072 if (target_th->status == THREAD_STOPPED ||
3073 target_th->status == THREAD_STOPPED_FOREVER) {
3074 target_th->status = THREAD_RUNNABLE;
3075 }
3076
3077 return thread;
3078}
3079
3080
3081/*
3082 * call-seq:
3083 * thr.run -> thr
3084 *
3085 * Wakes up +thr+, making it eligible for scheduling.
3086 *
3087 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3088 * sleep 0.1 while a.status!='sleep'
3089 * puts "Got here"
3090 * a.run
3091 * a.join
3092 *
3093 * This will produce:
3094 *
3095 * a
3096 * Got here
3097 * c
3098 *
3099 * See also the instance method #wakeup.
3100 */
3101
3102VALUE
3103rb_thread_run(VALUE thread)
3104{
3105    rb_thread_wakeup(thread);
3106    rb_thread_schedule();
3107    return thread;
3108}
3109
3110
3111VALUE
3112rb_thread_stop(void)
3113{
3114 if (rb_thread_alone()) {
3115 rb_raise(rb_eThreadError,
3116                 "stopping only thread\n\tnote: use sleep to stop forever");
3117    }
3118    rb_thread_sleep_forever();
3119    return Qnil;
3120}
3121
3122/*
3123 * call-seq:
3124 * Thread.stop -> nil
3125 *
3126 * Stops execution of the current thread, putting it into a ``sleep'' state,
3127 * and schedules execution of another thread.
3128 *
3129 * a = Thread.new { print "a"; Thread.stop; print "c" }
3130 * sleep 0.1 while a.status!='sleep'
3131 * print "b"
3132 * a.run
3133 * a.join
3134 * #=> "abc"
3135 */
3136
3137static VALUE
3138thread_stop(VALUE _)
3139{
3140 return rb_thread_stop();
3141}
3142
3143/********************************************************************/
3144
3145VALUE
3146rb_thread_list(void)
3147{
3148 // TODO
3149 return rb_ractor_thread_list();
3150}
3151
3152/*
3153 * call-seq:
3154 * Thread.list -> array
3155 *
3156 * Returns an array of Thread objects for all threads that are either runnable
3157 * or stopped.
3158 *
3159 * Thread.new { sleep(200) }
3160 * Thread.new { 1000000.times {|i| i*i } }
3161 * Thread.new { Thread.stop }
3162 * Thread.list.each {|t| p t}
3163 *
3164 * This will produce:
3165 *
3166 * #<Thread:0x401b3e84 sleep>
3167 * #<Thread:0x401b3f38 run>
3168 * #<Thread:0x401b3fb0 sleep>
3169 * #<Thread:0x401bdf4c run>
3170 */
3171
3172static VALUE
3173thread_list(VALUE _)
3174{
3175 return rb_thread_list();
3176}
3177
3178VALUE
3179rb_thread_current(void)
3180{
3181 return GET_THREAD()->self;
3182}
3183
3184/*
3185 * call-seq:
3186 * Thread.current -> thread
3187 *
3188 * Returns the currently executing thread.
3189 *
3190 * Thread.current #=> #<Thread:0x401bdf4c run>
3191 */
3192
3193static VALUE
3194thread_s_current(VALUE klass)
3195{
3196 return rb_thread_current();
3197}
3198
3199VALUE
3200rb_thread_main(void)
3201{
3202 return GET_RACTOR()->threads.main->self;
3203}
3204
3205/*
3206 * call-seq:
3207 * Thread.main -> thread
3208 *
3209 * Returns the main thread.
3210 */
3211
3212static VALUE
3213rb_thread_s_main(VALUE klass)
3214{
3215 return rb_thread_main();
3216}
3217
3218
3219/*
3220 * call-seq:
3221 * Thread.abort_on_exception -> true or false
3222 *
3223 * Returns the status of the global ``abort on exception'' condition.
3224 *
3225 * The default is +false+.
3226 *
3227 * When set to +true+, if any thread is aborted by an exception, the
3228 * raised exception will be re-raised in the main thread.
3229 *
3230 * Can also be specified by the global $DEBUG flag or command line option
3231 * +-d+.
3232 *
3233 * See also ::abort_on_exception=.
3234 *
3235 * There is also an instance level method to set this for a specific thread,
3236 * see #abort_on_exception.
3237 */
3238
3239static VALUE
3240rb_thread_s_abort_exc(VALUE _)
3241{
3242 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3243}
3244
3245
3246/*
3247 * call-seq:
3248 * Thread.abort_on_exception= boolean -> true or false
3249 *
3250 * When set to +true+, if any thread is aborted by an exception, the
3251 * raised exception will be re-raised in the main thread.
3252 * Returns the new state.
3253 *
3254 * Thread.abort_on_exception = true
3255 * t1 = Thread.new do
3256 * puts "In new thread"
3257 * raise "Exception from thread"
3258 * end
3259 * sleep(1)
3260 * puts "not reached"
3261 *
3262 * This will produce:
3263 *
3264 * In new thread
3265 * prog.rb:4: Exception from thread (RuntimeError)
3266 * from prog.rb:2:in `initialize'
3267 * from prog.rb:2:in `new'
3268 * from prog.rb:2
3269 *
3270 * See also ::abort_on_exception.
3271 *
3272 * There is also an instance level method to set this for a specific thread,
3273 * see #abort_on_exception=.
3274 */
3275
3276static VALUE
3277rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3278{
3279 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3280 return val;
3281}
3282
3283
3284/*
3285 * call-seq:
3286 * thr.abort_on_exception -> true or false
3287 *
3288 * Returns the status of the thread-local ``abort on exception'' condition for
3289 * this +thr+.
3290 *
3291 * The default is +false+.
3292 *
3293 * See also #abort_on_exception=.
3294 *
3295 * There is also a class level method to set this for all threads, see
3296 * ::abort_on_exception.
3297 */
3298
3299static VALUE
3300rb_thread_abort_exc(VALUE thread)
3301{
3302 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3303}
3304
3305
3306/*
3307 * call-seq:
3308 * thr.abort_on_exception= boolean -> true or false
3309 *
3310 * When set to +true+, if this +thr+ is aborted by an exception, the
3311 * raised exception will be re-raised in the main thread.
3312 *
3313 * See also #abort_on_exception.
3314 *
3315 * There is also a class level method to set this for all threads, see
3316 * ::abort_on_exception=.
3317 */
3318
3319static VALUE
3320rb_thread_abort_exc_set(VALUE thread, VALUE val)
3321{
3322 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3323 return val;
3324}
3325
3326
3327/*
3328 * call-seq:
3329 * Thread.report_on_exception -> true or false
3330 *
3331 * Returns the status of the global ``report on exception'' condition.
3332 *
3333 * The default is +true+ since Ruby 2.5.
3334 *
3335 * All threads created when this flag is true will report
3336 * a message on $stderr if an exception kills the thread.
3337 *
3338 * Thread.new { 1.times { raise } }
3339 *
3340 * will produce this output on $stderr:
3341 *
3342 * #<Thread:...> terminated with exception (report_on_exception is true):
3343 * Traceback (most recent call last):
3344 * 2: from -e:1:in `block in <main>'
3345 * 1: from -e:1:in `times'
3346 *
3347 * This is done to catch errors in threads early.
3348 * In some cases, you might not want this output.
3349 * There are multiple ways to avoid the extra output:
3350 *
3351 * * If the exception is not intended, the best is to fix the cause of
3352 * the exception so it does not happen anymore.
3353 * * If the exception is intended, it might be better to rescue it closer to
3354 *   where it is raised rather than let it kill the Thread.
3355 * * If it is guaranteed the Thread will be joined with Thread#join or
3356 * Thread#value, then it is safe to disable this report with
3357 * <code>Thread.current.report_on_exception = false</code>
3358 * when starting the Thread.
3359 * However, this might handle the exception much later, or not at all
3360 * if the Thread is never joined due to the parent thread being blocked, etc.
3361 *
3362 * See also ::report_on_exception=.
3363 *
3364 * There is also an instance level method to set this for a specific thread,
3365 * see #report_on_exception=.
3366 *
3367 */
3368
3369static VALUE
3370rb_thread_s_report_exc(VALUE _)
3371{
3372 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3373}
3374
3375
3376/*
3377 * call-seq:
3378 * Thread.report_on_exception= boolean -> true or false
3379 *
3380 * Returns the new state.
3381 * When set to +true+, all threads created afterwards will inherit the
3382 * condition and report a message on $stderr if an exception kills a thread:
3383 *
3384 * Thread.report_on_exception = true
3385 * t1 = Thread.new do
3386 * puts "In new thread"
3387 * raise "Exception from thread"
3388 * end
3389 * sleep(1)
3390 * puts "In the main thread"
3391 *
3392 * This will produce:
3393 *
3394 * In new thread
3395 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3396 * Traceback (most recent call last):
3397 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3398 * In the main thread
3399 *
3400 * See also ::report_on_exception.
3401 *
3402 * There is also an instance level method to set this for a specific thread,
3403 * see #report_on_exception=.
3404 */
3405
3406static VALUE
3407rb_thread_s_report_exc_set(VALUE self, VALUE val)
3408{
3409 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3410 return val;
3411}
3412
3413
3414/*
3415 * call-seq:
3416 * Thread.ignore_deadlock -> true or false
3417 *
3418 * Returns the status of the global ``ignore deadlock'' condition.
3419 * The default is +false+, so that deadlock conditions are not ignored.
3420 *
3421 * See also ::ignore_deadlock=.
3422 *
3423 */
3424
3425static VALUE
3426rb_thread_s_ignore_deadlock(VALUE _)
3427{
3428 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3429}
3430
3431
3432/*
3433 * call-seq:
3434 * Thread.ignore_deadlock = boolean -> true or false
3435 *
3436 * Returns the new state.
3437 * When set to +true+, the VM will not check for deadlock conditions.
3438 * It is only useful to set this if your application can break a
3439 * deadlock condition via some other means, such as a signal.
3440 *
3441 * Thread.ignore_deadlock = true
3442 * queue = Thread::Queue.new
3443 *
3444 * trap(:SIGUSR1){queue.push "Received signal"}
3445 *
3446 * # raises fatal error unless ignoring deadlock
3447 * puts queue.pop
3448 *
3449 * See also ::ignore_deadlock.
3450 */
3451
3452static VALUE
3453rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3454{
3455 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3456 return val;
3457}
3458
3459
3460/*
3461 * call-seq:
3462 * thr.report_on_exception -> true or false
3463 *
3464 * Returns the status of the thread-local ``report on exception'' condition for
3465 * this +thr+.
3466 *
3467 * The default value when creating a Thread is the value of
3468 * the global flag Thread.report_on_exception.
3469 *
3470 * See also #report_on_exception=.
3471 *
3472 * There is also a class level method to set this for all new threads, see
3473 * ::report_on_exception=.
3474 */
3475
3476static VALUE
3477rb_thread_report_exc(VALUE thread)
3478{
3479 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3480}
3481
3482
3483/*
3484 * call-seq:
3485 * thr.report_on_exception= boolean -> true or false
3486 *
3487 * When set to +true+, a message is printed on $stderr if an exception
3488 * kills this +thr+. See ::report_on_exception for details.
3489 *
3490 * See also #report_on_exception.
3491 *
3492 * There is also a class level method to set this for all new threads, see
3493 * ::report_on_exception=.
3494 */
3495
3496static VALUE
3497rb_thread_report_exc_set(VALUE thread, VALUE val)
3498{
3499 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3500 return val;
3501}
3502
3503
3504/*
3505 * call-seq:
3506 * thr.group -> thgrp or nil
3507 *
3508 * Returns the ThreadGroup which contains the given thread.
3509 *
3510 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3511 */
3512
3513VALUE
3514rb_thread_group(VALUE thread)
3515{
3516 return rb_thread_ptr(thread)->thgroup;
3517}
3518
3519static const char *
3520thread_status_name(rb_thread_t *th, int detail)
3521{
3522 switch (th->status) {
3523 case THREAD_RUNNABLE:
3524 return th->to_kill ? "aborting" : "run";
3525 case THREAD_STOPPED_FOREVER:
3526 if (detail) return "sleep_forever";
3527 case THREAD_STOPPED:
3528 return "sleep";
3529 case THREAD_KILLED:
3530 return "dead";
3531 default:
3532 return "unknown";
3533 }
3534}
3535
3536static int
3537rb_threadptr_dead(rb_thread_t *th)
3538{
3539 return th->status == THREAD_KILLED;
3540}
3541
3542
3543/*
3544 * call-seq:
3545 * thr.status -> string, false or nil
3546 *
3547 * Returns the status of +thr+.
3548 *
3549 * [<tt>"sleep"</tt>]
3550 * Returned if this thread is sleeping or waiting on I/O
3551 * [<tt>"run"</tt>]
3552 * When this thread is executing
3553 * [<tt>"aborting"</tt>]
3554 * If this thread is aborting
3555 * [+false+]
3556 * When this thread is terminated normally
3557 * [+nil+]
3558 * If terminated with an exception.
3559 *
3560 * a = Thread.new { raise("die now") }
3561 * b = Thread.new { Thread.stop }
3562 * c = Thread.new { Thread.exit }
3563 * d = Thread.new { sleep }
3564 * d.kill #=> #<Thread:0x401b3678 aborting>
3565 * a.status #=> nil
3566 * b.status #=> "sleep"
3567 * c.status #=> false
3568 * d.status #=> "aborting"
3569 * Thread.current.status #=> "run"
3570 *
3571 * See also the instance methods #alive? and #stop?
3572 */
3573
3574static VALUE
3575rb_thread_status(VALUE thread)
3576{
3577 rb_thread_t *target_th = rb_thread_ptr(thread);
3578
3579 if (rb_threadptr_dead(target_th)) {
3580 if (!NIL_P(target_th->ec->errinfo) &&
3581 !FIXNUM_P(target_th->ec->errinfo)) {
3582 return Qnil;
3583 }
3584 else {
3585 return Qfalse;
3586 }
3587 }
3588 else {
3589 return rb_str_new2(thread_status_name(target_th, FALSE));
3590 }
3591}
3592
3593
3594/*
3595 * call-seq:
3596 * thr.alive? -> true or false
3597 *
3598 * Returns +true+ if +thr+ is running or sleeping.
3599 *
3600 * thr = Thread.new { }
3601 * thr.join #=> #<Thread:0x401b3fb0 dead>
3602 * Thread.current.alive? #=> true
3603 * thr.alive? #=> false
3604 *
3605 * See also #stop? and #status.
3606 */
3607
3608static VALUE
3609rb_thread_alive_p(VALUE thread)
3610{
3611 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3612}
3613
3614/*
3615 * call-seq:
3616 * thr.stop? -> true or false
3617 *
3618 * Returns +true+ if +thr+ is dead or sleeping.
3619 *
3620 * a = Thread.new { Thread.stop }
3621 * b = Thread.current
3622 * a.stop? #=> true
3623 * b.stop? #=> false
3624 *
3625 * See also #alive? and #status.
3626 */
3627
3628static VALUE
3629rb_thread_stop_p(VALUE thread)
3630{
3631 rb_thread_t *th = rb_thread_ptr(thread);
3632
3633 if (rb_threadptr_dead(th)) {
3634 return Qtrue;
3635 }
3636 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3637}
3638
3639/*
3640 * call-seq:
3641 * thr.name -> string
3642 *
3643 * Returns the name of the thread.
3644 */
3645
3646static VALUE
3647rb_thread_getname(VALUE thread)
3648{
3649 return rb_thread_ptr(thread)->name;
3650}
3651
3652/*
3653 * call-seq:
3654 * thr.name=(name) -> string
3655 *
3656 * Sets the given name as the name of the Ruby thread.
3657 * On some platforms, the name may also be set for the underlying native (pthread) thread and/or the kernel.
3658 */
3659
3660static VALUE
3661rb_thread_setname(VALUE thread, VALUE name)
3662{
3663 rb_thread_t *target_th = rb_thread_ptr(thread);
3664
3665 if (!NIL_P(name)) {
3666 rb_encoding *enc;
3667 StringValueCStr(name);
3668 enc = rb_enc_get(name);
3669 if (!rb_enc_asciicompat(enc)) {
3670 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3671 rb_enc_name(enc));
3672 }
3673 name = rb_str_new_frozen(name);
3674 }
3675 target_th->name = name;
3676 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3677 native_set_another_thread_name(target_th->nt->thread_id, name);
3678 }
3679 return name;
3680}
3681
3682#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3683/*
3684 * call-seq:
3685 * thr.native_thread_id -> integer
3686 *
3687 * Return the native thread ID which is used by the Ruby thread.
3688 *
3689 * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3690 * * On Linux it is TID returned by gettid(2).
3691 * * On macOS it is the system-wide unique integral ID of thread returned
3692 * by pthread_threadid_np(3).
3693 * * On FreeBSD it is the unique integral ID of the thread returned by
3694 * pthread_getthreadid_np(3).
3695 * * On Windows it is the thread identifier returned by GetThreadId().
3696 * * On other platforms, it raises NotImplementedError.
3697 *
3698 * NOTE:
3699 * If the thread is not yet associated with, or has already been dissociated
3700 * from, a native thread, it returns _nil_.
3701 * If the Ruby implementation uses an M:N thread model, the ID may change
3702 * depending on timing.
3703 */
3704
3705static VALUE
3706rb_thread_native_thread_id(VALUE thread)
3707{
3708 rb_thread_t *target_th = rb_thread_ptr(thread);
3709 if (rb_threadptr_dead(target_th)) return Qnil;
3710 return native_thread_native_thread_id(target_th);
3711}
3712#else
3713# define rb_thread_native_thread_id rb_f_notimplement
3714#endif
3715
3716/*
3717 * call-seq:
3718 * thr.to_s -> string
3719 *
3720 * Dump the name, id, and status of _thr_ to a string.
3721 */
3722
3723static VALUE
3724rb_thread_to_s(VALUE thread)
3725{
3726 VALUE cname = rb_class_path(rb_obj_class(thread));
3727 rb_thread_t *target_th = rb_thread_ptr(thread);
3728 const char *status;
3729 VALUE str, loc;
3730
3731 status = thread_status_name(target_th, TRUE);
3732 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3733 if (!NIL_P(target_th->name)) {
3734 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3735 }
3736 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3737 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3738 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3739 }
3740 rb_str_catf(str, " %s>", status);
3741
3742 return str;
3743}
3744
3745/* variables for recursive traversals */
3746#define recursive_key id__recursive_key__
3747
3748static VALUE
3749threadptr_local_aref(rb_thread_t *th, ID id)
3750{
3751 if (id == recursive_key) {
3752 return th->ec->local_storage_recursive_hash;
3753 }
3754 else {
3755 VALUE val;
3756 struct rb_id_table *local_storage = th->ec->local_storage;
3757
3758 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3759 return val;
3760 }
3761 else {
3762 return Qnil;
3763 }
3764 }
3765}
3766
3767VALUE
3768rb_thread_local_aref(VALUE thread, ID id)
3769{
3770 return threadptr_local_aref(rb_thread_ptr(thread), id);
3771}
3772
3773/*
3774 * call-seq:
3775 * thr[sym] -> obj or nil
3776 *
3777 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3778 * if not explicitly inside a Fiber), using either a symbol or a string name.
3779 * If the specified variable does not exist, returns +nil+.
3780 *
3781 * [
3782 * Thread.new { Thread.current["name"] = "A" },
3783 * Thread.new { Thread.current[:name] = "B" },
3784 * Thread.new { Thread.current["name"] = "C" }
3785 * ].each do |th|
3786 * th.join
3787 * puts "#{th.inspect}: #{th[:name]}"
3788 * end
3789 *
3790 * This will produce:
3791 *
3792 * #<Thread:0x00000002a54220 dead>: A
3793 * #<Thread:0x00000002a541a8 dead>: B
3794 * #<Thread:0x00000002a54130 dead>: C
3795 *
3796 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3797 * This confusion did not exist in Ruby 1.8 because
3798 * fibers were only introduced in Ruby 1.9.
3799 * Ruby 1.9 made these methods fiber-local in order to preserve
3800 * the following idiom for dynamic scope.
3801 *
3802 * def meth(newvalue)
3803 * begin
3804 * oldvalue = Thread.current[:name]
3805 * Thread.current[:name] = newvalue
3806 * yield
3807 * ensure
3808 * Thread.current[:name] = oldvalue
3809 * end
3810 * end
3811 *
3812 * The idiom may not work as dynamic scope if the methods are thread-local
3813 * and a given block switches fiber.
3814 *
3815 * f = Fiber.new {
3816 * meth(1) {
3817 * Fiber.yield
3818 * }
3819 * }
3820 * meth(2) {
3821 * f.resume
3822 * }
3823 * f.resume
3824 * p Thread.current[:name]
3825 * #=> nil if fiber-local
3826 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3827 *
3828 * For thread-local variables, please see #thread_variable_get and
3829 * #thread_variable_set.
3830 *
3831 */
3832
3833static VALUE
3834rb_thread_aref(VALUE thread, VALUE key)
3835{
3836 ID id = rb_check_id(&key);
3837 if (!id) return Qnil;
3838 return rb_thread_local_aref(thread, id);
3839}
3840
3841/*
3842 * call-seq:
3843 * thr.fetch(sym) -> obj
3844 * thr.fetch(sym) { } -> obj
3845 * thr.fetch(sym, default) -> obj
3846 *
3847 * Returns a fiber-local for the given key. If the key can't be
3848 * found, there are several options: With no other arguments, it will
3849 * raise a KeyError exception; if <i>default</i> is given, then that
3850 * will be returned; if the optional code block is specified, then
3851 * that will be run and its result returned. See Thread#[] and
3852 * Hash#fetch.
3853 */
3854static VALUE
3855rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3856{
3857 VALUE key, val;
3858 ID id;
3859 rb_thread_t *target_th = rb_thread_ptr(self);
3860 int block_given;
3861
3862 rb_check_arity(argc, 1, 2);
3863 key = argv[0];
3864
3865 block_given = rb_block_given_p();
3866 if (block_given && argc == 2) {
3867 rb_warn("block supersedes default value argument");
3868 }
3869
3870 id = rb_check_id(&key);
3871
3872 if (id == recursive_key) {
3873 return target_th->ec->local_storage_recursive_hash;
3874 }
3875 else if (id && target_th->ec->local_storage &&
3876 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3877 return val;
3878 }
3879 else if (block_given) {
3880 return rb_yield(key);
3881 }
3882 else if (argc == 1) {
3883 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3884 }
3885 else {
3886 return argv[1];
3887 }
3888}
3889
3890static VALUE
3891threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3892{
3893 if (id == recursive_key) {
3894 th->ec->local_storage_recursive_hash = val;
3895 return val;
3896 }
3897 else {
3898 struct rb_id_table *local_storage = th->ec->local_storage;
3899
3900 if (NIL_P(val)) {
3901 if (!local_storage) return Qnil;
3902 rb_id_table_delete(local_storage, id);
3903 return Qnil;
3904 }
3905 else {
3906 if (local_storage == NULL) {
3907 th->ec->local_storage = local_storage = rb_id_table_create(0);
3908 }
3909 rb_id_table_insert(local_storage, id, val);
3910 return val;
3911 }
3912 }
3913}
3914
3915VALUE
3916rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3917{
3918 if (OBJ_FROZEN(thread)) {
3919 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3920 }
3921
3922 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3923}
3924
3925/*
3926 * call-seq:
3927 * thr[sym] = obj -> obj
3928 *
3929 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3930 * using either a symbol or a string.
3931 *
3932 * See also Thread#[].
3933 *
3934 * For thread-local variables, please see #thread_variable_set and
3935 * #thread_variable_get.
3936 */
3937
3938static VALUE
3939rb_thread_aset(VALUE self, VALUE id, VALUE val)
3940{
3941 return rb_thread_local_aset(self, rb_to_id(id), val);
3942}
3943
3944/*
3945 * call-seq:
3946 * thr.thread_variable_get(key) -> obj or nil
3947 *
3948 * Returns the value of a thread local variable that has been set. Note that
3949 * these are different than fiber local values. For fiber local values,
3950 * please see Thread#[] and Thread#[]=.
3951 *
3952 * Thread local values are carried along with threads, and do not respect
3953 * fibers. For example:
3954 *
3955 * Thread.new {
3956 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3957 * Thread.current["foo"] = "bar" # set a fiber local
3958 *
3959 * Fiber.new {
3960 * Fiber.yield [
3961 * Thread.current.thread_variable_get("foo"), # get the thread local
3962 * Thread.current["foo"], # get the fiber local
3963 * ]
3964 * }.resume
3965 * }.join.value # => ['bar', nil]
3966 *
3967 * The value "bar" is returned for the thread local, whereas nil is returned
3968 * for the fiber local. The fiber is executed in the same thread, so the
3969 * thread local values are available.
3970 */
3971
3972static VALUE
3973rb_thread_variable_get(VALUE thread, VALUE key)
3974{
3975 VALUE locals;
3976 VALUE symbol = rb_to_symbol(key);
3977
3978 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3979 return Qnil;
3980 }
3981 locals = rb_thread_local_storage(thread);
3982 return rb_hash_aref(locals, symbol);
3983}
3984
3985/*
3986 * call-seq:
3987 * thr.thread_variable_set(key, value)
3988 *
3989 * Sets a thread local with +key+ to +value+. Note that these are local to
3990 * threads, and not to fibers. Please see Thread#thread_variable_get and
3991 * Thread#[] for more information.
3992 */
3993
3994static VALUE
3995rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3996{
3997 VALUE locals;
3998
3999 if (OBJ_FROZEN(thread)) {
4000 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
4001 }
4002
4003 locals = rb_thread_local_storage(thread);
4004 return rb_hash_aset(locals, rb_to_symbol(key), val);
4005}
4006
4007/*
4008 * call-seq:
4009 * thr.key?(sym) -> true or false
4010 *
4011 * Returns +true+ if the given string (or symbol) exists as a fiber-local
4012 * variable.
4013 *
4014 * me = Thread.current
4015 * me[:oliver] = "a"
4016 * me.key?(:oliver) #=> true
4017 * me.key?(:stanley) #=> false
4018 */
4019
4020static VALUE
4021rb_thread_key_p(VALUE self, VALUE key)
4022{
4023 VALUE val;
4024 ID id = rb_check_id(&key);
4025 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4026
4027 if (!id || local_storage == NULL) {
4028 return Qfalse;
4029 }
4030 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
4031}
4032
4033static enum rb_id_table_iterator_result
4034thread_keys_i(ID key, VALUE value, void *ary)
4035{
4036 rb_ary_push((VALUE)ary, ID2SYM(key));
4037 return ID_TABLE_CONTINUE;
4038}
4039
4040int
4041rb_thread_alone(void)
4042{
4043 // TODO
4044 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4045}
4046
4047/*
4048 * call-seq:
4049 * thr.keys -> array
4050 *
4051 * Returns an array of the names of the fiber-local variables (as Symbols).
4052 *
4053 * thr = Thread.new do
4054 * Thread.current[:cat] = 'meow'
4055 * Thread.current["dog"] = 'woof'
4056 * end
4057 * thr.join #=> #<Thread:0x401b3f10 dead>
4058 * thr.keys #=> [:dog, :cat]
4059 */
4060
4061static VALUE
4062rb_thread_keys(VALUE self)
4063{
4064 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4065 VALUE ary = rb_ary_new();
4066
4067 if (local_storage) {
4068 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4069 }
4070 return ary;
4071}
4072
4073static int
4074keys_i(VALUE key, VALUE value, VALUE ary)
4075{
4076 rb_ary_push(ary, key);
4077 return ST_CONTINUE;
4078}
4079
4080/*
4081 * call-seq:
4082 * thr.thread_variables -> array
4083 *
4084 * Returns an array of the names of the thread-local variables (as Symbols).
4085 *
4086 * thr = Thread.new do
4087 * Thread.current.thread_variable_set(:cat, 'meow')
4088 * Thread.current.thread_variable_set("dog", 'woof')
4089 * end
4090 * thr.join #=> #<Thread:0x401b3f10 dead>
4091 * thr.thread_variables #=> [:dog, :cat]
4092 *
4093 * Note that these are not fiber local variables. Please see Thread#[] and
4094 * Thread#thread_variable_get for more details.
4095 */
4096
4097static VALUE
4098rb_thread_variables(VALUE thread)
4099{
4100 VALUE locals;
4101 VALUE ary;
4102
4103 ary = rb_ary_new();
4104 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4105 return ary;
4106 }
4107 locals = rb_thread_local_storage(thread);
4108 rb_hash_foreach(locals, keys_i, ary);
4109
4110 return ary;
4111}
4112
4113/*
4114 * call-seq:
4115 * thr.thread_variable?(key) -> true or false
4116 *
4117 * Returns +true+ if the given string (or symbol) exists as a thread-local
4118 * variable.
4119 *
4120 * me = Thread.current
4121 * me.thread_variable_set(:oliver, "a")
4122 * me.thread_variable?(:oliver) #=> true
4123 * me.thread_variable?(:stanley) #=> false
4124 *
4125 * Note that these are not fiber local variables. Please see Thread#[] and
4126 * Thread#thread_variable_get for more details.
4127 */
4128
4129static VALUE
4130rb_thread_variable_p(VALUE thread, VALUE key)
4131{
4132 VALUE locals;
4133 VALUE symbol = rb_to_symbol(key);
4134
4135 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4136 return Qfalse;
4137 }
4138 locals = rb_thread_local_storage(thread);
4139
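    /* NOTE: a thread variable explicitly set to nil is reported as absent
     * here, because rb_hash_lookup below returns nil for missing keys too. */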
4140 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4141}
4142
4143/*
4144 * call-seq:
4145 * thr.priority -> integer
4146 *
4147 * Returns the priority of <i>thr</i>. The default value is inherited from
4148 * the thread that created <i>thr</i>, or is zero for the initial main
4149 * thread; higher-priority threads will run more frequently than
4150 * lower-priority threads (but lower-priority threads can also run).
4151 *
4152 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4153 * some platforms.
4154 *
4155 * Thread.current.priority #=> 0
4156 */
4157
4158static VALUE
4159rb_thread_priority(VALUE thread)
4160{
4161 return INT2NUM(rb_thread_ptr(thread)->priority);
4162}
4163
4164
4165/*
4166 * call-seq:
4167 * thr.priority= integer -> thr
4168 *
4169 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4170 * will run more frequently than lower-priority threads (but lower-priority
4171 * threads can also run).
4172 *
4173 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4174 * some platforms.
4175 *
4176 * count1 = count2 = 0
4177 * a = Thread.new do
4178 * loop { count1 += 1 }
4179 * end
4180 * a.priority = -1
4181 *
4182 * b = Thread.new do
4183 * loop { count2 += 1 }
4184 * end
4185 * b.priority = -2
4186 * sleep 1 #=> 1
4187 * count1 #=> 622504
4188 * count2 #=> 5832
4189 */
4190
4191static VALUE
4192rb_thread_priority_set(VALUE thread, VALUE prio)
4193{
4194 rb_thread_t *target_th = rb_thread_ptr(thread);
4195 int priority;
4196
4197#if USE_NATIVE_THREAD_PRIORITY
4198 target_th->priority = NUM2INT(prio);
4199    native_thread_apply_priority(target_th);
4200#else
4201 priority = NUM2INT(prio);
4202 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4203 priority = RUBY_THREAD_PRIORITY_MAX;
4204 }
4205 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4206 priority = RUBY_THREAD_PRIORITY_MIN;
4207 }
4208 target_th->priority = (int8_t)priority;
4209#endif
4210 return INT2NUM(target_th->priority);
4211}
4212
4213/* for IO */
4214
4215#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4216
4217/*
4218 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4219 * in the select(2) system call.
4220 *
4221 * - Linux 2.2.12 (?)
4222 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4223 * select(2) documents how to allocate fd_set dynamically.
4224 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4225 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4226 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4227 * select(2) documents how to allocate fd_set dynamically.
4228 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4229 * - Solaris 8 has select_large_fdset
4230 * - Mac OS X 10.7 (Lion)
4231 *   select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4232 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4233 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4234 *
4235 * When fd_set is not big enough to hold big file descriptors,
4236 * it should be allocated dynamically.
4237 * Note that this assumes fd_set is structured as a bitmap.
4238 *
4239 * rb_fd_init allocates the memory.
4240 * rb_fd_term frees the memory.
4241 * rb_fd_set may re-allocate the bitmap.
4242 *
4243 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4244 */
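/*
 * Illustrative sketch (not part of the build): how a caller might use this
 * API for a descriptor that can exceed FD_SETSIZE.  The variable name
 * `big_fd` is an assumption for the example only.
 *
 *     rb_fdset_t fds;
 *     rb_fd_init(&fds);
 *     rb_fd_set(big_fd, &fds);               // may grow the bitmap
 *     if (rb_fd_select(big_fd + 1, &fds, NULL, NULL, NULL) > 0 &&
 *         rb_fd_isset(big_fd, &fds)) {
 *         // big_fd is readable
 *     }
 *     rb_fd_term(&fds);                      // release the bitmap
 */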
4245
4246void
4247rb_fd_init(rb_fdset_t *fds)
4248{
4249 fds->maxfd = 0;
4250 fds->fdset = ALLOC(fd_set);
4251 FD_ZERO(fds->fdset);
4252}
4253
4254void
4255rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4256{
4257 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4258
4259 if (size < sizeof(fd_set))
4260 size = sizeof(fd_set);
4261 dst->maxfd = src->maxfd;
4262 dst->fdset = xmalloc(size);
4263 memcpy(dst->fdset, src->fdset, size);
4264}
4265
4266void
4267rb_fd_term(rb_fdset_t *fds)
4268{
4269 xfree(fds->fdset);
4270 fds->maxfd = 0;
4271 fds->fdset = 0;
4272}
4273
4274void
4275rb_fd_zero(rb_fdset_t *fds)
4276{
4277 if (fds->fdset)
4278 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4279}
4280
4281static void
4282rb_fd_resize(int n, rb_fdset_t *fds)
4283{
4284 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4285 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4286
4287 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4288 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4289
4290 if (m > o) {
4291 fds->fdset = xrealloc(fds->fdset, m);
4292 memset((char *)fds->fdset + o, 0, m - o);
4293 }
4294 if (n >= fds->maxfd) fds->maxfd = n + 1;
4295}
4296
4297void
4298rb_fd_set(int n, rb_fdset_t *fds)
4299{
4300 rb_fd_resize(n, fds);
4301 FD_SET(n, fds->fdset);
4302}
4303
4304void
4305rb_fd_clr(int n, rb_fdset_t *fds)
4306{
4307 if (n >= fds->maxfd) return;
4308 FD_CLR(n, fds->fdset);
4309}
4310
4311int
4312rb_fd_isset(int n, const rb_fdset_t *fds)
4313{
4314 if (n >= fds->maxfd) return 0;
4315 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4316}
4317
4318void
4319rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4320{
4321 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4322
4323 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4324 dst->maxfd = max;
4325 dst->fdset = xrealloc(dst->fdset, size);
4326 memcpy(dst->fdset, src, size);
4327}
4328
4329void
4330rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4331{
4332 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4333
4334 if (size < sizeof(fd_set))
4335 size = sizeof(fd_set);
4336 dst->maxfd = src->maxfd;
4337 dst->fdset = xrealloc(dst->fdset, size);
4338 memcpy(dst->fdset, src->fdset, size);
4339}
4340
4341int
4342rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4343{
4344 fd_set *r = NULL, *w = NULL, *e = NULL;
4345 if (readfds) {
4346 rb_fd_resize(n - 1, readfds);
4347 r = rb_fd_ptr(readfds);
4348 }
4349 if (writefds) {
4350 rb_fd_resize(n - 1, writefds);
4351 w = rb_fd_ptr(writefds);
4352 }
4353 if (exceptfds) {
4354 rb_fd_resize(n - 1, exceptfds);
4355 e = rb_fd_ptr(exceptfds);
4356 }
4357 return select(n, r, w, e, timeout);
4358}
4359
4360#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4361
4362#undef FD_ZERO
4363#undef FD_SET
4364#undef FD_CLR
4365#undef FD_ISSET
4366
4367#define FD_ZERO(f) rb_fd_zero(f)
4368#define FD_SET(i, f) rb_fd_set((i), (f))
4369#define FD_CLR(i, f) rb_fd_clr((i), (f))
4370#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4371
4372#elif defined(_WIN32)
4373
4374void
4375rb_fd_init(rb_fdset_t *set)
4376{
4377 set->capa = FD_SETSIZE;
4378 set->fdset = ALLOC(fd_set);
4379 FD_ZERO(set->fdset);
4380}
4381
4382void
4383rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4384{
4385 rb_fd_init(dst);
4386 rb_fd_dup(dst, src);
4387}
4388
4389void
4390rb_fd_term(rb_fdset_t *set)
4391{
4392 xfree(set->fdset);
4393 set->fdset = NULL;
4394 set->capa = 0;
4395}
4396
4397void
4398rb_fd_set(int fd, rb_fdset_t *set)
4399{
4400 unsigned int i;
4401 SOCKET s = rb_w32_get_osfhandle(fd);
4402
4403 for (i = 0; i < set->fdset->fd_count; i++) {
4404 if (set->fdset->fd_array[i] == s) {
4405 return;
4406 }
4407 }
4408 if (set->fdset->fd_count >= (unsigned)set->capa) {
4409 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4410 set->fdset =
4411 rb_xrealloc_mul_add(
4412 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4413 }
4414 set->fdset->fd_array[set->fdset->fd_count++] = s;
4415}
4416
4417#undef FD_ZERO
4418#undef FD_SET
4419#undef FD_CLR
4420#undef FD_ISSET
4421
4422#define FD_ZERO(f) rb_fd_zero(f)
4423#define FD_SET(i, f) rb_fd_set((i), (f))
4424#define FD_CLR(i, f) rb_fd_clr((i), (f))
4425#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4426
4427#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4428
4429#endif
4430
4431#ifndef rb_fd_no_init
4432#define rb_fd_no_init(fds) (void)(fds)
4433#endif
4434
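/*
 * Decide whether an interrupted or prematurely returning wait should be
 * retried.  A negative result caused by EINTR/ERESTART is retried after
 * refreshing the relative timeout against `end`; a zero result is treated as
 * a spurious wakeup and retried only while time remains (or forever when no
 * timeout was given).
 */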
4435static int
4436wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4437{
4438 int r = *result;
4439 if (r < 0) {
4440 switch (errnum) {
4441 case EINTR:
4442#ifdef ERESTART
4443 case ERESTART:
4444#endif
4445 *result = 0;
4446 if (rel && hrtime_update_expire(rel, end)) {
4447 *rel = 0;
4448 }
4449 return TRUE;
4450 }
4451 return FALSE;
4452 }
4453 else if (r == 0) {
4454 /* check for spurious wakeup */
4455 if (rel) {
4456 return !hrtime_update_expire(rel, end);
4457 }
4458 return TRUE;
4459 }
4460 return FALSE;
4461}
4463struct select_set {
4464 int max;
4465 rb_thread_t *th;
4466 rb_fdset_t *rset;
4467 rb_fdset_t *wset;
4468 rb_fdset_t *eset;
4469 rb_fdset_t orig_rset;
4470 rb_fdset_t orig_wset;
4471 rb_fdset_t orig_eset;
4472 struct timeval *timeout;
4473};
4474
4475static VALUE
4476select_set_free(VALUE p)
4477{
4478 struct select_set *set = (struct select_set *)p;
4479
4480 rb_fd_term(&set->orig_rset);
4481 rb_fd_term(&set->orig_wset);
4482 rb_fd_term(&set->orig_eset);
4483
4484 return Qfalse;
4485}
4486
4487static VALUE
4488do_select(VALUE p)
4489{
4490 struct select_set *set = (struct select_set *)p;
4491 volatile int result = 0;
4492 int lerrno;
4493 rb_hrtime_t *to, rel, end = 0;
4494
4495 timeout_prepare(&to, &rel, &end, set->timeout);
4496 volatile rb_hrtime_t endtime = end;
4497#define restore_fdset(dst, src) \
4498 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4499#define do_select_update() \
4500 (restore_fdset(set->rset, &set->orig_rset), \
4501 restore_fdset(set->wset, &set->orig_wset), \
4502 restore_fdset(set->eset, &set->orig_eset), \
4503 TRUE)
4504
4505 do {
4506 lerrno = 0;
4507
4508 BLOCKING_REGION(set->th, {
4509 struct timeval tv;
4510
4511 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4512 result = native_fd_select(set->max,
4513 set->rset, set->wset, set->eset,
4514 rb_hrtime2timeval(&tv, to), set->th);
4515 if (result < 0) lerrno = errno;
4516 }
4517 }, ubf_select, set->th, TRUE);
4518
4519 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4520 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4521
4522 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4523
4524 if (result < 0) {
4525 errno = lerrno;
4526 }
4527
4528 return (VALUE)result;
4529}
4530
4531int
4532rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4533 struct timeval *timeout)
4534{
4535 struct select_set set;
4536
4537 set.th = GET_THREAD();
4538 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4539 set.max = max;
4540 set.rset = read;
4541 set.wset = write;
4542 set.eset = except;
4543 set.timeout = timeout;
4544
4545 if (!set.rset && !set.wset && !set.eset) {
4546 if (!timeout) {
4547            rb_thread_sleep_forever();
4548            return 0;
4549 }
4550 rb_thread_wait_for(*timeout);
4551 return 0;
4552 }
4553
4554#define fd_init_copy(f) do { \
4555 if (set.f) { \
4556 rb_fd_resize(set.max - 1, set.f); \
4557 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4558 rb_fd_init_copy(&set.orig_##f, set.f); \
4559 } \
4560 } \
4561 else { \
4562 rb_fd_no_init(&set.orig_##f); \
4563 } \
4564 } while (0)
4565 fd_init_copy(rset);
4566 fd_init_copy(wset);
4567 fd_init_copy(eset);
4568#undef fd_init_copy
4569
4570 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4571}
4572
4573#ifdef USE_POLL
4574
4575/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4576#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4577#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4578#define POLLEX_SET (POLLPRI)
4579
4580#ifndef POLLERR_SET /* defined for FreeBSD for now */
4581# define POLLERR_SET (0)
4582#endif
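/*
 * These sets translate poll(2) revents back into the select(2)-style
 * RB_WAITFD_* answers expected by callers: e.g. POLLHUP and POLLERR are
 * folded into "readable" so that a following read(2) can observe EOF or the
 * pending error.
 */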
4583
4584static int
4585wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4586 rb_hrtime_t *const to, volatile int *lerrno)
4587{
4588 struct timespec ts;
4589 volatile int result = 0;
4590
4591 *lerrno = 0;
4592 BLOCKING_REGION(th, {
4593 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4594 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4595 if (result < 0) *lerrno = errno;
4596 }
4597 }, ubf_select, th, TRUE);
4598 return result;
4599}
4600
4601/*
4602 * returns a mask of events
4603 */
4604static int
4605thread_io_wait(rb_thread_t *th, struct rb_io *io, int fd, int events, struct timeval *timeout)
4606{
4607 struct pollfd fds[1] = {{
4608 .fd = fd,
4609 .events = (short)events,
4610 .revents = 0,
4611 }};
4612 volatile int result = 0;
4613 nfds_t nfds;
4614 struct rb_io_blocking_operation blocking_operation;
4615 enum ruby_tag_type state;
4616 volatile int lerrno;
4617
4618 RUBY_ASSERT(th);
4619 rb_execution_context_t *ec = th->ec;
4620
4621 if (io) {
4622 blocking_operation.ec = ec;
4623 rb_io_blocking_operation_enter(io, &blocking_operation);
4624 }
4625
4626 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4627 // fd is readable
4628 state = 0;
4629 fds[0].revents = events;
4630 errno = 0;
4631 }
4632 else {
4633 EC_PUSH_TAG(ec);
4634 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4635 rb_hrtime_t *to, rel, end = 0;
4636 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4637 timeout_prepare(&to, &rel, &end, timeout);
4638 do {
4639 nfds = numberof(fds);
4640 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4641
4642 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4643 } while (wait_retryable(&result, lerrno, to, end));
4644
4645 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4646 }
4647
4648 EC_POP_TAG();
4649 }
4650
4651 if (io) {
4652 rb_io_blocking_operation_exit(io, &blocking_operation);
4653 }
4654
4655 if (state) {
4656 EC_JUMP_TAG(ec, state);
4657 }
4658
4659 if (result < 0) {
4660 errno = lerrno;
4661 return -1;
4662 }
4663
4664 if (fds[0].revents & POLLNVAL) {
4665 errno = EBADF;
4666 return -1;
4667 }
4668
4669 /*
4670 * POLLIN, POLLOUT have different meanings from select(2)'s read/write bits.
4671 * Therefore we need to fix it up.
4672 */
4673 result = 0;
4674 if (fds[0].revents & POLLIN_SET)
4675 result |= RB_WAITFD_IN;
4676 if (fds[0].revents & POLLOUT_SET)
4677 result |= RB_WAITFD_OUT;
4678 if (fds[0].revents & POLLEX_SET)
4679 result |= RB_WAITFD_PRI;
4680
4681 /* all requested events are ready if there is an error */
4682 if (fds[0].revents & POLLERR_SET)
4683 result |= events;
4684
4685 return result;
4686}
4687#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4688struct select_args {
4689 struct rb_io *io;
4690 struct rb_io_blocking_operation *blocking_operation;
4691
4692 union {
4693 int fd;
4694 int error;
4695 } as;
4696 rb_fdset_t *read;
4697 rb_fdset_t *write;
4698 rb_fdset_t *except;
4699 struct timeval *tv;
4700};
4701
4702static VALUE
4703select_single(VALUE ptr)
4704{
4705 struct select_args *args = (struct select_args *)ptr;
4706 int r;
4707
4708 r = rb_thread_fd_select(args->as.fd + 1,
4709 args->read, args->write, args->except, args->tv);
4710 if (r == -1)
4711 args->as.error = errno;
4712 if (r > 0) {
4713 r = 0;
4714 if (args->read && rb_fd_isset(args->as.fd, args->read))
4715 r |= RB_WAITFD_IN;
4716 if (args->write && rb_fd_isset(args->as.fd, args->write))
4717 r |= RB_WAITFD_OUT;
4718 if (args->except && rb_fd_isset(args->as.fd, args->except))
4719 r |= RB_WAITFD_PRI;
4720 }
4721 return (VALUE)r;
4722}
4723
4724static VALUE
4725select_single_cleanup(VALUE ptr)
4726{
4727 struct select_args *args = (struct select_args *)ptr;
4728
4729 if (args->blocking_operation) {
4730 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4731 }
4732
4733 if (args->read) rb_fd_term(args->read);
4734 if (args->write) rb_fd_term(args->write);
4735 if (args->except) rb_fd_term(args->except);
4736
4737 return (VALUE)-1;
4738}
4739
4740static rb_fdset_t *
4741init_set_fd(int fd, rb_fdset_t *fds)
4742{
4743 if (fd < 0) {
4744 return 0;
4745 }
4746 rb_fd_init(fds);
4747 rb_fd_set(fd, fds);
4748
4749 return fds;
4750}
4751
4752static int
4753thread_io_wait(rb_thread_t *th, struct rb_io *io, int fd, int events, struct timeval *timeout)
4754{
4755 rb_fdset_t rfds, wfds, efds;
4756 struct select_args args;
4757 VALUE ptr = (VALUE)&args;
4758
4759 struct rb_io_blocking_operation blocking_operation;
4760 if (io) {
4761 args.io = io;
4762 blocking_operation.ec = th->ec;
4763 rb_io_blocking_operation_enter(io, &blocking_operation);
4764 args.blocking_operation = &blocking_operation;
4765 }
4766 else {
4767 args.io = NULL;
4768 blocking_operation.ec = NULL;
4769 args.blocking_operation = NULL;
4770 }
4771
4772 args.as.fd = fd;
4773 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4774 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4775 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4776 args.tv = timeout;
4777
4778 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4779 if (result == -1)
4780 errno = args.as.error;
4781
4782 return result;
4783}
4784#endif /* ! USE_POLL */
4785
4786int
4787rb_thread_wait_for_single_fd(rb_thread_t *th, int fd, int events, struct timeval *timeout)
4788{
4789 return thread_io_wait(th, NULL, fd, events, timeout);
4790}
4791
4792int
4793rb_thread_io_wait(rb_thread_t *th, struct rb_io *io, int events, struct timeval * timeout)
4794{
4795 return thread_io_wait(th, io, io->fd, events, timeout);
4796}
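/*
 * Illustrative sketch (not part of the build): waiting up to one second for
 * a descriptor to become readable.  `fd` and `th` are assumed to be a valid
 * descriptor and the calling thread.
 *
 *     struct timeval tv = {1, 0};
 *     int revents = rb_thread_wait_for_single_fd(th, fd, RB_WAITFD_IN, &tv);
 *     if (revents < 0) perror("wait");            // errno is set on failure
 *     else if (revents == 0) puts("timed out");
 *     else if (revents & RB_WAITFD_IN) puts("readable");
 */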
4797
4798/*
4799 * for GC
4800 */
4801
4802#ifdef USE_CONSERVATIVE_STACK_END
4803void
4804rb_gc_set_stack_end(VALUE **stack_end_p)
4805{
4806 VALUE stack_end;
4807COMPILER_WARNING_PUSH
4808#if RBIMPL_COMPILER_IS(GCC)
4809COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4810#endif
4811 *stack_end_p = &stack_end;
4812COMPILER_WARNING_POP
4813}
4814#endif
4815
4816/*
4817 *
4818 */
4819
4820void
4821rb_threadptr_check_signal(rb_thread_t *mth)
4822{
4823 /* mth must be main_thread */
4824 if (rb_signal_buff_size() > 0) {
4825 /* wakeup main thread */
4826 threadptr_trap_interrupt(mth);
4827 }
4828}
4829
4830static void
4831async_bug_fd(const char *mesg, int errno_arg, int fd)
4832{
4833 char buff[64];
4834 size_t n = strlcpy(buff, mesg, sizeof(buff));
4835 if (n < sizeof(buff)-3) {
4836 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4837 }
4838 rb_async_bug_errno(buff, errno_arg);
4839}
4840
4841/* VM-dependent API is not available for this function */
4842static int
4843consume_communication_pipe(int fd)
4844{
4845#if USE_EVENTFD
4846 uint64_t buff[1];
4847#else
4848    /* The buffer can be shared because no one keeps a reference to it. */
4849 static char buff[1024];
4850#endif
4851 ssize_t result;
4852 int ret = FALSE; /* for rb_sigwait_sleep */
4853
4854 while (1) {
4855 result = read(fd, buff, sizeof(buff));
4856#if USE_EVENTFD
4857        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4858#else
4859 RUBY_DEBUG_LOG("result:%d", (int)result);
4860#endif
4861 if (result > 0) {
4862 ret = TRUE;
4863 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4864 return ret;
4865 }
4866 }
4867 else if (result == 0) {
4868 return ret;
4869 }
4870 else if (result < 0) {
4871 int e = errno;
4872 switch (e) {
4873 case EINTR:
4874 continue; /* retry */
4875 case EAGAIN:
4876#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4877 case EWOULDBLOCK:
4878#endif
4879 return ret;
4880 default:
4881 async_bug_fd("consume_communication_pipe: read", e, fd);
4882 }
4883 }
4884 }
4885}
4886
4887void
4888rb_thread_stop_timer_thread(void)
4889{
4890 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4891 native_reset_timer_thread();
4892 }
4893}
4894
4895void
4896rb_thread_reset_timer_thread(void)
4897{
4898 native_reset_timer_thread();
4899}
4900
4901void
4902rb_thread_start_timer_thread(void)
4903{
4904 system_working = 1;
4905 rb_thread_create_timer_thread();
4906}
4907
4908static int
4909clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4910{
4911 int i;
4912 VALUE coverage = (VALUE)val;
4913 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4914 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4915
4916 if (lines) {
4917 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4918 rb_ary_clear(lines);
4919 }
4920 else {
4921 int i;
4922 for (i = 0; i < RARRAY_LEN(lines); i++) {
4923 if (RARRAY_AREF(lines, i) != Qnil)
4924 RARRAY_ASET(lines, i, INT2FIX(0));
4925 }
4926 }
4927 }
4928 if (branches) {
4929 VALUE counters = RARRAY_AREF(branches, 1);
4930 for (i = 0; i < RARRAY_LEN(counters); i++) {
4931 RARRAY_ASET(counters, i, INT2FIX(0));
4932 }
4933 }
4934
4935 return ST_CONTINUE;
4936}
4937
4938void
4939rb_clear_coverages(void)
4940{
4941 VALUE coverages = rb_get_coverages();
4942 if (RTEST(coverages)) {
4943 rb_hash_foreach(coverages, clear_coverage_i, 0);
4944 }
4945}
4946
4947#if defined(HAVE_WORKING_FORK)
4948
4949static void
4950rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4951{
4952 rb_thread_t *i = 0;
4953 rb_vm_t *vm = th->vm;
4954 rb_ractor_t *r = th->ractor;
4955 vm->ractor.main_ractor = r;
4956 vm->ractor.main_thread = th;
4957 r->threads.main = th;
4958 r->status_ = ractor_created;
4959
4960 thread_sched_atfork(TH_SCHED(th));
4961 ubf_list_atfork();
4962 rb_signal_atfork();
4963
4964 // OK. Only this thread accesses:
4965 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4966 if (r != vm->ractor.main_ractor) {
4967 rb_ractor_terminate_atfork(vm, r);
4968 }
4969 ccan_list_for_each(&r->threads.set, i, lt_node) {
4970 atfork(i, th);
4971 }
4972 }
4973 rb_vm_living_threads_init(vm);
4974
4975 rb_ractor_atfork(vm, th);
4976 rb_vm_postponed_job_atfork();
4977
4978 /* may be held by any thread in parent */
4979 rb_native_mutex_initialize(&th->interrupt_lock);
4980 ccan_list_head_init(&th->interrupt_exec_tasks);
4981
4982 vm->fork_gen++;
4983 rb_ractor_sleeper_threads_clear(th->ractor);
4984 rb_clear_coverages();
4985
4986    // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4987 rb_thread_reset_timer_thread();
4988 rb_thread_start_timer_thread();
4989
4990 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4991 VM_ASSERT(vm->ractor.cnt == 1);
4992}
4993
4994static void
4995terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4996{
4997 if (th != current_th) {
4998 // Clear the scheduler as it is no longer operational:
4999 th->scheduler = Qnil;
5000
5001 rb_native_mutex_initialize(&th->interrupt_lock);
5002 rb_mutex_abandon_keeping_mutexes(th);
5003 rb_mutex_abandon_locking_mutex(th);
5004 thread_cleanup_func(th, TRUE);
5005 }
5006}
5007
5008void rb_fiber_atfork(rb_thread_t *);
5009void
5010rb_thread_atfork(void)
5011{
5012 rb_thread_t *th = GET_THREAD();
5013 rb_threadptr_pending_interrupt_clear(th);
5014 rb_thread_atfork_internal(th, terminate_atfork_i);
5015 th->join_list = NULL;
5016 th->scheduler = Qnil;
5017 rb_fiber_atfork(th);
5018
5019 /* We don't want reproduce CVE-2003-0900. */
5020    rb_reset_random_seed();
5021}
5022
5023static void
5024terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
5025{
5026 if (th != current_th) {
5027 thread_cleanup_func_before_exec(th);
5028 }
5029}
5030
5031void
5032rb_thread_atfork_before_exec(void)
5033{
5034 rb_thread_t *th = GET_THREAD();
5035 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
5036}
5037#else
5038void
5039rb_thread_atfork(void)
5040{
5041}
5042
5043void
5044rb_thread_atfork_before_exec(void)
5045{
5046}
5047#endif
5049struct thgroup {
5050 int enclosed;
5051};
5052
5053static const rb_data_type_t thgroup_data_type = {
5054 "thgroup",
5055 {
5056 0,
5058 NULL, // No external memory to report
5059 },
5060 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5061};
5062
5063/*
5064 * Document-class: ThreadGroup
5065 *
5066 * ThreadGroup provides a means of keeping track of a number of threads as a
5067 * group.
5068 *
5069 * A given Thread object can only belong to one ThreadGroup at a time; adding
5070 * a thread to a new group will remove it from any previous group.
5071 *
5072 * Newly created threads belong to the same group as the thread from which they
5073 * were created.
5074 */
5075
5076/*
5077 * Document-const: Default
5078 *
5079 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5080 * by default.
5081 */
5082static VALUE
5083thgroup_s_alloc(VALUE klass)
5084{
5085 VALUE group;
5086 struct thgroup *data;
5087
5088 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5089 data->enclosed = 0;
5090
5091 return group;
5092}
5093
5094/*
5095 * call-seq:
5096 * thgrp.list -> array
5097 *
5098 * Returns an array of all existing Thread objects that belong to this group.
5099 *
5100 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5101 */
5102
5103static VALUE
5104thgroup_list(VALUE group)
5105{
5106 VALUE ary = rb_ary_new();
5107 rb_thread_t *th = 0;
5108 rb_ractor_t *r = GET_RACTOR();
5109
5110 ccan_list_for_each(&r->threads.set, th, lt_node) {
5111 if (th->thgroup == group) {
5112 rb_ary_push(ary, th->self);
5113 }
5114 }
5115 return ary;
5116}
5117
5118
5119/*
5120 * call-seq:
5121 * thgrp.enclose -> thgrp
5122 *
5123 * Prevents threads from being added to or removed from the receiving
5124 * ThreadGroup.
5125 *
5126 * New threads can still be started in an enclosed ThreadGroup.
5127 *
5128 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5129 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5130 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5131 * tg.add thr
5132 * #=> ThreadError: can't move from the enclosed thread group
5133 */
5134
5135static VALUE
5136thgroup_enclose(VALUE group)
5137{
5138 struct thgroup *data;
5139
5140 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5141 data->enclosed = 1;
5142
5143 return group;
5144}
5145
5146
5147/*
5148 * call-seq:
5149 * thgrp.enclosed? -> true or false
5150 *
5151 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5152 */
5153
5154static VALUE
5155thgroup_enclosed_p(VALUE group)
5156{
5157 struct thgroup *data;
5158
5159 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5160 return RBOOL(data->enclosed);
5161}
5162
5163
5164/*
5165 * call-seq:
5166 * thgrp.add(thread) -> thgrp
5167 *
5168 * Adds the given +thread+ to this group, removing it from any other
5169 * group to which it may have previously been a member.
5170 *
5171 * puts "Initial group is #{ThreadGroup::Default.list}"
5172 * tg = ThreadGroup.new
5173 * t1 = Thread.new { sleep }
5174 * t2 = Thread.new { sleep }
5175 * puts "t1 is #{t1}"
5176 * puts "t2 is #{t2}"
5177 * tg.add(t1)
5178 * puts "Initial group now #{ThreadGroup::Default.list}"
5179 * puts "tg group now #{tg.list}"
5180 *
5181 * This will produce:
5182 *
5183 * Initial group is #<Thread:0x401bdf4c>
5184 * t1 is #<Thread:0x401b3c90>
5185 * t2 is #<Thread:0x401b3c18>
5186 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5187 * tg group now #<Thread:0x401b3c90>
5188 */
5189
5190static VALUE
5191thgroup_add(VALUE group, VALUE thread)
5192{
5193 rb_thread_t *target_th = rb_thread_ptr(thread);
5194 struct thgroup *data;
5195
5196 if (OBJ_FROZEN(group)) {
5197 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5198 }
5199 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5200 if (data->enclosed) {
5201 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5202 }
5203
5204 if (OBJ_FROZEN(target_th->thgroup)) {
5205 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5206 }
5207 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5208 if (data->enclosed) {
5209 rb_raise(rb_eThreadError,
5210 "can't move from the enclosed thread group");
5211 }
5212
5213 target_th->thgroup = group;
5214 return group;
5215}
5216
5217/*
5218 * Document-class: ThreadShield
5219 */
5220static void
5221thread_shield_mark(void *ptr)
5222{
5223 rb_gc_mark((VALUE)ptr);
5224}
5225
5226static const rb_data_type_t thread_shield_data_type = {
5227 "thread_shield",
5228 {thread_shield_mark, 0, 0,},
5229 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5230};
5231
5232static VALUE
5233thread_shield_alloc(VALUE klass)
5234{
5235 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5236}
5237
5238#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
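/*
 * The number of waiters on a shield is packed into the FL_USER0..FL_USER19
 * flag bits of the shield object itself (shifted by FL_USHIFT), so no
 * separate counter allocation is needed.
 */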
5239#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5240#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5241#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5242STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
5243static inline unsigned int
5244rb_thread_shield_waiting(VALUE b)
5245{
5246 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5247}
5248
5249static inline void
5250rb_thread_shield_waiting_inc(VALUE b)
5251{
5252 unsigned int w = rb_thread_shield_waiting(b);
5253 w++;
5254 if (w > THREAD_SHIELD_WAITING_MAX)
5255 rb_raise(rb_eRuntimeError, "waiting count overflow");
5256 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5257 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5258}
5259
5260static inline void
5261rb_thread_shield_waiting_dec(VALUE b)
5262{
5263 unsigned int w = rb_thread_shield_waiting(b);
5264 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5265 w--;
5266 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5267 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5268}
5269
5270VALUE
5271rb_thread_shield_new(void)
5272{
5273 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5274 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5275 return thread_shield;
5276}
5277
5278bool
5279rb_thread_shield_owned(VALUE self)
5280{
5281 VALUE mutex = GetThreadShieldPtr(self);
5282 if (!mutex) return false;
5283
5284 rb_mutex_t *m = mutex_ptr(mutex);
5285
5286 return m->fiber_serial == rb_fiber_serial(GET_EC()->fiber_ptr);
5287}
5288
5289/*
5290 * Wait on a thread shield.
5291 *
5292 * Returns
5293 * true: acquired the thread shield
5294 *  false: the thread shield was destroyed and no other threads are waiting
5295 * nil: the thread shield was destroyed but still in use
5296 */
5297VALUE
5298rb_thread_shield_wait(VALUE self)
5299{
5300 VALUE mutex = GetThreadShieldPtr(self);
5301 rb_mutex_t *m;
5302
5303 if (!mutex) return Qfalse;
5304 m = mutex_ptr(mutex);
5305 if (m->fiber_serial == rb_fiber_serial(GET_EC()->fiber_ptr)) return Qnil;
5306 rb_thread_shield_waiting_inc(self);
5307 rb_mutex_lock(mutex);
5308 rb_thread_shield_waiting_dec(self);
5309 if (DATA_PTR(self)) return Qtrue;
5310 rb_mutex_unlock(mutex);
5311 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5312}
5313
5314static VALUE
5315thread_shield_get_mutex(VALUE self)
5316{
5317 VALUE mutex = GetThreadShieldPtr(self);
5318 if (!mutex)
5319 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5320 return mutex;
5321}
5322
5323/*
5324 * Release a thread shield, and return true if it has waiting threads.
5325 */
5326VALUE
5327rb_thread_shield_release(VALUE self)
5328{
5329 VALUE mutex = thread_shield_get_mutex(self);
5330 rb_mutex_unlock(mutex);
5331 return RBOOL(rb_thread_shield_waiting(self) > 0);
5332}
5333
5334/*
5335 * Release and destroy a thread shield, and return true if it has waiting threads.
5336 */
5337VALUE
5338rb_thread_shield_destroy(VALUE self)
5339{
5340 VALUE mutex = thread_shield_get_mutex(self);
5341 DATA_PTR(self) = 0;
5342 rb_mutex_unlock(mutex);
5343 return RBOOL(rb_thread_shield_waiting(self) > 0);
5344}
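/*
 * Illustrative sketch (not part of the build) of the shield life cycle:
 *
 *     VALUE shield = rb_thread_shield_new();   // creator acquires the shield
 *     // ... other threads calling rb_thread_shield_wait(shield) block here ...
 *     rb_thread_shield_destroy(shield);        // release; each waiter wakes up
 *                                              // and sees Qfalse (or Qnil while
 *                                              // more waiters remain)
 */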
5345
5346static VALUE
5347threadptr_recursive_hash(rb_thread_t *th)
5348{
5349 return th->ec->local_storage_recursive_hash;
5350}
5351
5352static void
5353threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5354{
5355 th->ec->local_storage_recursive_hash = hash;
5356}
5357
5359
5360/*
5361 * Returns the current "recursive list" used to detect recursion.
5362 * This list is a hash table, unique for the current thread and for
5363 * the current __callee__.
5364 */
5365
5366static VALUE
5367recursive_list_access(VALUE sym)
5368{
5369 rb_thread_t *th = GET_THREAD();
5370 VALUE hash = threadptr_recursive_hash(th);
5371 VALUE list;
5372 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5373 hash = rb_ident_hash_new();
5374 threadptr_recursive_hash_set(th, hash);
5375 list = Qnil;
5376 }
5377 else {
5378 list = rb_hash_aref(hash, sym);
5379 }
5380 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5381 list = rb_ident_hash_new();
5382 rb_hash_aset(hash, sym, list);
5383 }
5384 return list;
5385}
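/*
 * Rough shape of the storage managed above (illustrative only):
 *
 *     ec->local_storage_recursive_hash = {
 *         :inspect => { obj1 => true, obj2 => paired_obj_id, ... },
 *         :==      => { ... },
 *         ...
 *     }
 *
 * i.e. one identity hash per method name, holding the objects currently
 * being processed by that method on this thread.
 */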
5386
5387/*
5388 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5389 * in the recursion list.
5390 * Assumes the recursion list is valid.
5391 */
5392
5393static bool
5394recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5395{
5396#if SIZEOF_LONG == SIZEOF_VOIDP
5397 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5398#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5399 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5400 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5401#endif
5402
5403 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5404 if (UNDEF_P(pair_list))
5405 return false;
5406 if (paired_obj_id) {
5407 if (!RB_TYPE_P(pair_list, T_HASH)) {
5408 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5409 return false;
5410 }
5411 else {
5412 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5413 return false;
5414 }
5415 }
5416 return true;
5417}
5418
5419/*
5420 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5421 * For a single obj, it sets list[obj] to Qtrue.
5422 * For a pair, it sets list[obj] to paired_obj_id if possible,
5423 * otherwise list[obj] becomes a hash like:
5424 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5425 * Assumes the recursion list is valid.
5426 */
5427
5428static void
5429recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5430{
5431 VALUE pair_list;
5432
5433 if (!paired_obj) {
5434 rb_hash_aset(list, obj, Qtrue);
5435 }
5436 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5437 rb_hash_aset(list, obj, paired_obj);
5438 }
5439 else {
5440 if (!RB_TYPE_P(pair_list, T_HASH)){
5441 VALUE other_paired_obj = pair_list;
5442 pair_list = rb_hash_new();
5443 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5444 rb_hash_aset(list, obj, pair_list);
5445 }
5446 rb_hash_aset(pair_list, paired_obj, Qtrue);
5447 }
5448}
5449
5450/*
5451 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5452 * For a pair, if list[obj] is a hash, then paired_obj_id is
5453 * removed from the hash and no attempt is made to simplify
5454 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5455 * Assumes the recursion list is valid.
5456 */
5457
5458static int
5459recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5460{
5461 if (paired_obj) {
5462 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5463 if (UNDEF_P(pair_list)) {
5464 return 0;
5465 }
5466 if (RB_TYPE_P(pair_list, T_HASH)) {
5467 rb_hash_delete_entry(pair_list, paired_obj);
5468 if (!RHASH_EMPTY_P(pair_list)) {
5469            return 1; /* keep hash until it is empty */
5470 }
5471 }
5472 }
5473 rb_hash_delete_entry(list, obj);
5474 return 1;
5475}
5477struct exec_recursive_params {
5478 VALUE (*func) (VALUE, VALUE, int);
5479 VALUE list;
5480 VALUE obj;
5481 VALUE pairid;
5482 VALUE arg;
5483};
5484
5485static VALUE
5486exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5487{
5488 struct exec_recursive_params *p = (void *)data;
5489 return (*p->func)(p->obj, p->arg, FALSE);
5490}
5491
5492/*
5493 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5494 * current method is called recursively on obj, or on the pair <obj, pairid>.
5495 * If outer is 0, then the innermost func will be called with recursive set
5496 * to true, otherwise the outermost func will be called. In the latter case,
5497 * all inner calls to func are short-circuited by throw.
5498 * Implementation details: the value thrown is the recursive list, which is
5499 * specific to the current method and unlikely to be caught anywhere else.
5500 * list[recursive_key] is used as a flag for the outermost call.
5501 */
5502
5503static VALUE
5504exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5505{
5506 VALUE result = Qundef;
5507 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5508 struct exec_recursive_params p;
5509 int outermost;
5510 p.list = recursive_list_access(sym);
5511 p.obj = obj;
5512 p.pairid = pairid;
5513 p.arg = arg;
5514 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5515
5516 if (recursive_check(p.list, p.obj, pairid)) {
5517 if (outer && !outermost) {
5518 rb_throw_obj(p.list, p.list);
5519 }
5520 return (*func)(obj, arg, TRUE);
5521 }
5522 else {
5523 enum ruby_tag_type state;
5524
5525 p.func = func;
5526
5527 if (outermost) {
5528 recursive_push(p.list, ID2SYM(recursive_key), 0);
5529 recursive_push(p.list, p.obj, p.pairid);
5530 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5531 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5532 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5533 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5534 if (result == p.list) {
5535 result = (*func)(obj, arg, TRUE);
5536 }
5537 }
5538 else {
5539 volatile VALUE ret = Qundef;
5540 recursive_push(p.list, p.obj, p.pairid);
5541 EC_PUSH_TAG(GET_EC());
5542 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5543 ret = (*func)(obj, arg, FALSE);
5544 }
5545 EC_POP_TAG();
5546 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5547 goto invalid;
5548 }
5549 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5550 result = ret;
5551 }
5552 }
5553    *(volatile struct exec_recursive_params *)&p; /* volatile read so the longjmp paths above cannot clobber p */
5554 return result;
5555
5556 invalid:
5557 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5558 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5559 sym, rb_thread_current());
5561}
5562
5563/*
5564 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5565 * current method is called recursively on obj
5566 */
5567
5568VALUE
5569rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5570{
5571 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5572}
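/*
 * Illustrative sketch (not part of the build): how a C extension might guard
 * a recursive inspect.  `my_inspect_body` and `my_real_inspect` are made-up
 * names for the example.
 *
 *     static VALUE
 *     my_inspect_body(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");
 *         return my_real_inspect(obj, arg);
 *     }
 *
 *     // rb_exec_recursive(my_inspect_body, obj, Qnil) yields "[...]" when obj
 *     // is reached again while my_inspect_body is already running on it.
 */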
5573
5574/*
5575 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5576 * current method is called recursively on the ordered pair <obj, paired_obj>
5577 */
5578
5579VALUE
5580rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5581{
5582 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5583}
5584
5585/*
5586 * If recursion is detected on the current method and obj, the outermost
5587 * func will be called with (obj, arg, true). All inner func will be
5588 * short-circuited using throw.
5589 */
5590
5591VALUE
5592rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5593{
5594 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5595}
5596
5597VALUE
5598rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5599{
5600 return exec_recursive(func, obj, 0, arg, 1, mid);
5601}
5602
5603/*
5604 * If recursion is detected on the current method, obj and paired_obj,
5605 * the outermost func will be called with (obj, arg, true). All inner
5606 * func will be short-circuited using throw.
5607 */
5608
5609VALUE
5610rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5611{
5612 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5613}
5614
5615/*
5616 * call-seq:
5617 * thread.backtrace -> array or nil
5618 *
5619 * Returns the current backtrace of the target thread.
5620 *
5621 */
5622
5623static VALUE
5624rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5625{
5626 return rb_vm_thread_backtrace(argc, argv, thval);
5627}
5628
5629/* call-seq:
5630 * thread.backtrace_locations(*args) -> array or nil
5631 *
5632 * Returns the execution stack for the target thread---an array containing
5633 * backtrace location objects.
5634 *
5635 * See Thread::Backtrace::Location for more information.
5636 *
5637 * This method behaves similarly to Kernel#caller_locations except it applies
5638 * to a specific thread.
5639 */
5640static VALUE
5641rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5642{
5643 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5644}
5645
5646void
5647Init_Thread_Mutex(void)
5648{
5649 rb_thread_t *th = GET_THREAD();
5650
5651 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5652 rb_native_mutex_initialize(&th->interrupt_lock);
5653}
5654
5655/*
5656 * Document-class: ThreadError
5657 *
5658 * Raised when an invalid operation is attempted on a thread.
5659 *
5660 * For example, when no other thread has been started:
5661 *
5662 * Thread.stop
5663 *
5664 * This will raise the following exception:
5665 *
5666 * ThreadError: stopping only thread
5667 * note: use sleep to stop forever
5668 */
5669
5670void
5671Init_Thread(void)
5672{
5673 rb_thread_t *th = GET_THREAD();
5674
5675 sym_never = ID2SYM(rb_intern_const("never"));
5676 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5677 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5678
5679 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5680 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5681 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5682 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5683 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5684 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5685 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5686 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5687 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5688 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5689 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5690 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5691 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5692 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5693 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5694 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5695 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5696 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5697 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5698
5699 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5700 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5701 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5702 rb_define_method(rb_cThread, "value", thread_value, 0);
5703 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5704 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5705 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5706 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5707 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5708 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5709 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5710 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5711 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5712 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5713 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5714 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5715 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5716 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5717 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5718 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5719 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5720 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5721 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5722 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5723 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5724 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5725 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5726 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5727 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5728 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5729
5730 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5731 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5732 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5733 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5734 rb_define_alias(rb_cThread, "inspect", "to_s");
5735
5736 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5737 "stream closed in another thread");
5738
5739 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5740 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5741 rb_define_method(cThGroup, "list", thgroup_list, 0);
5742 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5743 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5744 rb_define_method(cThGroup, "add", thgroup_add, 1);
5745
5746 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5747
5748 if (ptr) {
5749 long quantum = strtol(ptr, NULL, 0);
5750 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5751 thread_default_quantum_ms = (uint32_t)quantum;
5752 }
5753 else if (0) {
5754 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5755 }
5756 }
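    /* e.g. `RUBY_THREAD_TIMESLICE=10 ruby app.rb` requests a 10ms scheduling
     * quantum; out-of-range or non-numeric values are silently ignored. */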
5757
5758 {
5759 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5760 rb_define_const(cThGroup, "Default", th->thgroup);
5761 }
5762
5764
5765 /* init thread core */
5766 {
5767 /* main thread setting */
5768 {
5769 /* acquire global vm lock */
5770#ifdef HAVE_PTHREAD_NP_H
5771 VM_ASSERT(TH_SCHED(th)->running == th);
5772#endif
5773 // thread_sched_to_running() should not be called because
5774 // it assumes blocked by thread_sched_to_waiting().
5775 // thread_sched_to_running(sched, th);
5776
5777 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5778 th->pending_interrupt_queue_checked = 0;
5779 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5780 }
5781 }
5782
5783 rb_thread_create_timer_thread();
5784
5785 Init_thread_sync();
5786
5787 // TODO: Suppress unused function warning for now
5788 // if (0) rb_thread_sched_destroy(NULL);
5789}
5790
5791int
5792ruby_native_thread_p(void)
5793{
5794 rb_thread_t *th = ruby_thread_from_native();
5795
5796 return th != 0;
5797}
5798
5799#ifdef NON_SCALAR_THREAD_ID
5800 #define thread_id_str(th) (NULL)
5801#else
5802 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5803#endif
5804
5805static void
5806debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5807{
5808 rb_thread_t *th = 0;
5809 VALUE sep = rb_str_new_cstr("\n ");
5810
5811 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5812 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5813 (void *)GET_THREAD(), (void *)r->threads.main);
5814
5815 ccan_list_for_each(&r->threads.set, th, lt_node) {
5816 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5817 "native:%p int:%u",
5818 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5819
5820 if (th->locking_mutex) {
5821 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5822 rb_str_catf(msg, " mutex:%llu cond:%"PRIuSIZE,
5823 (unsigned long long)mutex->fiber_serial, rb_mutex_num_waiting(mutex));
5824 }
5825
5826 {
5827 struct rb_waiting_list *list = th->join_list;
5828 while (list) {
5829 rb_str_catf(msg, "\n depended by: tb_thread_id:%p", (void *)list->thread);
5830 list = list->next;
5831 }
5832 }
5833 rb_str_catf(msg, "\n ");
5834 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5835 rb_str_catf(msg, "\n");
5836 }
5837}
5838
5839static void
5840rb_check_deadlock(rb_ractor_t *r)
5841{
5842 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5843
5844#ifdef RUBY_THREAD_PTHREAD_H
5845 if (r->threads.sched.readyq_cnt > 0) return;
5846#endif
5847
5848 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5849 int ltnum = rb_ractor_living_thread_num(r);
5850
5851 if (ltnum > sleeper_num) return;
5852 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5853
5854 int found = 0;
5855 rb_thread_t *th = NULL;
5856
5857 ccan_list_for_each(&r->threads.set, th, lt_node) {
5858 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5859 found = 1;
5860 }
5861 else if (th->locking_mutex) {
5862 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5863 if (mutex->fiber_serial == rb_fiber_serial(th->ec->fiber_ptr) || (!mutex->fiber_serial && !ccan_list_empty(&mutex->waitq))) {
5864 found = 1;
5865 }
5866 }
5867 if (found)
5868 break;
5869 }
5870
5871 if (!found) {
5872 VALUE argv[2];
5873 argv[0] = rb_eFatal;
5874 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5875 debug_deadlock_check(r, argv[1]);
5876 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5877 rb_threadptr_raise(r->threads.main, 2, argv);
5878 }
5879}
5880
5881static void
5882update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5883{
5884 const rb_control_frame_t *cfp = GET_EC()->cfp;
5885 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5886 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5887 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5888 if (lines) {
5889 long line = rb_sourceline() - 1;
5890 VM_ASSERT(line >= 0);
5891 long count;
5892 VALUE num;
5893 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5894 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5895 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5896 rb_ary_push(lines, LONG2FIX(line + 1));
5897 return;
5898 }
5899 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5900 return;
5901 }
5902 num = RARRAY_AREF(lines, line);
5903 if (!FIXNUM_P(num)) return;
5904 count = FIX2LONG(num) + 1;
5905 if (POSFIXABLE(count)) {
5906 RARRAY_ASET(lines, line, LONG2FIX(count));
5907 }
5908 }
5909 }
5910}
5911
5912static void
5913update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5914{
5915 const rb_control_frame_t *cfp = GET_EC()->cfp;
5916 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5917 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5918 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5919 if (branches) {
5920 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5921 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5922 VALUE counters = RARRAY_AREF(branches, 1);
5923 VALUE num = RARRAY_AREF(counters, idx);
5924 count = FIX2LONG(num) + 1;
5925 if (POSFIXABLE(count)) {
5926 RARRAY_ASET(counters, idx, LONG2FIX(count));
5927 }
5928 }
5929 }
5930}
5931
5932const rb_method_entry_t *
5933rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5934{
5935 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5936
5937 if (!me->def) return NULL; // negative cme
5938
5939 retry:
5940 switch (me->def->type) {
5941 case VM_METHOD_TYPE_ISEQ: {
5942 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5943 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5944 path = rb_iseq_path(iseq);
5945 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5946 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5947 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5948 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5949 break;
5950 }
5951 case VM_METHOD_TYPE_BMETHOD: {
5952 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5953 if (iseq) {
5954 rb_iseq_location_t *loc;
5955 rb_iseq_check(iseq);
5956 path = rb_iseq_path(iseq);
5957 loc = &ISEQ_BODY(iseq)->location;
5958 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5959 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5960 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5961 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5962 break;
5963 }
5964 return NULL;
5965 }
5966 case VM_METHOD_TYPE_ALIAS:
5967 me = me->def->body.alias.original_me;
5968 goto retry;
5969 case VM_METHOD_TYPE_REFINED:
5970 me = me->def->body.refined.orig_me;
5971 if (!me) return NULL;
5972 goto retry;
5973 default:
5974 return NULL;
5975 }
5976
5977 /* found */
5978 if (RB_TYPE_P(path, T_ARRAY)) {
5979 path = rb_ary_entry(path, 1);
5980 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5981 }
5982 if (resolved_location) {
5983 resolved_location[0] = path;
5984 resolved_location[1] = beg_pos_lineno;
5985 resolved_location[2] = beg_pos_column;
5986 resolved_location[3] = end_pos_lineno;
5987 resolved_location[4] = end_pos_column;
5988 }
5989 return me;
5990}
5991
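/*
 * Method coverage hook (RUBY_EVENT_CALL): resolves the called method entry's
 * source location and bumps its counter in the me2counter hash registered
 * with the hook, again capped to the Fixnum range.
 */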
5992static void
5993update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5994{
5995 const rb_control_frame_t *cfp = GET_EC()->cfp;
5996 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5997 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5998 VALUE rcount;
5999 long count;
6000
6001 me = rb_resolve_me_location(me, 0);
6002 if (!me) return;
6003
6004 rcount = rb_hash_aref(me2counter, (VALUE) me);
6005 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
6006 if (POSFIXABLE(count)) {
6007 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
6008 }
6009}
6010
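/*
 * Coverage state is kept VM-wide: the coverages table, the COVERAGE_TARGET_*
 * mode bits, and the me2counter hash used for method coverage.
 */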
6011VALUE
6012rb_get_coverages(void)
6013{
6014 return GET_VM()->coverages;
6015}
6016
6017int
6018rb_get_coverage_mode(void)
6019{
6020 return GET_VM()->coverage_mode;
6021}
6022
6023void
6024rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
6025{
6026 GET_VM()->coverages = coverages;
6027 GET_VM()->me2counter = me2counter;
6028 GET_VM()->coverage_mode = mode;
6029}
6030
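/*
 * Installs the raw-arg event hooks defined above: line coverage always,
 * branch and method coverage only when the corresponding COVERAGE_TARGET_*
 * bits are set. rb_suspend_coverages() removes the same hooks; the Coverage
 * extension drives this pair when measurement is paused and resumed.
 */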
6031void
6032rb_resume_coverages(void)
6033{
6034 int mode = GET_VM()->coverage_mode;
6035 VALUE me2counter = GET_VM()->me2counter;
6036 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6037 if (mode & COVERAGE_TARGET_BRANCHES) {
6038 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6039 }
6040 if (mode & COVERAGE_TARGET_METHODS) {
6041 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6042 }
6043}
6044
6045void
6046rb_suspend_coverages(void)
6047{
6048 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6049 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6050 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6051 }
6052 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6053 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6054 }
6055}
6056
6057/* Make coverage arrays empty so old covered files are no longer tracked. */
6058void
6059rb_reset_coverages(void)
6060{
6061 rb_clear_coverages();
6062 rb_iseq_remove_coverage_all();
6063 GET_VM()->coverages = Qfalse;
6064}
6065
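/*
 * Builds the per-iseq coverage record: a hidden 3-element array whose lines
 * slot holds an n-element counter array (n is expected to be the number of
 * source lines) when line coverage is enabled, and whose branches slot holds
 * the [structure, counters] pair documented in the comment below.
 */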
6066VALUE
6067rb_default_coverage(int n)
6068{
6069 VALUE coverage = rb_ary_hidden_new_fill(3);
6070 VALUE lines = Qfalse, branches = Qfalse;
6071 int mode = GET_VM()->coverage_mode;
6072
6073 if (mode & COVERAGE_TARGET_LINES) {
6074 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
6075 }
6076 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6077
6078 if (mode & COVERAGE_TARGET_BRANCHES) {
6079 branches = rb_ary_hidden_new_fill(2);
6080 /* internal data structures for branch coverage:
6081 *
6082 * { branch base node =>
6083 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
6084 * branch target id =>
6085 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
6086 * ...
6087 * }],
6088 * ...
6089 * }
6090 *
6091 * Example:
6092 * { NODE_CASE =>
6093 * [1, 0, 4, 3, {
6094 * NODE_WHEN => [2, 8, 2, 9, 0],
6095 * NODE_WHEN => [3, 8, 3, 9, 1],
6096 * ...
6097 * }],
6098 * ...
6099 * }
6100 */
6101 VALUE structure = rb_hash_new();
6102 rb_obj_hide(structure);
6103 RARRAY_ASET(branches, 0, structure);
6104 /* branch execution counters */
6105 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
6106 }
6107 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6108
6109 return coverage;
6110}
6111
6112static VALUE
6113uninterruptible_exit(VALUE v)
6114{
6115 rb_thread_t *cur_th = GET_THREAD();
6116 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6117
6118 cur_th->pending_interrupt_queue_checked = 0;
6119 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6120 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6121 }
6122 return Qnil;
6123}
6124
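/*
 * Runs b_proc with every interrupt deferred (an Object => :never mask, as
 * with Thread.handle_interrupt), restores the mask in an ensure, and then
 * lets any pending interrupt fire. Illustrative use from C, with placeholder
 * names (sketch only):
 *
 *   static VALUE critical_body(VALUE arg) { ... return Qnil; }
 *   rb_uninterruptible(critical_body, (VALUE)arg);
 */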
6125VALUE
6126rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6127{
6128 VALUE interrupt_mask = rb_ident_hash_new();
6129 rb_thread_t *cur_th = GET_THREAD();
6130
6131 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6132 OBJ_FREEZE(interrupt_mask);
6133 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6134
6135 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6136
6137 RUBY_VM_CHECK_INTS(cur_th->ec);
6138 return ret;
6139}
6140
6141static void
6142thread_specific_storage_alloc(rb_thread_t *th)
6143{
6144 VM_ASSERT(th->specific_storage == NULL);
6145
6146 if (UNLIKELY(specific_key_count > 0)) {
6147 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6148 }
6149}
6150
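/*
 * Internal per-thread storage for tools (e.g. profilers) written as C
 * extensions. The first key must be created while only the main ractor
 * exists, and at most RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX keys are available.
 * Illustrative use, with placeholder names (sketch only):
 *
 *   static rb_internal_thread_specific_key_t key;
 *   key = rb_internal_thread_specific_key_create();
 *   rb_internal_thread_specific_set(thread_value, key, ptr);
 *   void *p = rb_internal_thread_specific_get(thread_value, key);
 */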
6151rb_internal_thread_specific_key_t
6152rb_internal_thread_specific_key_create(void)
6153{
6154 rb_vm_t *vm = GET_VM();
6155
6156 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6157 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6158 }
6159 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6160 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6161 }
6162 else {
6163 rb_internal_thread_specific_key_t key = specific_key_count++;
6164
6165 if (key == 0) {
6166 // allocate
6167 rb_ractor_t *cr = GET_RACTOR();
6168 rb_thread_t *th;
6169
6170 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6171 thread_specific_storage_alloc(th);
6172 }
6173 }
6174 return key;
6175 }
6176}
6177
6178// async and native thread safe.
6179void *
6180rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6181{
6182 rb_thread_t *th = DATA_PTR(thread_val);
6183
6184 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6185 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6186 VM_ASSERT(th->specific_storage);
6187
6188 return th->specific_storage[key];
6189}
6190
6191// async and native thread safe.
6192void
6193rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6194{
6195 rb_thread_t *th = DATA_PTR(thread_val);
6196
6197 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6198 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6199 VM_ASSERT(th->specific_storage);
6200
6201 th->specific_storage[key] = data;
6202}
6203
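/*
 * interrupt_exec: a per-thread queue of (func, data) tasks protected by
 * th->interrupt_lock. rb_threadptr_interrupt_exec() enqueues a task and flags
 * an interrupt so the target thread runs it at its next interrupt check.
 * rb_interrupt_exec_flag_value_data marks data as a VALUE for GC marking;
 * rb_interrupt_exec_flag_new_thread runs the task on a freshly created thread.
 */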
6204// interrupt_exec
6205
6206struct rb_interrupt_exec_task {
6207 struct ccan_list_node node;
6208
6209 rb_interrupt_exec_func_t *func;
6210 void *data;
6211 enum rb_interrupt_exec_flag flags;
6212};
6213
6214void
6215rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6216{
6217 struct rb_interrupt_exec_task *task;
6218
6219 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6220 if (task->flags & rb_interrupt_exec_flag_value_data) {
6221 rb_gc_mark((VALUE)task->data);
6222 }
6223 }
6224}
6225
6226// native thread safe
6227// th should be available
6228void
6229rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6230{
6231 // should not use ALLOC
6232 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6233 *task = (struct rb_interrupt_exec_task) {
6234 .flags = flags,
6235 .func = func,
6236 .data = data,
6237 };
6238
6239 rb_native_mutex_lock(&th->interrupt_lock);
6240 {
6241 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6242 threadptr_set_interrupt_locked(th, true);
6243 }
6244 rb_native_mutex_unlock(&th->interrupt_lock);
6245}
6246
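/*
 * Drains the interrupt_exec queue of the given thread: pops one task at a
 * time under interrupt_lock, runs it outside the lock (directly, or on a new
 * thread when flagged), and frees it.
 */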
6247static void
6248threadptr_interrupt_exec_exec(rb_thread_t *th)
6249{
6250 while (1) {
6251 struct rb_interrupt_exec_task *task;
6252
6253 rb_native_mutex_lock(&th->interrupt_lock);
6254 {
6255 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6256 }
6257 rb_native_mutex_unlock(&th->interrupt_lock);
6258
6259 RUBY_DEBUG_LOG("task:%p", task);
6260
6261 if (task) {
6262 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6263 rb_thread_create(task->func, task->data);
6264 }
6265 else {
6266 (*task->func)(task->data);
6267 }
6268 ruby_xfree(task);
6269 }
6270 else {
6271 break;
6272 }
6273 }
6274}
6275
6276static void
6277threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6278{
6279 rb_native_mutex_lock(&th->interrupt_lock);
6280 {
6281 struct rb_interrupt_exec_task *task;
6282
6283 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6284 ruby_xfree(task);
6285 }
6286 }
6287 rb_native_mutex_unlock(&th->interrupt_lock);
6288}
6289
6290// native thread safe
6291// func/data should be native thread safe
6292void
6293rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6294 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6295{
6296 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6297
6298 rb_thread_t *main_th = target_r->threads.main;
6299 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
6300}
6301