Ruby 3.5.0dev (2025-07-22 revision 8541dec8c4f118d09a7dcb4b83540ec0cb0d02db)
thread.c (8541dec8c4f118d09a7dcb4b83540ec0cb0d02db)
/**********************************************************************

  thread.c -

  $Author$

  Copyright (C) 2004-2007 Koichi Sasada

**********************************************************************/

/*
  YARV Thread Design

  model 1: Userlevel Thread
    Same as the traditional Ruby thread.

  model 2: Native Thread with Global VM lock
    Uses native threads (pthreads or Windows threads); Ruby threads run
    concurrently.

  model 3: Native Thread with fine grain lock
    Uses native threads; Ruby threads run concurrently or in parallel.

  model 4: M:N User:Native threads with Global VM lock
    Combination of models 1 and 2.

  model 5: M:N User:Native threads with fine grain lock
    Combination of models 1 and 3.

------------------------------------------------------------------------

  model 2:
    Only the thread that holds the mutex (GVL: Global VM Lock, or Giant
    VM Lock) can run. When scheduling, the running thread releases the
    GVL. If the running thread performs a blocking operation, it must
    release the GVL so that another thread can continue. After a blocking
    operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).

    Each VM can run in parallel with other VMs.

    Ruby threads are scheduled by the OS thread scheduler.

------------------------------------------------------------------------

  model 3:
    All threads run concurrently or in parallel, so exclusive access
    control is needed to touch shared objects. For example, to access a
    String or Array object, a fine-grained lock must be taken every time.
 */
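
/*
  A minimal sketch of the model 2 pattern described above (illustrative
  pseudo-code, not VM code; the real mechanism is the BLOCKING_REGION
  macro defined later in this file):

      release_gvl();             // other Ruby threads may now run
      r = read(fd, buf, len);    // blocking operation; touch no Ruby objects
      acquire_gvl();
      RUBY_VM_CHECK_INTS(ec);    // handle Thread#kill, signals, etc.
 */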

/*
 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
 * 2.15 or later and setting _FORTIFY_SOURCE > 0.
 * However, the implementation is wrong. Even though Linux's select(2)
 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
 * enabled, it doesn't work correctly and makes the program abort. Therefore
 * we need to disable FORTIFY_SOURCE until glibc fixes it.
 */
#undef _FORTIFY_SOURCE
#undef __USE_FORTIFY_LEVEL
#define __USE_FORTIFY_LEVEL 0

/* for model 2 */

#include "ruby/internal/config.h"

#ifdef __linux__
// Normally, gcc(1) translates calls to alloca() with inlined code. This is
// not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option
// is given and the header <alloca.h> is not included.
# include <alloca.h>
#endif

#define TH_SCHED(th) (&(th)->ractor->threads.sched)

#include "eval_intern.h"
#include "hrtime.h"
#include "internal.h"
#include "internal/class.h"
#include "internal/cont.h"
#include "internal/error.h"
#include "internal/gc.h"
#include "internal/hash.h"
#include "internal/io.h"
#include "internal/object.h"
#include "internal/proc.h"
#include "internal/signal.h"
#include "internal/thread.h"
#include "internal/time.h"
#include "internal/warnings.h"
#include "iseq.h"
#include "ruby/debug.h"
#include "ruby/io.h"
#include "ruby/thread.h"
#include "ruby/thread_native.h"
#include "timev.h"
#include "vm_core.h"
#include "ractor_core.h"
#include "vm_debug.h"
#include "vm_sync.h"

#include "ccan/list/list.h"

#ifndef USE_NATIVE_THREAD_PRIORITY
#define USE_NATIVE_THREAD_PRIORITY 0
#define RUBY_THREAD_PRIORITY_MAX 3
#define RUBY_THREAD_PRIORITY_MIN -3
#endif

static VALUE rb_cThreadShield;
static VALUE cThGroup;

static VALUE sym_immediate;
static VALUE sym_on_blocking;
static VALUE sym_never;

static uint32_t thread_default_quantum_ms = 100;

#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)

static inline VALUE
rb_thread_local_storage(VALUE thread)
{
    if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
        rb_ivar_set(thread, idLocals, rb_hash_new());
        RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
    }
    return rb_ivar_get(thread, idLocals);
}
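
/*
  Usage sketch (illustrative; `key` and `value` are hypothetical): thread
  local APIs fetch this lazily-created hash and index it:

      VALUE locals = rb_thread_local_storage(thread);
      rb_hash_aset(locals, key, value);
 */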

enum SLEEP_FLAGS {
    SLEEP_DEADLOCKABLE   = 0x01,
    SLEEP_SPURIOUS_CHECK = 0x02,
    SLEEP_ALLOW_SPURIOUS = 0x04,
    SLEEP_NO_CHECKINTS   = 0x08,
};

static void sleep_forever(rb_thread_t *th, unsigned int fl);
static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);

static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
static int rb_threadptr_dead(rb_thread_t *th);
static void rb_check_deadlock(rb_ractor_t *r);
static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
static const char *thread_status_name(rb_thread_t *th, int detail);
static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
MAYBE_UNUSED(static int consume_communication_pipe(int fd));

static rb_atomic_t system_working = 1;
static rb_internal_thread_specific_key_t specific_key_count;

/********************************************************************************/

#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

struct rb_blocking_region_buffer {
    enum rb_thread_status prev_status;
};

static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
static void unblock_function_clear(rb_thread_t *th);

static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                                        rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);

#define THREAD_BLOCKING_BEGIN(th) do { \
  struct rb_thread_sched * const sched = TH_SCHED(th); \
  RB_VM_SAVE_MACHINE_CONTEXT(th); \
  thread_sched_to_waiting((sched), (th));

#define THREAD_BLOCKING_END(th) \
  thread_sched_to_running((sched), (th)); \
  rb_ractor_thread_switch(th->ractor, th, false); \
} while(0)

#ifdef __GNUC__
#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
#else
#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
#endif
#else
#define only_if_constant(expr, notconst) notconst
#endif
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
    struct rb_blocking_region_buffer __region; \
    if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
        /* always return true unless fail_if_interrupted */ \
        !only_if_constant(fail_if_interrupted, TRUE)) { \
        /* Important that this is inlined into the macro, and not part of \
         * blocking_region_begin - see bug #20493 */ \
        RB_VM_SAVE_MACHINE_CONTEXT(th); \
        thread_sched_to_waiting(TH_SCHED(th), th); \
        exec; \
        blocking_region_end(th, &__region); \
    }; \
} while(0)
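
/*
  Typical BLOCKING_REGION use, as in rb_nogvl() below: run `func` with the
  GVL released and a UBF registered so the call can be interrupted:

      BLOCKING_REGION(th, {
          val = func(data1);
          saved_errno = rb_errno();
      }, ubf, data2, FALSE);
 */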

/*
 * returns true if this thread was spuriously interrupted, false otherwise
 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
 */
#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
static inline int
vm_check_ints_blocking(rb_execution_context_t *ec)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif

    rb_thread_t *th = rb_ec_thread_ptr(ec);

    if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
        if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
    }
    else {
        th->pending_interrupt_queue_checked = 0;
        RUBY_VM_SET_INTERRUPT(ec);
    }
    return rb_threadptr_execute_interrupts(th, 1);
}

int
rb_vm_check_ints_blocking(rb_execution_context_t *ec)
{
    return vm_check_ints_blocking(ec);
}

/*
 * poll() is supported by many OSes, but so far Linux is the only
 * one we know of that supports using poll() in all places select()
 * would work.
 */
#if defined(HAVE_POLL)
# if defined(__linux__)
#   define USE_POLL
# endif
# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
#  define USE_POLL
   /* FreeBSD does not set POLLOUT when POLLHUP happens */
#  define POLLERR_SET (POLLHUP | POLLERR)
# endif
#endif

static void
timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
                const struct timeval *timeout)
{
    if (timeout) {
        *rel = rb_timeval2hrtime(timeout);
        *end = rb_hrtime_add(rb_hrtime_now(), *rel);
        *to = rel;
    }
    else {
        *to = 0;
    }
}
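
/*
  Callers use timeout_prepare() like this (illustrative sketch):

      rb_hrtime_t rel, end, *to;
      timeout_prepare(&to, &rel, &end, timeout);
      // `to` is NULL when there is no timeout; otherwise it points at
      // `rel`, and `end` holds the absolute expiry for re-arming waits.
 */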

MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));

#include THREAD_IMPL_SRC

/*
 * TODO: somebody with win32 knowledge should be able to get rid of
 * timer-thread by busy-waiting on signals. And it should be possible
 * to make the GVL in thread_pthread.c be platform-independent.
 */
#ifndef BUSY_WAIT_SIGNALS
# define BUSY_WAIT_SIGNALS (0)
#endif

#ifndef USE_EVENTFD
# define USE_EVENTFD (0)
#endif

#include "thread_sync.c"

void
rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_initialize(lock);
}

void
rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_destroy(lock);
}

void
rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_lock(lock);
}

void
rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
{
    rb_native_mutex_unlock(lock);
}

static int
unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
{
    do {
        if (fail_if_interrupted) {
            if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
                return FALSE;
            }
        }
        else {
            RUBY_VM_CHECK_INTS(th->ec);
        }

        rb_native_mutex_lock(&th->interrupt_lock);
    } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
             (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));

    VM_ASSERT(th->unblock.func == NULL);

    th->unblock.func = func;
    th->unblock.arg = arg;
    rb_native_mutex_unlock(&th->interrupt_lock);

    return TRUE;
}

static void
unblock_function_clear(rb_thread_t *th)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    th->unblock.func = 0;
    rb_native_mutex_unlock(&th->interrupt_lock);
}

static void
threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
{
    // th->interrupt_lock should be acquired here

    RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);

    if (trap) {
        RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
    }
    else {
        RUBY_VM_SET_INTERRUPT(th->ec);
    }

    if (th->unblock.func != NULL) {
        (th->unblock.func)(th->unblock.arg);
    }
    else {
        /* none */
    }
}

static void
threadptr_set_interrupt(rb_thread_t *th, int trap)
{
    rb_native_mutex_lock(&th->interrupt_lock);
    {
        threadptr_set_interrupt_locked(th, trap);
    }
    rb_native_mutex_unlock(&th->interrupt_lock);
}

/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
void
rb_threadptr_interrupt(rb_thread_t *th)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    threadptr_set_interrupt(th, false);
}

static void
threadptr_trap_interrupt(rb_thread_t *th)
{
    threadptr_set_interrupt(th, true);
}

static void
terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
{
    rb_thread_t *th = 0;

    ccan_list_for_each(&r->threads.set, th, lt_node) {
        if (th != main_thread) {
            RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));

            rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
            rb_threadptr_interrupt(th);

            RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
        }
        else {
            RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
        }
    }
}

static void
rb_threadptr_join_list_wakeup(rb_thread_t *thread)
{
    while (thread->join_list) {
        struct rb_waiting_list *join_list = thread->join_list;

        // Consume the entry from the join list:
        thread->join_list = join_list->next;

        rb_thread_t *target_thread = join_list->thread;

        if (target_thread->scheduler != Qnil && join_list->fiber) {
            rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
        }
        else {
            rb_threadptr_interrupt(target_thread);

            switch (target_thread->status) {
              case THREAD_STOPPED:
              case THREAD_STOPPED_FOREVER:
                target_thread->status = THREAD_RUNNABLE;
                break;
              default:
                break;
            }
        }
    }
}

void
rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
{
    while (th->keeping_mutexes) {
        rb_mutex_t *mutex = th->keeping_mutexes;
        th->keeping_mutexes = mutex->next_mutex;

        // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);

        const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
        if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
    }
}

void
rb_thread_terminate_all(rb_thread_t *th)
{
    rb_ractor_t *cr = th->ractor;
    rb_execution_context_t * volatile ec = th->ec;
    volatile int sleeping = 0;

    if (cr->threads.main != th) {
        rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
               (void *)cr->threads.main, (void *)th);
    }

    /* unlock all locking mutexes */
    rb_threadptr_unlock_all_locking_mutexes(th);

    EC_PUSH_TAG(ec);
    if (EC_EXEC_TAG() == TAG_NONE) {
      retry:
        RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));

        terminate_all(cr, th);

        while (rb_ractor_living_thread_num(cr) > 1) {
            rb_hrtime_t rel = RB_HRTIME_PER_SEC;
            /*
             * The thread-exit routine in thread_start_func_2 notifies
             * us when the last sub-thread exits.
             */
            sleeping = 1;
            native_sleep(th, &rel);
            RUBY_VM_CHECK_INTS_BLOCKING(ec);
            sleeping = 0;
        }
    }
    else {
        /*
         * When an exception is caught (e.g. Ctrl+C), broadcast the kill
         * request again to ensure all threads are killed, even if they
         * are blocked on sleep, mutex, etc.
         */
        if (sleeping) {
            sleeping = 0;
            goto retry;
        }
    }
    EC_POP_TAG();
}

void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);

static void
thread_cleanup_func_before_exec(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    th->status = THREAD_KILLED;

    // The thread stack doesn't exist in the forked process:
    th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;

    threadptr_interrupt_exec_cleanup(th);
    rb_threadptr_root_fiber_terminate(th);
}

static void
thread_cleanup_func(void *th_ptr, int atfork)
{
    rb_thread_t *th = th_ptr;

    th->locking_mutex = Qfalse;
    thread_cleanup_func_before_exec(th_ptr);

    if (atfork) {
        native_thread_destroy_atfork(th->nt);
        th->nt = NULL;
        return;
    }

    rb_native_mutex_destroy(&th->interrupt_lock);
}

static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
static VALUE rb_thread_to_s(VALUE thread);

void
ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
{
    native_thread_init_stack(th, local_in_parent_frame);
}

const VALUE *
rb_vm_proc_local_ep(VALUE proc)
{
    const VALUE *ep = vm_proc_ep(proc);

    if (ep) {
        return rb_vm_ep_local_ep(ep);
    }
    else {
        return NULL;
    }
}

// for ractor, defined in vm.c
VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
                                  int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);

static VALUE
thread_do_start_proc(rb_thread_t *th)
{
    VALUE args = th->invoke_arg.proc.args;
    const VALUE *args_ptr;
    int args_len;
    VALUE procval = th->invoke_arg.proc.proc;
    rb_proc_t *proc;
    GetProcPtr(procval, proc);

    th->ec->errinfo = Qnil;
    th->ec->root_lep = rb_vm_proc_local_ep(procval);
    th->ec->root_svar = Qfalse;

    vm_check_ints_blocking(th->ec);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        VALUE self = rb_ractor_self(th->ractor);
        th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);

        VM_ASSERT(FIXNUM_P(args));
        args_len = FIX2INT(args);
        args_ptr = ALLOCA_N(VALUE, args_len);
        rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc_with_self(
            th->ec, proc, self,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
    else {
        args_len = RARRAY_LENINT(args);
        if (args_len < 8) {
            /* free proc.args if the length is small enough */
            args_ptr = ALLOCA_N(VALUE, args_len);
            MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
            th->invoke_arg.proc.args = Qnil;
        }
        else {
            args_ptr = RARRAY_CONST_PTR(args);
        }

        vm_check_ints_blocking(th->ec);

        return rb_vm_invoke_proc(
            th->ec, proc,
            args_len, args_ptr,
            th->invoke_arg.proc.kw_splat,
            VM_BLOCK_HANDLER_NONE
        );
    }
}

static VALUE
thread_do_start(rb_thread_t *th)
{
    native_set_thread_name(th);
    VALUE result = Qundef;

    switch (th->invoke_type) {
      case thread_invoke_type_proc:
        result = thread_do_start_proc(th);
        break;

      case thread_invoke_type_ractor_proc:
        result = thread_do_start_proc(th);
        rb_ractor_atexit(th->ec, result);
        break;

      case thread_invoke_type_func:
        result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
        break;

      case thread_invoke_type_none:
        rb_bug("unreachable");
    }

    return result;
}

void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);

static int
thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
{
    RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
    VM_ASSERT(th != th->vm->ractor.main_thread);

    enum ruby_tag_type state;
    VALUE errinfo = Qnil;
    rb_thread_t *ractor_main_th = th->ractor->threads.main;

    // setup ractor
    if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
        RB_VM_LOCK();
        {
            rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
            rb_ractor_t *r = th->ractor;
            r->r_stdin = rb_io_prep_stdin();
            r->r_stdout = rb_io_prep_stdout();
            r->r_stderr = rb_io_prep_stderr();
        }
        RB_VM_UNLOCK();
    }

    // Ensure that we are not joinable.
    VM_ASSERT(UNDEF_P(th->value));

    int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
    VALUE result = Qundef;

    EC_PUSH_TAG(th->ec);

    if ((state = EC_EXEC_TAG()) == TAG_NONE) {
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);

        result = thread_do_start(th);
    }

    if (!fiber_scheduler_closed) {
        fiber_scheduler_closed = 1;
        rb_fiber_scheduler_set(Qnil);
    }

    if (!event_thread_end_hooked) {
        event_thread_end_hooked = 1;
        EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
    }

    if (state == TAG_NONE) {
        // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
        th->value = result;
    }
    else {
        errinfo = th->ec->errinfo;

        VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
        if (!NIL_P(exc)) errinfo = exc;

        if (state == TAG_FATAL) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit(th->ec, Qnil);
            }
            /* fatal error within this thread, need to stop whole script */
        }
        else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            /* exit on main_thread. */
        }
        else {
            if (th->report_on_exception) {
                VALUE mesg = rb_thread_to_s(th->self);
                rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
                rb_write_error_str(mesg);
                rb_ec_error_print(th->ec, errinfo);
            }

            if (th->invoke_type == thread_invoke_type_ractor_proc) {
                rb_ractor_atexit_exception(th->ec);
            }

            if (th->vm->thread_abort_on_exception ||
                th->abort_on_exception || RTEST(ruby_debug)) {
                /* exit on main_thread */
            }
            else {
                errinfo = Qnil;
            }
        }
        th->value = Qnil;
    }

    // The thread is effectively finished and can be joined.
    VM_ASSERT(!UNDEF_P(th->value));

    rb_threadptr_join_list_wakeup(th);
    rb_threadptr_unlock_all_locking_mutexes(th);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        rb_thread_terminate_all(th);
        rb_ractor_teardown(th->ec);
    }

    th->status = THREAD_KILLED;
    RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));

    if (th->vm->ractor.main_thread == th) {
        ruby_stop(0);
    }

    if (RB_TYPE_P(errinfo, T_OBJECT)) {
        /* treat with normal error object */
        rb_threadptr_raise(ractor_main_th, 1, &errinfo);
    }

    EC_POP_TAG();

    rb_ec_clear_current_thread_trace_func(th->ec);

    /* locking_mutex must be Qfalse */
    if (th->locking_mutex != Qfalse) {
        rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
               (void *)th, th->locking_mutex);
    }

    if (ractor_main_th->status == THREAD_KILLED &&
        th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
        /* I'm the last thread. Wake up the main thread from rb_thread_terminate_all. */
        rb_threadptr_interrupt(ractor_main_th);
    }

    rb_check_deadlock(th->ractor);

    rb_fiber_close(th->ec->fiber_ptr);

    thread_cleanup_func(th, FALSE);
    VM_ASSERT(th->ec->vm_stack == NULL);

    if (th->invoke_type == thread_invoke_type_ractor_proc) {
        // after rb_ractor_living_threads_remove()
        // GC will happen anytime and this ractor can be collected (and destroy GVL).
        // So gvl_release() should be before it.
        thread_sched_to_dead(TH_SCHED(th), th);
        rb_ractor_living_threads_remove(th->ractor, th);
    }
    else {
        rb_ractor_living_threads_remove(th->ractor, th);
        thread_sched_to_dead(TH_SCHED(th), th);
    }

    return 0;
}

struct thread_create_params {
    enum thread_invoke_type type;

    // for normal proc thread
    VALUE args;
    VALUE proc;

    // for ractor
    rb_ractor_t *g;

    // for func
    VALUE (*fn)(void *);
};

static void thread_specific_storage_alloc(rb_thread_t *th);

static VALUE
thread_create_core(VALUE thval, struct thread_create_params *params)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
    int err;

    thread_specific_storage_alloc(th);

    if (OBJ_FROZEN(current_th->thgroup)) {
        rb_raise(rb_eThreadError,
                 "can't start a new thread (frozen ThreadGroup)");
    }

    rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);

    switch (params->type) {
      case thread_invoke_type_proc:
        th->invoke_type = thread_invoke_type_proc;
        th->invoke_arg.proc.args = params->args;
        th->invoke_arg.proc.proc = params->proc;
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        break;

      case thread_invoke_type_ractor_proc:
#if RACTOR_CHECK_MODE > 0
        rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
#endif
        th->invoke_type = thread_invoke_type_ractor_proc;
        th->ractor = params->g;
        th->ractor->threads.main = th;
        th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
        th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
        th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
        rb_ractor_send_parameters(ec, params->g, params->args);
        break;

      case thread_invoke_type_func:
        th->invoke_type = thread_invoke_type_func;
        th->invoke_arg.func.func = params->fn;
        th->invoke_arg.func.arg = (void *)params->args;
        break;

      default:
        rb_bug("unreachable");
    }

    th->priority = current_th->priority;
    th->thgroup = current_th->thgroup;

    th->pending_interrupt_queue = rb_ary_hidden_new(0);
    th->pending_interrupt_queue_checked = 0;
    th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
    RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);

    rb_native_mutex_initialize(&th->interrupt_lock);

    RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));

    rb_ractor_living_threads_insert(th->ractor, th);

    /* kick thread */
    err = native_thread_create(th);
    if (err) {
        th->status = THREAD_KILLED;
        rb_ractor_living_threads_remove(th->ractor, th);
        rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
    }
    return thval;
}

#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)

/*
 * call-seq:
 *   Thread.new { ... }                  -> thread
 *   Thread.new(*args, &proc)            -> thread
 *   Thread.new(*args) { |args| ... }    -> thread
 *
 * Creates a new thread executing the given block.
 *
 * Any +args+ given to ::new will be passed to the block:
 *
 *     arr = []
 *     a, b, c = 1, 2, 3
 *     Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
 *     arr #=> [1, 2, 3]
 *
 * A ThreadError exception is raised if ::new is called without a block.
 *
 * If you're going to subclass Thread, be sure to call super in your
 * +initialize+ method, otherwise a ThreadError will be raised.
 */
static VALUE
thread_s_new(int argc, VALUE *argv, VALUE klass)
{
    rb_thread_t *th;
    VALUE thread = rb_thread_alloc(klass);

    if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
        rb_raise(rb_eThreadError, "can't alloc thread");
    }

    rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
    th = rb_thread_ptr(thread);
    if (!threadptr_initialized(th)) {
        rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
                 klass);
    }
    return thread;
}

/*
 * call-seq:
 *   Thread.start([args]*) {|args| block }   -> thread
 *   Thread.fork([args]*) {|args| block }    -> thread
 *
 * Basically the same as ::new. However, if class Thread is subclassed, then
 * calling +start+ in that subclass will not invoke the subclass's
 * +initialize+ method.
 */

static VALUE
thread_start(VALUE klass, VALUE args)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_proc,
        .args = args,
        .proc = rb_block_proc(),
    };
    return thread_create_core(rb_thread_alloc(klass), &params);
}

static VALUE
threadptr_invoke_proc_location(rb_thread_t *th)
{
    if (th->invoke_type == thread_invoke_type_proc) {
        return rb_proc_location(th->invoke_arg.proc.proc);
    }
    else {
        return Qnil;
    }
}

/* :nodoc: */
static VALUE
thread_initialize(VALUE thread, VALUE args)
{
    rb_thread_t *th = rb_thread_ptr(thread);

    if (!rb_block_given_p()) {
        rb_raise(rb_eThreadError, "must be called with a block");
    }
    else if (th->invoke_type != thread_invoke_type_none) {
        VALUE loc = threadptr_invoke_proc_location(th);
        if (!NIL_P(loc)) {
            rb_raise(rb_eThreadError,
                     "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
                     RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
        }
        else {
            rb_raise(rb_eThreadError, "already initialized thread");
        }
    }
    else {
        struct thread_create_params params = {
            .type = thread_invoke_type_proc,
            .args = args,
            .proc = rb_block_proc(),
        };
        return thread_create_core(thread, &params);
    }
}

VALUE
rb_thread_create(VALUE (*fn)(void *), void *arg)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_func,
        .fn = fn,
        .args = (VALUE)arg,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

VALUE
rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
{
    struct thread_create_params params = {
        .type = thread_invoke_type_ractor_proc,
        .g = r,
        .args = args,
        .proc = proc,
    };
    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
}

struct join_arg {
    struct rb_waiting_list *waiter;
    rb_thread_t *target;
    VALUE timeout;
    rb_hrtime_t *limit;
};

static VALUE
remove_from_join_list(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_thread = p->target;

    if (target_thread->status != THREAD_KILLED) {
        struct rb_waiting_list **join_list = &target_thread->join_list;

        while (*join_list) {
            if (*join_list == p->waiter) {
                *join_list = (*join_list)->next;
                break;
            }

            join_list = &(*join_list)->next;
        }
    }

    return Qnil;
}

static int
thread_finished(rb_thread_t *th)
{
    return th->status == THREAD_KILLED || !UNDEF_P(th->value);
}

static VALUE
thread_join_sleep(VALUE arg)
{
    struct join_arg *p = (struct join_arg *)arg;
    rb_thread_t *target_th = p->target, *th = p->waiter->thread;
    rb_hrtime_t end = 0, *limit = p->limit;

    if (limit) {
        end = rb_hrtime_add(*limit, rb_hrtime_now());
    }

    while (!thread_finished(target_th)) {
        VALUE scheduler = rb_fiber_scheduler_current();

        if (!limit) {
            if (scheduler != Qnil) {
                rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
            }
            else {
                sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
            }
        }
        else {
            if (hrtime_update_expire(limit, end)) {
                RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
                return Qfalse;
            }

            if (scheduler != Qnil) {
                VALUE timeout = rb_float_new(hrtime2double(*limit));
                rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
            }
            else {
                th->status = THREAD_STOPPED;
                native_sleep(th, limit);
            }
        }
        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
        th->status = THREAD_RUNNABLE;

        RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
    }

    return Qtrue;
}

static VALUE
thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
{
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = ec->thread_ptr;
    rb_fiber_t *fiber = ec->fiber_ptr;

    if (th == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be current thread");
    }

    if (th->ractor->threads.main == target_th) {
        rb_raise(rb_eThreadError, "Target thread must not be main thread");
    }

    RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->status != THREAD_KILLED) {
        struct rb_waiting_list waiter;
        waiter.next = target_th->join_list;
        waiter.thread = th;
        waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
        target_th->join_list = &waiter;

        struct join_arg arg;
        arg.waiter = &waiter;
        arg.target = target_th;
        arg.timeout = timeout;
        arg.limit = limit;

        if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
            return Qnil;
        }
    }

    RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

    if (target_th->ec->errinfo != Qnil) {
        VALUE err = target_th->ec->errinfo;

        if (FIXNUM_P(err)) {
            switch (err) {
              case INT2FIX(TAG_FATAL):
                RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));

                /* OK. killed. */
                break;
              default:
                if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
                    // root fiber killed in non-main thread
                    break;
                }
                rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
            }
        }
        else if (THROW_DATA_P(target_th->ec->errinfo)) {
            rb_bug("thread_join: THROW_DATA should not reach here.");
        }
        else {
            /* normal exception */
            rb_exc_raise(err);
        }
    }
    return target_th->self;
}

/*
 * call-seq:
 *   thr.join          -> thr
 *   thr.join(limit)   -> thr
 *
 * The calling thread will suspend execution and run this +thr+.
 *
 * Does not return until +thr+ exits or until the given +limit+ seconds have
 * passed.
 *
 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
 * returned.
 *
 * Any threads not joined will be killed when the main program exits.
 *
 * If +thr+ had previously raised an exception and the ::abort_on_exception or
 * $DEBUG flags are not set (so the exception has not yet been processed), it
 * will be processed at this time.
 *
 *    a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
 *    x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
 *    x.join # Let thread x finish, thread a will be killed on exit.
 *    #=> "axyz"
 *
 * The following example illustrates the +limit+ parameter.
 *
 *    y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
 *    puts "Waiting" until y.join(0.15)
 *
 * This will produce:
 *
 *    tick...
 *    Waiting
 *    tick...
 *    Waiting
 *    tick...
 *    tick...
 */

static VALUE
thread_join_m(int argc, VALUE *argv, VALUE self)
{
    VALUE timeout = Qnil;
    rb_hrtime_t rel = 0, *limit = 0;

    if (rb_check_arity(argc, 0, 1)) {
        timeout = argv[0];
    }

    // Convert the timeout eagerly, so it's always converted and deterministic
    /*
     * This supports INFINITY and negative values, so we can't use
     * rb_time_interval right now...
     */
    if (NIL_P(timeout)) {
        /* unlimited */
    }
    else if (FIXNUM_P(timeout)) {
        rel = rb_sec2hrtime(NUM2TIMET(timeout));
        limit = &rel;
    }
    else {
        limit = double2hrtime(&rel, rb_num2dbl(timeout));
    }

    return thread_join(rb_thread_ptr(self), timeout, limit);
}

/*
 * call-seq:
 *   thr.value   -> obj
 *
 * Waits for +thr+ to complete, using #join, and returns its value or raises
 * the exception which terminated the thread.
 *
 *    a = Thread.new { 2 + 2 }
 *    a.value   #=> 4
 *
 *    b = Thread.new { raise 'something went wrong' }
 *    b.value   #=> RuntimeError: something went wrong
 */

static VALUE
thread_value(VALUE self)
{
    rb_thread_t *th = rb_thread_ptr(self);
    thread_join(th, Qnil, 0);
    if (UNDEF_P(th->value)) {
        // If the thread is dead because we forked, th->value is still Qundef.
        return Qnil;
    }
    return th->value;
}

/*
 * Thread Scheduling
 */

static void
getclockofday(struct timespec *ts)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
        return;
#endif
    rb_timespec_now(ts);
}

/*
 * Don't inline this, since the library call is already time consuming
 * and we don't want "struct timespec" on the stack too long for GC
 */
NOINLINE(rb_hrtime_t rb_hrtime_now(void));
rb_hrtime_t
rb_hrtime_now(void)
{
    struct timespec ts;

    getclockofday(&ts);
    return rb_timespec2hrtime(&ts);
}

/*
 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
 * being uninitialized, maybe other versions, too.
 */
COMPILER_WARNING_PUSH
#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
#endif
#ifndef PRIu64
#define PRIu64 PRI_64_PREFIX "u"
#endif
/*
 * @end is the absolute time when @timeout is set to expire.
 * Returns true if @end has passed;
 * updates @timeout and returns false otherwise.
 */
static int
hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
{
    rb_hrtime_t now = rb_hrtime_now();

    if (now > end) return 1;

    RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);

    *timeout = end - now;
    return 0;
}
COMPILER_WARNING_POP

static int
sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static int
sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    int woke;
    rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());

    th->status = THREAD_STOPPED;
    RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
    while (th->status == THREAD_STOPPED) {
        native_sleep(th, &rel);
        woke = vm_check_ints_blocking(th->ec);
        if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
            break;
        if (hrtime_update_expire(&rel, end))
            break;
        woke = 1;
    }
    th->status = prev_status;
    return woke;
}

static void
sleep_forever(rb_thread_t *th, unsigned int fl)
{
    enum rb_thread_status prev_status = th->status;
    enum rb_thread_status status;
    int woke;

    status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
    th->status = status;

    if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);

    while (th->status == status) {
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_inc(th->ractor);
            rb_check_deadlock(th->ractor);
        }
        {
            native_sleep(th, 0);
        }
        if (fl & SLEEP_DEADLOCKABLE) {
            rb_ractor_sleeper_threads_dec(th->ractor);
        }
        if (fl & SLEEP_ALLOW_SPURIOUS) {
            break;
        }

        woke = vm_check_ints_blocking(th->ec);

        if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
            break;
        }
    }
    th->status = prev_status;
}
void
rb_thread_sleep_forever(void)
{
    RUBY_DEBUG_LOG("forever");
    sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
}

void
rb_thread_sleep_deadly(void)
{
    RUBY_DEBUG_LOG("deadly");
    sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
}

static void
rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
{
    VALUE scheduler = rb_fiber_scheduler_current();
    if (scheduler != Qnil) {
        rb_fiber_scheduler_block(scheduler, blocker, timeout);
    }
    else {
        RUBY_DEBUG_LOG("...");
        if (end) {
            sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
        }
        else {
            sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
        }
    }
}

void
rb_thread_wait_for(struct timeval time)
{
    rb_thread_t *th = GET_THREAD();

    sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
}

void
rb_ec_check_ints(rb_execution_context_t *ec)
{
    RUBY_VM_CHECK_INTS_BLOCKING(ec);
}

/*
 * CAUTION: This function causes thread switching.
 * rb_thread_check_ints() checks ruby's interrupts.
 * Some interrupts need thread switching, invoking handlers,
 * and so on.
 */

void
rb_thread_check_ints(void)
{
    rb_ec_check_ints(GET_EC());
}

/*
 * Hidden API for the tcl/tk wrapper.
 * There is no guarantee that it will be perpetuated.
 */
int
rb_thread_check_trap_pending(void)
{
    return rb_signal_buff_size() != 0;
}

/* This function can be called in blocking region. */
int
rb_thread_interrupted(VALUE thval)
{
    return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
}

void
rb_thread_sleep(int sec)
{
    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
}

static void
rb_thread_schedule_limits(uint32_t limits_us)
{
    if (!rb_thread_alone()) {
        rb_thread_t *th = GET_THREAD();
        RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);

        if (th->running_time_us >= limits_us) {
            RUBY_DEBUG_LOG("switch %s", "start");

            RB_VM_SAVE_MACHINE_CONTEXT(th);
            thread_sched_yield(TH_SCHED(th), th);
            rb_ractor_thread_switch(th->ractor, th, true);

            RUBY_DEBUG_LOG("switch %s", "done");
        }
    }
}

void
rb_thread_schedule(void)
{
    rb_thread_schedule_limits(0);
    RUBY_VM_CHECK_INTS(GET_EC());
}

/* blocking region */

static inline int
blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
                      rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
{
#ifdef RUBY_ASSERT_CRITICAL_SECTION
    VM_ASSERT(ruby_assert_critical_section_entered == 0);
#endif
    VM_ASSERT(th == GET_THREAD());

    region->prev_status = th->status;
    if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
        th->blocking_region_buffer = region;
        th->status = THREAD_STOPPED;
        rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);

        RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
        return TRUE;
    }
    else {
        return FALSE;
    }
}

static inline void
blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
{
    /* entry to ubf_list still permitted at this point, make it impossible: */
    unblock_function_clear(th);
    /* entry to ubf_list impossible at this point, so unregister is safe: */
    unregister_ubf_list(th);

    thread_sched_to_running(TH_SCHED(th), th);
    rb_ractor_thread_switch(th->ractor, th, false);

    th->blocking_region_buffer = 0;
    rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
    if (th->status == THREAD_STOPPED) {
        th->status = region->prev_status;
    }

    RUBY_DEBUG_LOG("end");

#ifndef _WIN32
    // GET_THREAD() clears WSAGetLastError()
    VM_ASSERT(th == GET_THREAD());
#endif
}

/*
 * Resolve sentinel unblock function values to their actual function pointers
 * and appropriate data2 values. This centralizes the logic for handling
 * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
 *
 * @param unblock_function Pointer to unblock function pointer (modified in place)
 * @param data2 Pointer to data2 pointer (modified in place)
 * @param thread Thread context for resolving data2 when needed
 * @return true if sentinel values were resolved, false otherwise
 */
bool
rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
{
    rb_unblock_function_t *ubf = *unblock_function;

    if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
        *unblock_function = ubf_select;
        *data2 = thread;
        return true;
    }
    return false;
}
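
/*
  For example (illustrative), a caller passing the RUBY_UBF_IO sentinel:

      rb_unblock_function_t *ubf = RUBY_UBF_IO;
      void *data2 = NULL;
      rb_thread_resolve_unblock_function(&ubf, &data2, th);
      // now ubf == ubf_select and data2 == th
 */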

void *
rb_nogvl(void *(*func)(void *), void *data1,
         rb_unblock_function_t *ubf, void *data2,
         int flags)
{
    if (flags & RB_NOGVL_OFFLOAD_SAFE) {
        VALUE scheduler = rb_fiber_scheduler_current();
        if (scheduler != Qnil) {
            struct rb_fiber_scheduler_blocking_operation_state state = {0};

            VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);

            if (!UNDEF_P(result)) {
                rb_errno_set(state.saved_errno);
                return state.result;
            }
        }
    }

    void *val = 0;
    rb_execution_context_t *ec = GET_EC();
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_vm_t *vm = rb_ec_vm_ptr(ec);
    bool is_main_thread = vm->ractor.main_thread == th;
    int saved_errno = 0;

    rb_thread_resolve_unblock_function(&ubf, &data2, th);

    if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
        if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
            vm->ubf_async_safe = 1;
        }
    }

    rb_vm_t *volatile saved_vm = vm;
    BLOCKING_REGION(th, {
        val = func(data1);
        saved_errno = rb_errno();
    }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
    vm = saved_vm;

    if (is_main_thread) vm->ubf_async_safe = 0;

    if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
        RUBY_VM_CHECK_INTS_BLOCKING(ec);
    }

    rb_errno_set(saved_errno);

    return val;
}

/*
 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
 *                               without interrupt processing.
 *
 * rb_thread_call_without_gvl() does:
 *   (1) Check interrupts.
 *   (2) release GVL.
 *       Other Ruby threads may run in parallel.
 *   (3) call func with data1
 *   (4) acquire GVL.
 *       Other Ruby threads can not run in parallel any more.
 *   (5) Check interrupts.
 *
 * rb_thread_call_without_gvl2() does:
 *   (1) Check interrupt and return if interrupted.
 *   (2) release GVL.
 *   (3) call func with data1 and a pointer to the flags.
 *   (4) acquire GVL.
 *
 * If another thread interrupts this thread (Thread#kill, signal delivery,
 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
 * toggling a cancellation flag, canceling the invocation of a call inside
 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
 *
 * There are built-in ubfs and you can specify these ubfs:
 *
 * * RUBY_UBF_IO: ubf for IO operation
 * * RUBY_UBF_PROCESS: ubf for process operation
 *
 * However, we can not guarantee that our built-in ubfs interrupt your
 * `func()' correctly. Be careful when using rb_thread_call_without_gvl().
 * If you don't provide a proper ubf(), your program will not stop for
 * Control+C or other shutdown events.
 *
 * "Check interrupts" in the above list means checking asynchronous
 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
 * request, and so on) and calling the corresponding procedures
 * (such as `trap' for signals, raising an exception for Thread#raise).
 * If `func()' finished when interrupts were received, you may want to skip
 * interrupt checking. For example, assume the following func() that reads
 * data from a file.
 *
 *   read_func(...) {
 *                   // (a) before read
 *     read(buffer); // (b) reading
 *                   // (c) after read
 *   }
 *
 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
 * `read_func()' and interrupts are checked. However, if an interrupt occurs
 * at (c), after the *read* operation is completed, checking interrupts is
 * harmful because it causes an irrevocable side effect: the read data will
 * vanish. To avoid such a problem, the `read_func()' should be used with
 * `rb_thread_call_without_gvl2()'.
 *
 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
 * immediately. This function does not show when the execution was interrupted.
 * For example, there are 4 possible timings: (a), (b), (c) and before calling
 * read_func(). You need to record the progress of read_func() and check
 * the progress after `rb_thread_call_without_gvl2()'. You may need to call
 * `rb_thread_check_ints()' at appropriate points, or your program cannot
 * run pending handlers such as those registered with `trap'.
 *
 * NOTE: You can not execute most of the Ruby C API or touch Ruby
 *       objects in `func()' and `ubf()', including raising an
 *       exception, because the current thread doesn't acquire the GVL
 *       (it causes synchronization problems). If you need to
 *       call Ruby functions, either use rb_thread_call_with_gvl()
 *       or read the source code of the C APIs and confirm their safety
 *       yourself.
 *
 * NOTE: In short, this API is difficult to use safely. Prefer other
 *       approaches if any are available. We lack experience using this
 *       API; please report any problems related to it.
 *
 * NOTE: Releasing the GVL and re-acquiring the GVL may be expensive
 *       operations for a short running `func()'. Be sure to benchmark and
 *       use this mechanism only when `func()' consumes enough time.
 *
 * Safe C API:
 * * rb_thread_interrupted() - check interrupt flag
 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
 *   they will work without GVL, and may acquire GVL when GC is needed.
 */
void *
rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
                            rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
}

void *
rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
                           rb_unblock_function_t *ubf, void *data2)
{
    return rb_nogvl(func, data1, ubf, data2, 0);
}
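
/*
  Usage sketch for rb_thread_call_without_gvl() (illustrative; `my_read',
  `my_cancel' and `ctx' are hypothetical):

      static void *my_read(void *p) {
          struct ctx *c = p;
          c->result = read(c->fd, c->buf, c->len); // blocking; GVL released
          return NULL;
      }

      static void my_cancel(void *p) { // UBF: make the read return
          struct ctx *c = p;
          close(c->fd);
      }

      rb_thread_call_without_gvl(my_read, &ctx, my_cancel, &ctx);
 */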

static int
waitfd_to_waiting_flag(int wfd_event)
{
    return wfd_event << 1;
}

static struct ccan_list_head *
rb_io_blocking_operations(struct rb_io *io)
{
    rb_serial_t fork_generation = GET_VM()->fork_gen;

    // On fork, all existing entries in this list (which are stack allocated) become invalid.
    // Therefore, we re-initialize the list which clears it.
    if (io->fork_generation != fork_generation) {
        ccan_list_head_init(&io->blocking_operations);
        io->fork_generation = fork_generation;
    }

    return &io->blocking_operations;
}

/*
 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
 * that are currently blocked on this IO for reading, writing or other operations.
 *
 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
 *
 * @parameter io The IO object on which the operation will block
 * @parameter blocking_operation The operation details including the execution context that will be blocked
 */
static void
rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
}

static void
rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    ccan_list_del(&blocking_operation->list);
}

struct io_blocking_operation_arguments {
    struct rb_io *io;
    struct rb_io_blocking_operation *blocking_operation;
};

static VALUE
io_blocking_operation_exit(VALUE _arguments)
{
    struct io_blocking_operation_arguments *arguments = (void*)_arguments;
    struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;

    rb_io_blocking_operation_pop(arguments->io, blocking_operation);

    rb_io_t *io = arguments->io;
    rb_thread_t *thread = io->closing_ec->thread_ptr;
    rb_fiber_t *fiber = io->closing_ec->fiber_ptr;

    if (thread->scheduler != Qnil) {
        // This can cause spurious wakeups...
        rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
    }
    else {
        rb_thread_wakeup(thread->self);
    }

    return Qnil;
}

/*
 * Called when a blocking operation completes or is interrupted. Removes the operation from
 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
 *
 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
 * through that mutex to ensure proper coordination with the closing thread.
 *
 * @parameter io The IO object the operation was performed on
 * @parameter blocking_operation The completed operation to clean up
 */
static void
rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
{
    VALUE wakeup_mutex = io->wakeup_mutex;

    // Indicate that the blocking operation is no longer active:
    blocking_operation->ec = NULL;

    if (RB_TEST(wakeup_mutex)) {
        struct io_blocking_operation_arguments arguments = {
            .io = io,
            .blocking_operation = blocking_operation
        };

        rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
    }
    else {
        // If there's no wakeup_mutex, we can safely remove the operation directly:
        rb_io_blocking_operation_pop(io, blocking_operation);
    }
}

static VALUE
rb_thread_io_blocking_operation_ensure(VALUE _argument)
{
    struct io_blocking_operation_arguments *arguments = (void*)_argument;

    rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);

    return Qnil;
}

/*
 * Executes a function that performs a blocking IO operation, while properly tracking
 * the operation in the IO's blocking_operations list. This ensures proper cleanup
 * and interruption handling if the IO is closed while blocked.
 *
 * The operation is automatically removed from the blocking_operations list when the function
 * returns, whether normally or due to an exception.
 *
 * @parameter self The IO object
 * @parameter function The function to execute that will perform the blocking operation
 * @parameter argument The argument to pass to the function
 * @returns The result of the blocking operation function
 */
VALUE
rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
{
    struct rb_io *io;
    RB_IO_POINTER(self, io);

    rb_execution_context_t *ec = GET_EC();
    struct rb_io_blocking_operation blocking_operation = {
        .ec = ec,
    };
    rb_io_blocking_operation_enter(io, &blocking_operation);

    struct io_blocking_operation_arguments io_blocking_operation_arguments = {
        .io = io,
        .blocking_operation = &blocking_operation
    };

    return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
}
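
/*
  Usage sketch (illustrative; `wait_readable_func' is hypothetical): wrap a
  blocking wait so that closing the IO can interrupt it and clean up:

      VALUE result = rb_thread_io_blocking_operation(
          io_object, wait_readable_func, (VALUE)arg);
 */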

static bool
thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
#else
    return false;
#endif
}

// true if need retry
static bool
thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
{
#if defined(USE_MN_THREADS) && USE_MN_THREADS
    if (thread_io_mn_schedulable(th, events, timeout)) {
        rb_hrtime_t rel, *prel;

        if (timeout) {
            rel = rb_timeval2hrtime(timeout);
            prel = &rel;
        }
        else {
            prel = NULL;
        }

        VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));

        if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
            // timeout
            return false;
        }
        else {
            return true;
        }
    }
#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
    return false;
}

// assume read/write
static bool
blocking_call_retryable_p(int r, int eno)
{
    if (r != -1) return false;

    switch (eno) {
      case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
      case EWOULDBLOCK:
#endif
        return true;
      default:
        return false;
    }
}

bool
rb_thread_mn_schedulable(VALUE thval)
{
    rb_thread_t *th = rb_thread_ptr(thval);
    return th->mn_schedulable;
}

VALUE
rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
{
    rb_execution_context_t * volatile ec = GET_EC();
    rb_thread_t * volatile th = rb_ec_thread_ptr(ec);

    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);

    volatile VALUE val = Qundef; /* shouldn't be used */
    volatile int saved_errno = 0;
    enum ruby_tag_type state;
    volatile bool prev_mn_schedulable = th->mn_schedulable;
    th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);

    int fd = io->fd;

    // `errno` is only valid when there is an actual error - but we can't
    // extract that from the return value of `func` alone, so we clear any
    // prior `errno` value here so that we can later check if it was set by
    // `func` or not (as opposed to some previously set value).
    errno = 0;

    struct rb_io_blocking_operation blocking_operation = {
        .ec = ec,
    };
    rb_io_blocking_operation_enter(io, &blocking_operation);

    {
        EC_PUSH_TAG(ec);
        if ((state = EC_EXEC_TAG()) == TAG_NONE) {
            volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
          retry:
            BLOCKING_REGION(th, {
                val = func(data1);
                saved_errno = errno;
            }, ubf_select, th, FALSE);

            RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
            if (events &&
                blocking_call_retryable_p((int)val, saved_errno) &&
                thread_io_wait_events(th, fd, events, NULL)) {
                RUBY_VM_CHECK_INTS_BLOCKING(ec);
                goto retry;
            }

            RUBY_VM_CHECK_INTS_BLOCKING(ec);

            state = saved_state;
        }
        EC_POP_TAG();

        th = rb_ec_thread_ptr(ec);
        th->mn_schedulable = prev_mn_schedulable;
    }

    rb_io_blocking_operation_exit(io, &blocking_operation);

    if (state) {
        EC_JUMP_TAG(ec, state);
    }

    // If the error was a timeout, we raise a specific exception for that:
    if (saved_errno == ETIMEDOUT) {
        rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
    }

    errno = saved_errno;

    return val;
}
2001
2002VALUE
2003rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
2004{
2005 return rb_thread_io_blocking_call(io, func, data1, 0);
2006}
2007
2008/*
2009 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
2010 *
 * After releasing the GVL using
 * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
 * methods. If you need to access Ruby again, you must use this function
 * rb_thread_call_with_gvl().
2015 *
2016 * This function rb_thread_call_with_gvl() does:
2017 * (1) acquire GVL.
2018 * (2) call passed function `func'.
2019 * (3) release GVL.
2020 * (4) return a value which is returned at (2).
2021 *
 * NOTE: You should not return a Ruby object at (2) because such an object
 *       will not be marked (and may therefore be garbage collected).
2024 *
 * NOTE: If an exception is raised in `func', this function DOES NOT
 *       protect (catch) the exception. If you have any resources
 *       which should be freed before the exception propagates, you need
 *       to use rb_protect() in `func' and return a value which indicates
 *       that an exception was raised.
2030 *
2031 * NOTE: This function should not be called by a thread which was not
2032 * created as Ruby thread (created by Thread.new or so). In other
2033 * words, this function *DOES NOT* associate or convert a NON-Ruby
2034 * thread to a Ruby thread.
2035 */
2036void *
2037rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
2038{
2039 rb_thread_t *th = ruby_thread_from_native();
2040 struct rb_blocking_region_buffer *brb;
2041 struct rb_unblock_callback prev_unblock;
2042 void *r;
2043
2044 if (th == 0) {
        /* An error has occurred, but we can't use rb_bug()
         * because this thread is not a Ruby thread.
2047 * What should we do?
2048 */
2049 bp();
2050 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2051 exit(EXIT_FAILURE);
2052 }
2053
2054 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
2055 prev_unblock = th->unblock;
2056
2057 if (brb == 0) {
2058 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
2059 }
2060
2061 blocking_region_end(th, brb);
2062 /* enter to Ruby world: You can access Ruby values, methods and so on. */
2063 r = (*func)(data1);
2064 /* leave from Ruby world: You can not access Ruby values, etc. */
2065 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2066 RUBY_ASSERT_ALWAYS(released);
2067 RB_VM_SAVE_MACHINE_CONTEXT(th);
2068 thread_sched_to_waiting(TH_SCHED(th), th);
2069 return r;
2070}
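
/*
 * Illustrative sketch (hypothetical names `heavy_work` and `log_progress`):
 * an extension releases the GVL for non-Ruby work, but briefly re-acquires
 * it to call back into Ruby.
 *
 *     static void *
 *     log_progress(void *data)
 *     {
 *         rb_funcall(rb_stdout, rb_intern("puts"), 1, rb_str_new_cstr("working"));
 *         return NULL;
 *     }
 *
 *     static void *
 *     heavy_work(void *data)
 *     {
 *         // ... non-Ruby work; no Ruby API may be used here ...
 *         rb_thread_call_with_gvl(log_progress, data); // Ruby API allowed inside
 *         // ... more non-Ruby work ...
 *         return NULL;
 *     }
 *
 *     // Called on a Ruby-created thread that holds the GVL:
 *     rb_thread_call_without_gvl(heavy_work, NULL, RUBY_UBF_IO, NULL);
 */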
2071
2072/*
2073 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2074 */
2075
int
ruby_thread_has_gvl_p(void)
2078{
2079 rb_thread_t *th = ruby_thread_from_native();
2080
2081 if (th && th->blocking_region_buffer == 0) {
2082 return 1;
2083 }
2084 else {
2085 return 0;
2086 }
2087}
2088
2089/*
2090 * call-seq:
2091 * Thread.pass -> nil
2092 *
 * Give the thread scheduler a hint to pass execution to another thread.
 * A running thread may or may not switch; it depends on the OS and processor.
2095 */
2096
2097static VALUE
2098thread_s_pass(VALUE klass)
2099{
    rb_thread_schedule();
    return Qnil;
2102}
2103
2104/*****************************************************/
2105
2106/*
2107 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2108 *
2109 * Async events such as an exception thrown by Thread#raise,
2110 * Thread#kill and thread termination (after main thread termination)
2111 * will be queued to th->pending_interrupt_queue.
2112 * - clear: clear the queue.
2113 * - enque: enqueue err object into queue.
2114 * - deque: dequeue err object from queue.
2115 * - active_p: return 1 if the queue should be checked.
2116 *
 * All rb_threadptr_pending_interrupt_* functions are, of course, called
 * by a thread which has acquired the GVL.
 * Note that all "rb_"-prefixed APIs need the GVL to be called.
2120 */
2121
2122void
2123rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
2124{
2125 rb_ary_clear(th->pending_interrupt_queue);
2126}
2127
2128void
2129rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2130{
2131 rb_ary_push(th->pending_interrupt_queue, v);
2132 th->pending_interrupt_queue_checked = 0;
2133}
2134
2135static void
2136threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2137{
2138 if (!th->pending_interrupt_queue) {
2139 rb_raise(rb_eThreadError, "uninitialized thread");
2140 }
2141}
2142
2143enum handle_interrupt_timing {
2144 INTERRUPT_NONE,
2145 INTERRUPT_IMMEDIATE,
2146 INTERRUPT_ON_BLOCKING,
2147 INTERRUPT_NEVER
2148};
2149
2150static enum handle_interrupt_timing
2151rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2152{
2153 if (sym == sym_immediate) {
2154 return INTERRUPT_IMMEDIATE;
2155 }
2156 else if (sym == sym_on_blocking) {
2157 return INTERRUPT_ON_BLOCKING;
2158 }
2159 else if (sym == sym_never) {
2160 return INTERRUPT_NEVER;
2161 }
2162 else {
2163 rb_raise(rb_eThreadError, "unknown mask signature");
2164 }
2165}
2166
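// Walks the handle_interrupt mask stack from the innermost mask outwards and
// returns the first timing registered for the class of `err` (or one of its
// ancestors). INTERRUPT_NONE means that no mask matched.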
2167static enum handle_interrupt_timing
2168rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2169{
2170 VALUE mask;
2171 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2172 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2173 VALUE mod;
2174 long i;
2175
2176 for (i=0; i<mask_stack_len; i++) {
2177 mask = mask_stack[mask_stack_len-(i+1)];
2178
2179 if (SYMBOL_P(mask)) {
2180 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2181 if (err != rb_cInteger) {
2182 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2183 }
2184 else {
2185 continue;
2186 }
2187 }
2188
2189 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2190 VALUE klass = mod;
2191 VALUE sym;
2192
2193 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2194 klass = RBASIC(mod)->klass;
2195 }
2196 else if (mod != RCLASS_ORIGIN(mod)) {
2197 continue;
2198 }
2199
2200 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2201 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2202 }
2203 }
        /* try the next mask */
2205 }
2206 return INTERRUPT_NONE;
2207}
2208
2209static int
2210rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2211{
2212 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2213}
2214
2215static int
2216rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2217{
2218 int i;
2219 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2220 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2221 if (rb_obj_is_kind_of(e, err)) {
2222 return TRUE;
2223 }
2224 }
2225 return FALSE;
2226}
2227
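// Dequeues the first pending interrupt that may be delivered under the
// current handle_interrupt masks; Qundef means nothing is deliverable
// right now.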
2228static VALUE
2229rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2230{
2231#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2232 int i;
2233
2234 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2235 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2236
2237 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2238
2239 switch (mask_timing) {
2240 case INTERRUPT_ON_BLOCKING:
2241 if (timing != INTERRUPT_ON_BLOCKING) {
2242 break;
2243 }
2244 /* fall through */
2245 case INTERRUPT_NONE: /* default: IMMEDIATE */
2246 case INTERRUPT_IMMEDIATE:
2247 rb_ary_delete_at(th->pending_interrupt_queue, i);
2248 return err;
2249 case INTERRUPT_NEVER:
2250 break;
2251 }
2252 }
2253
2254 th->pending_interrupt_queue_checked = 1;
2255 return Qundef;
2256#else
2257 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2258 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2259 th->pending_interrupt_queue_checked = 1;
2260 }
2261 return err;
2262#endif
2263}
2264
2265static int
2266threadptr_pending_interrupt_active_p(rb_thread_t *th)
2267{
    /*
     * As an optimization, we don't check the async errinfo queue
     * if neither the queue nor the thread interrupt mask has changed
     * since the last check.
     */
2273 if (th->pending_interrupt_queue_checked) {
2274 return 0;
2275 }
2276
2277 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2278 return 0;
2279 }
2280
2281 return 1;
2282}
2283
2284static int
2285handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2286{
2287 VALUE *maskp = (VALUE *)args;
2288
2289 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2290 rb_raise(rb_eArgError, "unknown mask signature");
2291 }
2292
2293 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2294 *maskp = val;
2295 return ST_CONTINUE;
2296 }
2297
2298 if (RTEST(*maskp)) {
2299 if (!RB_TYPE_P(*maskp, T_HASH)) {
2300 VALUE prev = *maskp;
2301 *maskp = rb_ident_hash_new();
2302 if (SYMBOL_P(prev)) {
2303 rb_hash_aset(*maskp, rb_eException, prev);
2304 }
2305 }
2306 rb_hash_aset(*maskp, key, val);
2307 }
2308 else {
2309 *maskp = Qfalse;
2310 }
2311
2312 return ST_CONTINUE;
2313}
2314
2315/*
2316 * call-seq:
2317 * Thread.handle_interrupt(hash) { ... } -> result of the block
2318 *
2319 * Changes asynchronous interrupt timing.
2320 *
 * _interrupt_ means an asynchronous event and its corresponding procedure,
 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
 * and main thread termination (if the main thread terminates, then all
 * other threads will be killed).
2325 *
 * The given +hash+ has pairs like <code>ExceptionClass =>
 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
 * the given block. The TimingSymbol can be one of the following symbols:
2329 *
2330 * [+:immediate+] Invoke interrupts immediately.
2331 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
 * [+:never+] Never invoke interrupts.
2333 *
 * _BlockingOperation_ means that the operation will block the calling thread,
 * such as read and write. In the CRuby implementation, a _BlockingOperation_
 * is any operation executed without the GVL.
2337 *
2338 * Masked asynchronous interrupts are delayed until they are enabled.
2339 * This method is similar to sigprocmask(3).
2340 *
2341 * === NOTE
2342 *
2343 * Asynchronous interrupts are difficult to use.
2344 *
 * If you need to communicate between threads, please consider using another
 * mechanism such as Queue.
 *
 * Otherwise, use asynchronous interrupts only with a deep understanding of
 * this method.
2348 *
2349 * === Usage
2350 *
 * In this example, we can guard against Thread#raise exceptions.
2352 *
2353 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2354 * ignored in the first block of the main thread. In the second
2355 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2356 *
2357 * th = Thread.new do
2358 * Thread.handle_interrupt(RuntimeError => :never) {
2359 * begin
2360 * # You can write resource allocation code safely.
2361 * Thread.handle_interrupt(RuntimeError => :immediate) {
2362 * # ...
2363 * }
2364 * ensure
2365 * # You can write resource deallocation code safely.
2366 * end
2367 * }
2368 * end
2369 * Thread.pass
2370 * # ...
2371 * th.raise "stop"
2372 *
 * While we are ignoring the RuntimeError exception, it's safe to write our
 * resource allocation code. Then, the ensure block is where we can safely
 * deallocate our resources.
2376 *
2377 * ==== Stack control settings
2378 *
2379 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2380 * to control more than one ExceptionClass and TimingSymbol at a time.
2381 *
2382 * Thread.handle_interrupt(FooError => :never) {
2383 * Thread.handle_interrupt(BarError => :never) {
2384 * # FooError and BarError are prohibited.
2385 * }
2386 * }
2387 *
2388 * ==== Inheritance with ExceptionClass
2389 *
2390 * All exceptions inherited from the ExceptionClass parameter will be considered.
2391 *
2392 * Thread.handle_interrupt(Exception => :never) {
2393 * # all exceptions inherited from Exception are prohibited.
2394 * }
2395 *
2396 * For handling all interrupts, use +Object+ and not +Exception+
2397 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2398 */
2399static VALUE
2400rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2401{
2402 VALUE mask = Qundef;
2403 rb_execution_context_t * volatile ec = GET_EC();
2404 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2405 volatile VALUE r = Qnil;
2406 enum ruby_tag_type state;
2407
2408 if (!rb_block_given_p()) {
2409 rb_raise(rb_eArgError, "block is needed.");
2410 }
2411
2412 mask_arg = rb_to_hash_type(mask_arg);
2413
2414 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2415 mask = Qnil;
2416 }
2417
2418 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2419
2420 if (UNDEF_P(mask)) {
2421 return rb_yield(Qnil);
2422 }
2423
2424 if (!RTEST(mask)) {
2425 mask = mask_arg;
2426 }
2427 else if (RB_TYPE_P(mask, T_HASH)) {
2428 OBJ_FREEZE(mask);
2429 }
2430
2431 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2432 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2433 th->pending_interrupt_queue_checked = 0;
2434 RUBY_VM_SET_INTERRUPT(th->ec);
2435 }
2436
2437 EC_PUSH_TAG(th->ec);
2438 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2439 r = rb_yield(Qnil);
2440 }
2441 EC_POP_TAG();
2442
2443 rb_ary_pop(th->pending_interrupt_mask_stack);
2444 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2445 th->pending_interrupt_queue_checked = 0;
2446 RUBY_VM_SET_INTERRUPT(th->ec);
2447 }
2448
2449 RUBY_VM_CHECK_INTS(th->ec);
2450
2451 if (state) {
2452 EC_JUMP_TAG(th->ec, state);
2453 }
2454
2455 return r;
2456}
2457
2458/*
2459 * call-seq:
2460 * target_thread.pending_interrupt?(error = nil) -> true/false
2461 *
2462 * Returns whether or not the asynchronous queue is empty for the target thread.
2463 *
 * If +error+ is given, then only deferred events of type +error+ are checked.
2465 *
2466 * See ::pending_interrupt? for more information.
2467 */
2468static VALUE
2469rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2470{
2471 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2472
2473 if (!target_th->pending_interrupt_queue) {
2474 return Qfalse;
2475 }
2476 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2477 return Qfalse;
2478 }
2479 if (rb_check_arity(argc, 0, 1)) {
2480 VALUE err = argv[0];
2481 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2482 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2483 }
2484 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2485 }
2486 else {
2487 return Qtrue;
2488 }
2489}
2490
2491/*
2492 * call-seq:
2493 * Thread.pending_interrupt?(error = nil) -> true/false
2494 *
2495 * Returns whether or not the asynchronous queue is empty.
2496 *
2497 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2498 * this method can be used to determine if there are any deferred events.
2499 *
 * If this method returns true, you may want to finish the +:never+ block.
2501 *
2502 * For example, the following method processes deferred asynchronous events
2503 * immediately.
2504 *
2505 * def Thread.kick_interrupt_immediately
2506 * Thread.handle_interrupt(Object => :immediate) {
2507 * Thread.pass
2508 * }
2509 * end
2510 *
 * If +error+ is given, then only deferred events of type +error+ are checked.
2512 *
2513 * === Usage
2514 *
2515 * th = Thread.new{
2516 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2517 * while true
2518 * ...
2519 * # reach safe point to invoke interrupt
2520 * if Thread.pending_interrupt?
2521 * Thread.handle_interrupt(Object => :immediate){}
2522 * end
2523 * ...
2524 * end
2525 * }
2526 * }
2527 * ...
2528 * th.raise # stop thread
2529 *
 * This example can also be written as follows, which you should use to
 * avoid asynchronous interrupts:
2532 *
2533 * flag = true
2534 * th = Thread.new{
2535 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2536 * while true
2537 * ...
2538 * # reach safe point to invoke interrupt
2539 * break if flag == false
2540 * ...
2541 * end
2542 * }
2543 * }
2544 * ...
2545 * flag = false # stop thread
2546 */
2547
2548static VALUE
2549rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2550{
2551 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2552}
2553
2554NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2555
2556static void
2557rb_threadptr_to_kill(rb_thread_t *th)
2558{
2559 VM_ASSERT(GET_THREAD() == th);
2560 rb_threadptr_pending_interrupt_clear(th);
2561 th->status = THREAD_RUNNABLE;
2562 th->to_kill = 1;
2563 th->ec->errinfo = INT2FIX(TAG_FATAL);
2564 EC_JUMP_TAG(th->ec, TAG_FATAL);
2565}
2566
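// Atomically fetches and clears the interrupt bits that are not covered by
// ec->interrupt_mask; masked bits are left set in ec->interrupt_flag so that
// they can be delivered once they are unmasked.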
2567static inline rb_atomic_t
2568threadptr_get_interrupts(rb_thread_t *th)
2569{
2570 rb_execution_context_t *ec = th->ec;
2571 rb_atomic_t interrupt;
2572 rb_atomic_t old;
2573
2574 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2575 do {
2576 interrupt = old;
2577 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2578 } while (old != interrupt);
2579 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2580}
2581
2582static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2583
// Execute interrupts on the currently running thread.
// In certain situations, calling this function will raise an exception. Some examples are:
// * during VM shutdown (`rb_ractor_terminate_all`)
// * a call to Thread#exit for the current thread (`rb_thread_kill`)
// * a call to Thread#raise for the current thread
2589int
2590rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2591{
2592 rb_atomic_t interrupt;
2593 int postponed_job_interrupt = 0;
2594 int ret = FALSE;
2595
2596 VM_ASSERT(GET_THREAD() == th);
2597
2598 if (th->ec->raised_flag) return ret;
2599
2600 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2601 int sig;
2602 int timer_interrupt;
2603 int pending_interrupt;
2604 int trap_interrupt;
2605 int terminate_interrupt;
2606
2607 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2608 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2609 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2610 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2611 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2612
2613 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2614 RB_VM_LOCKING();
2615 }
2616
2617 if (postponed_job_interrupt) {
2618 rb_postponed_job_flush(th->vm);
2619 }
2620
2621 if (trap_interrupt) {
2622 /* signal handling */
2623 if (th == th->vm->ractor.main_thread) {
2624 enum rb_thread_status prev_status = th->status;
2625
2626 th->status = THREAD_RUNNABLE;
2627 {
2628 while ((sig = rb_get_next_signal()) != 0) {
2629 ret |= rb_signal_exec(th, sig);
2630 }
2631 }
2632 th->status = prev_status;
2633 }
2634
2635 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2636 enum rb_thread_status prev_status = th->status;
2637
2638 th->status = THREAD_RUNNABLE;
2639 {
2640 threadptr_interrupt_exec_exec(th);
2641 }
2642 th->status = prev_status;
2643 }
2644 }
2645
2646 /* exception from another thread */
2647 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2648 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2649 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2650 ret = TRUE;
2651
2652 if (UNDEF_P(err)) {
2653 /* no error */
2654 }
2655 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2656 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2657 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2658 terminate_interrupt = 1;
2659 }
2660 else {
2661 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2662 /* the only special exception to be queued across thread */
2663 err = ruby_vm_special_exception_copy(err);
2664 }
            /* set runnable if th was sleeping. */
2666 if (th->status == THREAD_STOPPED ||
2667 th->status == THREAD_STOPPED_FOREVER)
2668 th->status = THREAD_RUNNABLE;
2669 rb_exc_raise(err);
2670 }
2671 }
2672
2673 if (terminate_interrupt) {
2674 rb_threadptr_to_kill(th);
2675 }
2676
2677 if (timer_interrupt) {
2678 uint32_t limits_us = thread_default_quantum_ms * 1000;
2679
2680 if (th->priority > 0)
2681 limits_us <<= th->priority;
2682 else
2683 limits_us >>= -th->priority;
2684
2685 if (th->status == THREAD_RUNNABLE)
2686 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2687
2688 VM_ASSERT(th->ec->cfp);
2689 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2690 0, 0, 0, Qundef);
2691
2692 rb_thread_schedule_limits(limits_us);
2693 }
2694 }
2695 return ret;
2696}
2697
2698void
2699rb_thread_execute_interrupts(VALUE thval)
2700{
2701 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2702}
2703
2704static void
2705rb_threadptr_ready(rb_thread_t *th)
2706{
2707 rb_threadptr_interrupt(th);
2708}
2709
2710static VALUE
2711rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2712{
2713 VALUE exc;
2714
2715 if (rb_threadptr_dead(target_th)) {
2716 return Qnil;
2717 }
2718
2719 if (argc == 0) {
2720 exc = rb_exc_new(rb_eRuntimeError, 0, 0);
2721 }
2722 else {
2723 exc = rb_make_exception(argc, argv);
2724 }
2725
    /* making an exception object can switch threads,
       so we need to check the thread's deadness again */
2728 if (rb_threadptr_dead(target_th)) {
2729 return Qnil;
2730 }
2731
2732 rb_ec_setup_exception(GET_EC(), exc, Qundef);
2733 rb_threadptr_pending_interrupt_enque(target_th, exc);
2734 rb_threadptr_interrupt(target_th);
2735 return Qnil;
2736}
2737
2738void
2739rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2740{
2741 VALUE argv[2];
2742
2743 argv[0] = rb_eSignal;
2744 argv[1] = INT2FIX(sig);
2745 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2746}
2747
2748void
2749rb_threadptr_signal_exit(rb_thread_t *th)
2750{
2751 VALUE argv[2];
2752
2753 argv[0] = rb_eSystemExit;
2754 argv[1] = rb_str_new2("exit");
2755
    // TODO: check signal raise delivery
2757 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2758}
2759
2760int
2761rb_ec_set_raised(rb_execution_context_t *ec)
2762{
2763 if (ec->raised_flag & RAISED_EXCEPTION) {
2764 return 1;
2765 }
2766 ec->raised_flag |= RAISED_EXCEPTION;
2767 return 0;
2768}
2769
2770int
2771rb_ec_reset_raised(rb_execution_context_t *ec)
2772{
2773 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2774 return 0;
2775 }
2776 ec->raised_flag &= ~RAISED_EXCEPTION;
2777 return 1;
2778}
2779
2780/*
2781 * Thread-safe IO closing mechanism.
2782 *
2783 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2784 * 1. Track and notify all blocking operations through io->blocking_operations
2785 * 2. Ensure only one thread can close at a time using io->closing_ec
2786 * 3. Synchronize cleanup using wakeup_mutex
2787 *
2788 * The close process works as follows:
2789 * - First check if any thread is already closing (io->closing_ec)
2790 * - Set up wakeup_mutex for synchronization
2791 * - Iterate through all blocking operations in io->blocking_operations
2792 * - For each blocked fiber with a scheduler:
2793 * - Notify via rb_fiber_scheduler_fiber_interrupt
2794 * - For each blocked thread without a scheduler:
2795 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2796 * - Wake via rb_threadptr_interrupt
2797 * - Wait on wakeup_mutex until all operations are cleaned up
2798 * - Only then clear closing state and allow actual close to proceed
2799 */
2800static VALUE
2801thread_io_close_notify_all(VALUE _io)
2802{
2803 struct rb_io *io = (struct rb_io *)_io;
2804
2805 size_t count = 0;
2806 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2807 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2808
2809 struct rb_io_blocking_operation *blocking_operation;
2810 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2811 rb_execution_context_t *ec = blocking_operation->ec;
2812
2813 // If the operation is in progress, we need to interrupt it:
2814 if (ec) {
2815 rb_thread_t *thread = ec->thread_ptr;
2816
2817 VALUE result = RUBY_Qundef;
2818 if (thread->scheduler != Qnil) {
2819 result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
2820 }
2821
2822 if (result == RUBY_Qundef) {
2823 // If the thread is not the current thread, we need to enqueue an error:
2824 rb_threadptr_pending_interrupt_enque(thread, error);
2825 rb_threadptr_interrupt(thread);
2826 }
2827 }
2828
2829 count += 1;
2830 }
2831
2832 return (VALUE)count;
2833}
2834
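// Interrupts every blocking operation currently registered on `io`.
// Returns the number of operations that were notified, or 0 if another
// thread is already closing the IO or nothing is blocked on it.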
2835size_t
2836rb_thread_io_close_interrupt(struct rb_io *io)
2837{
2838 // We guard this operation based on `io->closing_ec` -> only one thread will ever enter this function.
2839 if (io->closing_ec) {
2840 return 0;
2841 }
2842
2843 // If there are no blocking operations, we are done:
2844 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2845 return 0;
2846 }
2847
2848 // Otherwise, we are now closing the IO:
2849 rb_execution_context_t *ec = GET_EC();
2850 io->closing_ec = ec;
2851
2852 // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
2853 io->wakeup_mutex = rb_mutex_new();
2854 rb_mutex_allow_trap(io->wakeup_mutex, 1);
2855
2856 // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
2857 VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);
2858
2859 return (size_t)result;
2860}
2861
2862void
2863rb_thread_io_close_wait(struct rb_io* io)
2864{
2865 VALUE wakeup_mutex = io->wakeup_mutex;
2866
2867 if (!RB_TEST(wakeup_mutex)) {
2868 // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
2869 return;
2870 }
2871
2872 rb_mutex_lock(wakeup_mutex);
2873 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2874 rb_mutex_sleep(wakeup_mutex, Qnil);
2875 }
2876 rb_mutex_unlock(wakeup_mutex);
2877
2878 // We are done closing:
2879 io->wakeup_mutex = Qnil;
2880 io->closing_ec = NULL;
2881}
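
/*
 * A minimal sketch of the expected calling sequence during close (the
 * actual caller lives in the IO close implementation):
 *
 *     if (rb_thread_io_close_interrupt(io)) {
 *         // Someone was blocked on `io`; wait for them to unwind:
 *         rb_thread_io_close_wait(io);
 *     }
 */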
2882
2883void
2884rb_thread_fd_close(int fd)
2885{
2886 rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
2887}
2888
2889/*
2890 * call-seq:
2891 * thr.raise
2892 * thr.raise(string)
2893 * thr.raise(exception [, string [, array]])
2894 *
2895 * Raises an exception from the given thread. The caller does not have to be
2896 * +thr+. See Kernel#raise for more information.
2897 *
2898 * Thread.abort_on_exception = true
2899 * a = Thread.new { sleep(200) }
2900 * a.raise("Gotcha")
2901 *
2902 * This will produce:
2903 *
2904 * prog.rb:3: Gotcha (RuntimeError)
2905 * from prog.rb:2:in `initialize'
2906 * from prog.rb:2:in `new'
2907 * from prog.rb:2
2908 */
2909
2910static VALUE
2911thread_raise_m(int argc, VALUE *argv, VALUE self)
2912{
2913 rb_thread_t *target_th = rb_thread_ptr(self);
2914 const rb_thread_t *current_th = GET_THREAD();
2915
2916 threadptr_check_pending_interrupt_queue(target_th);
2917 rb_threadptr_raise(target_th, argc, argv);
2918
2919 /* To perform Thread.current.raise as Kernel.raise */
2920 if (current_th == target_th) {
2921 RUBY_VM_CHECK_INTS(target_th->ec);
2922 }
2923 return Qnil;
2924}
2925
2926
2927/*
2928 * call-seq:
2929 * thr.exit -> thr
2930 * thr.kill -> thr
2931 * thr.terminate -> thr
2932 *
2933 * Terminates +thr+ and schedules another thread to be run, returning
2934 * the terminated Thread. If this is the main thread, or the last
2935 * thread, exits the process.
2936 */
2937
VALUE
rb_thread_kill(VALUE thread)
2940{
2941 rb_thread_t *target_th = rb_thread_ptr(thread);
2942
2943 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2944 return thread;
2945 }
2946 if (target_th == target_th->vm->ractor.main_thread) {
2947 rb_exit(EXIT_SUCCESS);
2948 }
2949
2950 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2951
2952 if (target_th == GET_THREAD()) {
2953 /* kill myself immediately */
2954 rb_threadptr_to_kill(target_th);
2955 }
2956 else {
2957 threadptr_check_pending_interrupt_queue(target_th);
2958 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2959 rb_threadptr_interrupt(target_th);
2960 }
2961
2962 return thread;
2963}
2964
2965int
2966rb_thread_to_be_killed(VALUE thread)
2967{
2968 rb_thread_t *target_th = rb_thread_ptr(thread);
2969
2970 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2971 return TRUE;
2972 }
2973 return FALSE;
2974}
2975
2976/*
2977 * call-seq:
2978 * Thread.kill(thread) -> thread
2979 *
 * Causes the given +thread+ to exit; see also Thread::exit.
2981 *
2982 * count = 0
2983 * a = Thread.new { loop { count += 1 } }
2984 * sleep(0.1) #=> 0
2985 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2986 * count #=> 93947
2987 * a.alive? #=> false
2988 */
2989
2990static VALUE
2991rb_thread_s_kill(VALUE obj, VALUE th)
2992{
2993 return rb_thread_kill(th);
2994}
2995
2996
2997/*
2998 * call-seq:
2999 * Thread.exit -> thread
3000 *
3001 * Terminates the currently running thread and schedules another thread to be
3002 * run.
3003 *
3004 * If this thread is already marked to be killed, ::exit returns the Thread.
3005 *
 * If this is the main thread, or the last thread, exits the process.
3007 */
3008
3009static VALUE
3010rb_thread_exit(VALUE _)
3011{
3012 rb_thread_t *th = GET_THREAD();
3013 return rb_thread_kill(th->self);
3014}
3015
3016
3017/*
3018 * call-seq:
3019 * thr.wakeup -> thr
3020 *
 * Marks a given thread as eligible for scheduling; however, it may still
 * remain blocked on I/O.
3023 *
 * *Note:* This does not invoke the scheduler; see #run for more information.
3025 *
3026 * c = Thread.new { Thread.stop; puts "hey!" }
3027 * sleep 0.1 while c.status!='sleep'
3028 * c.wakeup
3029 * c.join
3030 * #=> "hey!"
3031 */
3032
VALUE
rb_thread_wakeup(VALUE thread)
3035{
3036 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3037 rb_raise(rb_eThreadError, "killed thread");
3038 }
3039 return thread;
3040}
3041
VALUE
rb_thread_wakeup_alive(VALUE thread)
{
3045 rb_thread_t *target_th = rb_thread_ptr(thread);
3046 if (target_th->status == THREAD_KILLED) return Qnil;
3047
3048 rb_threadptr_ready(target_th);
3049
3050 if (target_th->status == THREAD_STOPPED ||
3051 target_th->status == THREAD_STOPPED_FOREVER) {
3052 target_th->status = THREAD_RUNNABLE;
3053 }
3054
3055 return thread;
3056}
3057
3058
3059/*
3060 * call-seq:
3061 * thr.run -> thr
3062 *
3063 * Wakes up +thr+, making it eligible for scheduling.
3064 *
3065 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3066 * sleep 0.1 while a.status!='sleep'
3067 * puts "Got here"
3068 * a.run
3069 * a.join
3070 *
3071 * This will produce:
3072 *
3073 * a
3074 * Got here
3075 * c
3076 *
3077 * See also the instance method #wakeup.
3078 */
3079
VALUE
rb_thread_run(VALUE thread)
3082{
3083 rb_thread_wakeup(thread);
    rb_thread_schedule();
    return thread;
3086}
3087
3088
VALUE
rb_thread_stop(void)
3091{
3092 if (rb_thread_alone()) {
3093 rb_raise(rb_eThreadError,
3094 "stopping only thread\n\tnote: use sleep to stop forever");
3095 }
    rb_thread_sleep_deadly();
    return Qnil;
3098}
3099
3100/*
3101 * call-seq:
3102 * Thread.stop -> nil
3103 *
3104 * Stops execution of the current thread, putting it into a ``sleep'' state,
3105 * and schedules execution of another thread.
3106 *
3107 * a = Thread.new { print "a"; Thread.stop; print "c" }
3108 * sleep 0.1 while a.status!='sleep'
3109 * print "b"
3110 * a.run
3111 * a.join
3112 * #=> "abc"
3113 */
3114
3115static VALUE
3116thread_stop(VALUE _)
3117{
3118 return rb_thread_stop();
3119}
3120
3121/********************************************************************/
3122
3123VALUE
3124rb_thread_list(void)
3125{
3126 // TODO
3127 return rb_ractor_thread_list();
3128}
3129
3130/*
3131 * call-seq:
3132 * Thread.list -> array
3133 *
3134 * Returns an array of Thread objects for all threads that are either runnable
3135 * or stopped.
3136 *
3137 * Thread.new { sleep(200) }
3138 * Thread.new { 1000000.times {|i| i*i } }
3139 * Thread.new { Thread.stop }
3140 * Thread.list.each {|t| p t}
3141 *
3142 * This will produce:
3143 *
3144 * #<Thread:0x401b3e84 sleep>
3145 * #<Thread:0x401b3f38 run>
3146 * #<Thread:0x401b3fb0 sleep>
3147 * #<Thread:0x401bdf4c run>
3148 */
3149
3150static VALUE
3151thread_list(VALUE _)
3152{
3153 return rb_thread_list();
3154}
3155
VALUE
rb_thread_current(void)
{
3159 return GET_THREAD()->self;
3160}
3161
3162/*
3163 * call-seq:
3164 * Thread.current -> thread
3165 *
3166 * Returns the currently executing thread.
3167 *
3168 * Thread.current #=> #<Thread:0x401bdf4c run>
3169 */
3170
3171static VALUE
3172thread_s_current(VALUE klass)
3173{
3174 return rb_thread_current();
3175}
3176
VALUE
rb_thread_main(void)
3179{
3180 return GET_RACTOR()->threads.main->self;
3181}
3182
3183/*
3184 * call-seq:
3185 * Thread.main -> thread
3186 *
3187 * Returns the main thread.
3188 */
3189
3190static VALUE
3191rb_thread_s_main(VALUE klass)
3192{
3193 return rb_thread_main();
3194}
3195
3196
3197/*
3198 * call-seq:
3199 * Thread.abort_on_exception -> true or false
3200 *
3201 * Returns the status of the global ``abort on exception'' condition.
3202 *
3203 * The default is +false+.
3204 *
3205 * When set to +true+, if any thread is aborted by an exception, the
3206 * raised exception will be re-raised in the main thread.
3207 *
 * This can also be enabled by the global $DEBUG flag or the command line
 * option +-d+.
3210 *
3211 * See also ::abort_on_exception=.
3212 *
3213 * There is also an instance level method to set this for a specific thread,
3214 * see #abort_on_exception.
3215 */
3216
3217static VALUE
3218rb_thread_s_abort_exc(VALUE _)
3219{
3220 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3221}
3222
3223
3224/*
3225 * call-seq:
3226 * Thread.abort_on_exception= boolean -> true or false
3227 *
3228 * When set to +true+, if any thread is aborted by an exception, the
3229 * raised exception will be re-raised in the main thread.
3230 * Returns the new state.
3231 *
3232 * Thread.abort_on_exception = true
3233 * t1 = Thread.new do
3234 * puts "In new thread"
3235 * raise "Exception from thread"
3236 * end
3237 * sleep(1)
3238 * puts "not reached"
3239 *
3240 * This will produce:
3241 *
3242 * In new thread
3243 * prog.rb:4: Exception from thread (RuntimeError)
3244 * from prog.rb:2:in `initialize'
3245 * from prog.rb:2:in `new'
3246 * from prog.rb:2
3247 *
3248 * See also ::abort_on_exception.
3249 *
3250 * There is also an instance level method to set this for a specific thread,
3251 * see #abort_on_exception=.
3252 */
3253
3254static VALUE
3255rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3256{
3257 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3258 return val;
3259}
3260
3261
3262/*
3263 * call-seq:
3264 * thr.abort_on_exception -> true or false
3265 *
3266 * Returns the status of the thread-local ``abort on exception'' condition for
3267 * this +thr+.
3268 *
3269 * The default is +false+.
3270 *
3271 * See also #abort_on_exception=.
3272 *
3273 * There is also a class level method to set this for all threads, see
3274 * ::abort_on_exception.
3275 */
3276
3277static VALUE
3278rb_thread_abort_exc(VALUE thread)
3279{
3280 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3281}
3282
3283
3284/*
3285 * call-seq:
3286 * thr.abort_on_exception= boolean -> true or false
3287 *
3288 * When set to +true+, if this +thr+ is aborted by an exception, the
3289 * raised exception will be re-raised in the main thread.
3290 *
3291 * See also #abort_on_exception.
3292 *
3293 * There is also a class level method to set this for all threads, see
3294 * ::abort_on_exception=.
3295 */
3296
3297static VALUE
3298rb_thread_abort_exc_set(VALUE thread, VALUE val)
3299{
3300 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3301 return val;
3302}
3303
3304
3305/*
3306 * call-seq:
3307 * Thread.report_on_exception -> true or false
3308 *
3309 * Returns the status of the global ``report on exception'' condition.
3310 *
3311 * The default is +true+ since Ruby 2.5.
3312 *
3313 * All threads created when this flag is true will report
3314 * a message on $stderr if an exception kills the thread.
3315 *
3316 * Thread.new { 1.times { raise } }
3317 *
3318 * will produce this output on $stderr:
3319 *
3320 * #<Thread:...> terminated with exception (report_on_exception is true):
3321 * Traceback (most recent call last):
3322 * 2: from -e:1:in `block in <main>'
3323 * 1: from -e:1:in `times'
3324 *
3325 * This is done to catch errors in threads early.
3326 * In some cases, you might not want this output.
3327 * There are multiple ways to avoid the extra output:
3328 *
3329 * * If the exception is not intended, the best is to fix the cause of
3330 * the exception so it does not happen anymore.
 * If the exception is intended, it might be better to rescue it closer to
 * where it is raised rather than let it kill the Thread.
3333 * * If it is guaranteed the Thread will be joined with Thread#join or
3334 * Thread#value, then it is safe to disable this report with
3335 * <code>Thread.current.report_on_exception = false</code>
3336 * when starting the Thread.
3337 * However, this might handle the exception much later, or not at all
3338 * if the Thread is never joined due to the parent thread being blocked, etc.
3339 *
3340 * See also ::report_on_exception=.
3341 *
3342 * There is also an instance level method to set this for a specific thread,
3343 * see #report_on_exception=.
3344 *
3345 */
3346
3347static VALUE
3348rb_thread_s_report_exc(VALUE _)
3349{
3350 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3351}
3352
3353
3354/*
3355 * call-seq:
3356 * Thread.report_on_exception= boolean -> true or false
3357 *
3358 * Returns the new state.
3359 * When set to +true+, all threads created afterwards will inherit the
3360 * condition and report a message on $stderr if an exception kills a thread:
3361 *
3362 * Thread.report_on_exception = true
3363 * t1 = Thread.new do
3364 * puts "In new thread"
3365 * raise "Exception from thread"
3366 * end
3367 * sleep(1)
3368 * puts "In the main thread"
3369 *
3370 * This will produce:
3371 *
3372 * In new thread
3373 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3374 * Traceback (most recent call last):
3375 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3376 * In the main thread
3377 *
3378 * See also ::report_on_exception.
3379 *
3380 * There is also an instance level method to set this for a specific thread,
3381 * see #report_on_exception=.
3382 */
3383
3384static VALUE
3385rb_thread_s_report_exc_set(VALUE self, VALUE val)
3386{
3387 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3388 return val;
3389}
3390
3391
3392/*
3393 * call-seq:
3394 * Thread.ignore_deadlock -> true or false
3395 *
3396 * Returns the status of the global ``ignore deadlock'' condition.
3397 * The default is +false+, so that deadlock conditions are not ignored.
3398 *
3399 * See also ::ignore_deadlock=.
3400 *
3401 */
3402
3403static VALUE
3404rb_thread_s_ignore_deadlock(VALUE _)
3405{
3406 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3407}
3408
3409
3410/*
3411 * call-seq:
3412 * Thread.ignore_deadlock = boolean -> true or false
3413 *
3414 * Returns the new state.
3415 * When set to +true+, the VM will not check for deadlock conditions.
3416 * It is only useful to set this if your application can break a
3417 * deadlock condition via some other means, such as a signal.
3418 *
3419 * Thread.ignore_deadlock = true
3420 * queue = Thread::Queue.new
3421 *
3422 * trap(:SIGUSR1){queue.push "Received signal"}
3423 *
3424 * # raises fatal error unless ignoring deadlock
3425 * puts queue.pop
3426 *
3427 * See also ::ignore_deadlock.
3428 */
3429
3430static VALUE
3431rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3432{
3433 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3434 return val;
3435}
3436
3437
3438/*
3439 * call-seq:
3440 * thr.report_on_exception -> true or false
3441 *
3442 * Returns the status of the thread-local ``report on exception'' condition for
3443 * this +thr+.
3444 *
3445 * The default value when creating a Thread is the value of
3446 * the global flag Thread.report_on_exception.
3447 *
3448 * See also #report_on_exception=.
3449 *
3450 * There is also a class level method to set this for all new threads, see
3451 * ::report_on_exception=.
3452 */
3453
3454static VALUE
3455rb_thread_report_exc(VALUE thread)
3456{
3457 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3458}
3459
3460
3461/*
3462 * call-seq:
3463 * thr.report_on_exception= boolean -> true or false
3464 *
3465 * When set to +true+, a message is printed on $stderr if an exception
3466 * kills this +thr+. See ::report_on_exception for details.
3467 *
3468 * See also #report_on_exception.
3469 *
3470 * There is also a class level method to set this for all new threads, see
3471 * ::report_on_exception=.
3472 */
3473
3474static VALUE
3475rb_thread_report_exc_set(VALUE thread, VALUE val)
3476{
3477 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3478 return val;
3479}
3480
3481
3482/*
3483 * call-seq:
3484 * thr.group -> thgrp or nil
3485 *
3486 * Returns the ThreadGroup which contains the given thread.
3487 *
3488 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3489 */
3490
3491VALUE
3492rb_thread_group(VALUE thread)
3493{
3494 return rb_thread_ptr(thread)->thgroup;
3495}
3496
3497static const char *
3498thread_status_name(rb_thread_t *th, int detail)
3499{
3500 switch (th->status) {
3501 case THREAD_RUNNABLE:
3502 return th->to_kill ? "aborting" : "run";
3503 case THREAD_STOPPED_FOREVER:
3504 if (detail) return "sleep_forever";
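        /* fall through */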
3505 case THREAD_STOPPED:
3506 return "sleep";
3507 case THREAD_KILLED:
3508 return "dead";
3509 default:
3510 return "unknown";
3511 }
3512}
3513
3514static int
3515rb_threadptr_dead(rb_thread_t *th)
3516{
3517 return th->status == THREAD_KILLED;
3518}
3519
3520
3521/*
3522 * call-seq:
3523 * thr.status -> string, false or nil
3524 *
3525 * Returns the status of +thr+.
3526 *
3527 * [<tt>"sleep"</tt>]
3528 * Returned if this thread is sleeping or waiting on I/O
3529 * [<tt>"run"</tt>]
3530 * When this thread is executing
3531 * [<tt>"aborting"</tt>]
3532 * If this thread is aborting
3533 * [+false+]
3534 * When this thread is terminated normally
3535 * [+nil+]
3536 * If terminated with an exception.
3537 *
3538 * a = Thread.new { raise("die now") }
3539 * b = Thread.new { Thread.stop }
3540 * c = Thread.new { Thread.exit }
3541 * d = Thread.new { sleep }
3542 * d.kill #=> #<Thread:0x401b3678 aborting>
3543 * a.status #=> nil
3544 * b.status #=> "sleep"
3545 * c.status #=> false
3546 * d.status #=> "aborting"
3547 * Thread.current.status #=> "run"
3548 *
3549 * See also the instance methods #alive? and #stop?
3550 */
3551
3552static VALUE
3553rb_thread_status(VALUE thread)
3554{
3555 rb_thread_t *target_th = rb_thread_ptr(thread);
3556
3557 if (rb_threadptr_dead(target_th)) {
3558 if (!NIL_P(target_th->ec->errinfo) &&
3559 !FIXNUM_P(target_th->ec->errinfo)) {
3560 return Qnil;
3561 }
3562 else {
3563 return Qfalse;
3564 }
3565 }
3566 else {
3567 return rb_str_new2(thread_status_name(target_th, FALSE));
3568 }
3569}
3570
3571
3572/*
3573 * call-seq:
3574 * thr.alive? -> true or false
3575 *
3576 * Returns +true+ if +thr+ is running or sleeping.
3577 *
3578 * thr = Thread.new { }
3579 * thr.join #=> #<Thread:0x401b3fb0 dead>
3580 * Thread.current.alive? #=> true
3581 * thr.alive? #=> false
3582 *
3583 * See also #stop? and #status.
3584 */
3585
3586static VALUE
3587rb_thread_alive_p(VALUE thread)
3588{
3589 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3590}
3591
3592/*
3593 * call-seq:
3594 * thr.stop? -> true or false
3595 *
3596 * Returns +true+ if +thr+ is dead or sleeping.
3597 *
3598 * a = Thread.new { Thread.stop }
3599 * b = Thread.current
3600 * a.stop? #=> true
3601 * b.stop? #=> false
3602 *
3603 * See also #alive? and #status.
3604 */
3605
3606static VALUE
3607rb_thread_stop_p(VALUE thread)
3608{
3609 rb_thread_t *th = rb_thread_ptr(thread);
3610
3611 if (rb_threadptr_dead(th)) {
3612 return Qtrue;
3613 }
3614 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3615}
3616
3617/*
3618 * call-seq:
3619 * thr.name -> string
3620 *
 * Returns the name of the thread.
3622 */
3623
3624static VALUE
3625rb_thread_getname(VALUE thread)
3626{
3627 return rb_thread_ptr(thread)->name;
3628}
3629
3630/*
3631 * call-seq:
3632 * thr.name=(name) -> string
3633 *
 * Sets the name of the Ruby thread to the given string.
 * On some platforms, it may also set the name of the underlying native
 * thread (the pthread name and/or the name visible to the kernel).
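 *
 * For example:
 *
 *    a = Thread.new { sleep }
 *    a.name = "handler"
 *    a.name #=> "handler"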
3636 */
3637
3638static VALUE
3639rb_thread_setname(VALUE thread, VALUE name)
3640{
3641 rb_thread_t *target_th = rb_thread_ptr(thread);
3642
3643 if (!NIL_P(name)) {
3644 rb_encoding *enc;
3645 StringValueCStr(name);
3646 enc = rb_enc_get(name);
3647 if (!rb_enc_asciicompat(enc)) {
3648 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3649 rb_enc_name(enc));
3650 }
3651 name = rb_str_new_frozen(name);
3652 }
3653 target_th->name = name;
3654 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3655 native_set_another_thread_name(target_th->nt->thread_id, name);
3656 }
3657 return name;
3658}
3659
3660#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3661/*
3662 * call-seq:
3663 * thr.native_thread_id -> integer
3664 *
3665 * Return the native thread ID which is used by the Ruby thread.
3666 *
 * The ID depends on the OS. (It is not the POSIX thread ID returned by
 * pthread_self(3).)
3668 * * On Linux it is TID returned by gettid(2).
3669 * * On macOS it is the system-wide unique integral ID of thread returned
3670 * by pthread_threadid_np(3).
3671 * * On FreeBSD it is the unique integral ID of the thread returned by
3672 * pthread_getthreadid_np(3).
3673 * * On Windows it is the thread identifier returned by GetThreadId().
3674 * * On other platforms, it raises NotImplementedError.
3675 *
 * NOTE:
 * If the thread is not yet associated with, or has already been dissociated
 * from, a native thread, it returns _nil_.
 * If the Ruby implementation uses an M:N thread model, the ID may change
 * depending on the timing.
3681 */
3682
3683static VALUE
3684rb_thread_native_thread_id(VALUE thread)
3685{
3686 rb_thread_t *target_th = rb_thread_ptr(thread);
3687 if (rb_threadptr_dead(target_th)) return Qnil;
3688 return native_thread_native_thread_id(target_th);
3689}
3690#else
3691# define rb_thread_native_thread_id rb_f_notimplement
3692#endif
3693
3694/*
3695 * call-seq:
3696 * thr.to_s -> string
3697 *
3698 * Dump the name, id, and status of _thr_ to a string.
3699 */
3700
3701static VALUE
3702rb_thread_to_s(VALUE thread)
3703{
3704 VALUE cname = rb_class_path(rb_obj_class(thread));
3705 rb_thread_t *target_th = rb_thread_ptr(thread);
3706 const char *status;
3707 VALUE str, loc;
3708
3709 status = thread_status_name(target_th, TRUE);
3710 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3711 if (!NIL_P(target_th->name)) {
3712 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3713 }
3714 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3715 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3716 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3717 }
3718 rb_str_catf(str, " %s>", status);
3719
3720 return str;
3721}
3722
3723/* variables for recursive traversals */
3724#define recursive_key id__recursive_key__
3725
3726static VALUE
3727threadptr_local_aref(rb_thread_t *th, ID id)
3728{
3729 if (id == recursive_key) {
3730 return th->ec->local_storage_recursive_hash;
3731 }
3732 else {
3733 VALUE val;
3734 struct rb_id_table *local_storage = th->ec->local_storage;
3735
3736 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3737 return val;
3738 }
3739 else {
3740 return Qnil;
3741 }
3742 }
3743}
3744
VALUE
rb_thread_local_aref(VALUE thread, ID id)
3747{
3748 return threadptr_local_aref(rb_thread_ptr(thread), id);
3749}
3750
3751/*
3752 * call-seq:
3753 * thr[sym] -> obj or nil
3754 *
3755 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3756 * if not explicitly inside a Fiber), using either a symbol or a string name.
3757 * If the specified variable does not exist, returns +nil+.
3758 *
3759 * [
3760 * Thread.new { Thread.current["name"] = "A" },
3761 * Thread.new { Thread.current[:name] = "B" },
3762 * Thread.new { Thread.current["name"] = "C" }
3763 * ].each do |th|
3764 * th.join
3765 * puts "#{th.inspect}: #{th[:name]}"
3766 * end
3767 *
3768 * This will produce:
3769 *
3770 * #<Thread:0x00000002a54220 dead>: A
3771 * #<Thread:0x00000002a541a8 dead>: B
3772 * #<Thread:0x00000002a54130 dead>: C
3773 *
 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
 * This confusion did not exist in Ruby 1.8 because
 * fibers were only introduced in Ruby 1.9.
 * Ruby 1.9 made these methods fiber-local in order to preserve the
 * following idiom for dynamic scope.
3779 *
3780 * def meth(newvalue)
3781 * begin
3782 * oldvalue = Thread.current[:name]
3783 * Thread.current[:name] = newvalue
3784 * yield
3785 * ensure
3786 * Thread.current[:name] = oldvalue
3787 * end
3788 * end
3789 *
 * The idiom would not work as dynamic scope if the methods were thread-local
 * and a given block switched fibers.
3792 *
3793 * f = Fiber.new {
3794 * meth(1) {
3795 * Fiber.yield
3796 * }
3797 * }
3798 * meth(2) {
3799 * f.resume
3800 * }
3801 * f.resume
3802 * p Thread.current[:name]
3803 * #=> nil if fiber-local
3804 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3805 *
3806 * For thread-local variables, please see #thread_variable_get and
3807 * #thread_variable_set.
3808 *
3809 */
3810
3811static VALUE
3812rb_thread_aref(VALUE thread, VALUE key)
3813{
3814 ID id = rb_check_id(&key);
3815 if (!id) return Qnil;
3816 return rb_thread_local_aref(thread, id);
3817}
3818
3819/*
3820 * call-seq:
3821 * thr.fetch(sym) -> obj
3822 * thr.fetch(sym) { } -> obj
3823 * thr.fetch(sym, default) -> obj
3824 *
3825 * Returns a fiber-local for the given key. If the key can't be
3826 * found, there are several options: With no other arguments, it will
3827 * raise a KeyError exception; if <i>default</i> is given, then that
3828 * will be returned; if the optional code block is specified, then
3829 * that will be run and its result returned. See Thread#[] and
3830 * Hash#fetch.
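 *
 * For example:
 *
 *    Thread.current[:name] = "A"
 *    Thread.current.fetch(:name)       #=> "A"
 *    Thread.current.fetch(:other, "B") #=> "B"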
3831 */
3832static VALUE
3833rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3834{
3835 VALUE key, val;
3836 ID id;
3837 rb_thread_t *target_th = rb_thread_ptr(self);
3838 int block_given;
3839
3840 rb_check_arity(argc, 1, 2);
3841 key = argv[0];
3842
3843 block_given = rb_block_given_p();
3844 if (block_given && argc == 2) {
3845 rb_warn("block supersedes default value argument");
3846 }
3847
3848 id = rb_check_id(&key);
3849
3850 if (id == recursive_key) {
3851 return target_th->ec->local_storage_recursive_hash;
3852 }
3853 else if (id && target_th->ec->local_storage &&
3854 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3855 return val;
3856 }
3857 else if (block_given) {
3858 return rb_yield(key);
3859 }
3860 else if (argc == 1) {
3861 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3862 }
3863 else {
3864 return argv[1];
3865 }
3866}
3867
3868static VALUE
3869threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3870{
3871 if (id == recursive_key) {
3872 th->ec->local_storage_recursive_hash = val;
3873 return val;
3874 }
3875 else {
3876 struct rb_id_table *local_storage = th->ec->local_storage;
3877
3878 if (NIL_P(val)) {
3879 if (!local_storage) return Qnil;
3880 rb_id_table_delete(local_storage, id);
3881 return Qnil;
3882 }
3883 else {
3884 if (local_storage == NULL) {
3885 th->ec->local_storage = local_storage = rb_id_table_create(0);
3886 }
3887 rb_id_table_insert(local_storage, id, val);
3888 return val;
3889 }
3890 }
3891}
3892
VALUE
rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3895{
3896 if (OBJ_FROZEN(thread)) {
3897 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3898 }
3899
3900 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3901}
3902
3903/*
3904 * call-seq:
3905 * thr[sym] = obj -> obj
3906 *
3907 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3908 * using either a symbol or a string.
3909 *
3910 * See also Thread#[].
3911 *
3912 * For thread-local variables, please see #thread_variable_set and
3913 * #thread_variable_get.
3914 */
3915
3916static VALUE
3917rb_thread_aset(VALUE self, VALUE id, VALUE val)
3918{
3919 return rb_thread_local_aset(self, rb_to_id(id), val);
3920}
3921
3922/*
3923 * call-seq:
3924 * thr.thread_variable_get(key) -> obj or nil
3925 *
 * Returns the value of a thread-local variable that has been set. Note that
 * these are different from fiber-local values. For fiber-local values,
3928 * please see Thread#[] and Thread#[]=.
3929 *
3930 * Thread local values are carried along with threads, and do not respect
3931 * fibers. For example:
3932 *
3933 * Thread.new {
3934 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3935 * Thread.current["foo"] = "bar" # set a fiber local
3936 *
3937 * Fiber.new {
3938 * Fiber.yield [
3939 * Thread.current.thread_variable_get("foo"), # get the thread local
3940 * Thread.current["foo"], # get the fiber local
3941 * ]
3942 * }.resume
3943 * }.join.value # => ['bar', nil]
3944 *
 * The value "bar" is returned for the thread-local, whereas nil is returned
 * for the fiber-local. The fiber is executed in the same thread, so the
 * thread-local values are available.
3948 */
3949
3950static VALUE
3951rb_thread_variable_get(VALUE thread, VALUE key)
3952{
3953 VALUE locals;
3954 VALUE symbol = rb_to_symbol(key);
3955
3956 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3957 return Qnil;
3958 }
3959 locals = rb_thread_local_storage(thread);
3960 return rb_hash_aref(locals, symbol);
3961}
3962
3963/*
3964 * call-seq:
3965 * thr.thread_variable_set(key, value)
3966 *
3967 * Sets a thread local with +key+ to +value+. Note that these are local to
3968 * threads, and not to fibers. Please see Thread#thread_variable_get and
3969 * Thread#[] for more information.
3970 */
3971
3972static VALUE
3973rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3974{
3975 VALUE locals;
3976
3977 if (OBJ_FROZEN(thread)) {
3978 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3979 }
3980
3981 locals = rb_thread_local_storage(thread);
3982 return rb_hash_aset(locals, rb_to_symbol(key), val);
3983}
3984
3985/*
3986 * call-seq:
3987 * thr.key?(sym) -> true or false
3988 *
3989 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3990 * variable.
3991 *
3992 * me = Thread.current
3993 * me[:oliver] = "a"
3994 * me.key?(:oliver) #=> true
3995 * me.key?(:stanley) #=> false
3996 */
3997
3998static VALUE
3999rb_thread_key_p(VALUE self, VALUE key)
4000{
4001 VALUE val;
4002 ID id = rb_check_id(&key);
4003 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4004
4005 if (!id || local_storage == NULL) {
4006 return Qfalse;
4007 }
4008 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
4009}
4010
4011static enum rb_id_table_iterator_result
4012thread_keys_i(ID key, VALUE value, void *ary)
4013{
4014 rb_ary_push((VALUE)ary, ID2SYM(key));
4015 return ID_TABLE_CONTINUE;
4016}
4017
4018int
4019rb_thread_alone(void)
4020{
4021 // TODO
4022 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4023}
4024
4025/*
4026 * call-seq:
4027 * thr.keys -> array
4028 *
4029 * Returns an array of the names of the fiber-local variables (as Symbols).
4030 *
4031 * thr = Thread.new do
4032 * Thread.current[:cat] = 'meow'
4033 * Thread.current["dog"] = 'woof'
4034 * end
4035 * thr.join #=> #<Thread:0x401b3f10 dead>
4036 * thr.keys #=> [:dog, :cat]
4037 */
4038
4039static VALUE
4040rb_thread_keys(VALUE self)
4041{
4042 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4043 VALUE ary = rb_ary_new();
4044
4045 if (local_storage) {
4046 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4047 }
4048 return ary;
4049}
4050
4051static int
4052keys_i(VALUE key, VALUE value, VALUE ary)
4053{
4054 rb_ary_push(ary, key);
4055 return ST_CONTINUE;
4056}
4057
4058/*
4059 * call-seq:
4060 * thr.thread_variables -> array
4061 *
4062 * Returns an array of the names of the thread-local variables (as Symbols).
4063 *
4064 * thr = Thread.new do
4065 * Thread.current.thread_variable_set(:cat, 'meow')
4066 * Thread.current.thread_variable_set("dog", 'woof')
4067 * end
4068 * thr.join #=> #<Thread:0x401b3f10 dead>
4069 * thr.thread_variables #=> [:dog, :cat]
4070 *
4071 * Note that these are not fiber local variables. Please see Thread#[] and
4072 * Thread#thread_variable_get for more details.
4073 */
4074
4075static VALUE
4076rb_thread_variables(VALUE thread)
4077{
4078 VALUE locals;
4079 VALUE ary;
4080
4081 ary = rb_ary_new();
4082 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4083 return ary;
4084 }
4085 locals = rb_thread_local_storage(thread);
4086 rb_hash_foreach(locals, keys_i, ary);
4087
4088 return ary;
4089}
4090
4091/*
4092 * call-seq:
4093 * thr.thread_variable?(key) -> true or false
4094 *
4095 * Returns +true+ if the given string (or symbol) exists as a thread-local
4096 * variable.
4097 *
4098 * me = Thread.current
4099 * me.thread_variable_set(:oliver, "a")
4100 * me.thread_variable?(:oliver) #=> true
4101 * me.thread_variable?(:stanley) #=> false
4102 *
4103 * Note that these are not fiber local variables. Please see Thread#[] and
4104 * Thread#thread_variable_get for more details.
4105 */
4106
4107static VALUE
4108rb_thread_variable_p(VALUE thread, VALUE key)
4109{
4110 VALUE locals;
4111 VALUE symbol = rb_to_symbol(key);
4112
4113 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4114 return Qfalse;
4115 }
4116 locals = rb_thread_local_storage(thread);
4117
4118 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4119}
4120
4121/*
4122 * call-seq:
4123 * thr.priority -> integer
4124 *
4125 * Returns the priority of <i>thr</i>. The default is inherited from the
4126 * thread that created <i>thr</i>, or zero for the
4127 * initial main thread; higher-priority threads will run more frequently
4128 * than lower-priority threads (but lower-priority threads can also run).
4129 *
4130 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4131 * some platforms.
4132 *
4133 * Thread.current.priority #=> 0
4134 */
4135
4136static VALUE
4137rb_thread_priority(VALUE thread)
4138{
4139 return INT2NUM(rb_thread_ptr(thread)->priority);
4140}
4141
4142
4143/*
4144 * call-seq:
4145 * thr.priority= integer -> thr
4146 *
4147 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4148 * will run more frequently than lower-priority threads (but lower-priority
4149 * threads can also run).
4150 *
4151 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4152 * some platforms.
4153 *
4154 * count1 = count2 = 0
4155 * a = Thread.new do
4156 * loop { count1 += 1 }
4157 * end
4158 * a.priority = -1
4159 *
4160 * b = Thread.new do
4161 * loop { count2 += 1 }
4162 * end
4163 * b.priority = -2
4164 * sleep 1 #=> 1
4165 * count1 #=> 622504
4166 * count2 #=> 5832
4167 */
4168
4169static VALUE
4170rb_thread_priority_set(VALUE thread, VALUE prio)
4171{
4172 rb_thread_t *target_th = rb_thread_ptr(thread);
4173 int priority;
4174
4175#if USE_NATIVE_THREAD_PRIORITY
4176 target_th->priority = NUM2INT(prio);
4177 native_thread_apply_priority(target_th);
4178#else
4179 priority = NUM2INT(prio);
4180 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4181 priority = RUBY_THREAD_PRIORITY_MAX;
4182 }
4183 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4184 priority = RUBY_THREAD_PRIORITY_MIN;
4185 }
4186 target_th->priority = (int8_t)priority;
4187#endif
4188 return INT2NUM(target_th->priority);
4189}
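
/*
 * Clamping example for the #else branch above: assigning `thr.priority = 10`
 * stores RUBY_THREAD_PRIORITY_MAX (3) into target_th->priority and the setter
 * returns 3; likewise -10 is clamped to RUBY_THREAD_PRIORITY_MIN (-3).
 */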
4190
4191/* for IO */
4192
4193#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4194
4195/*
4196 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4197 * in the select(2) system call.
4198 *
4199 * - Linux 2.2.12 (?)
4200 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4201 * select(2) documents how to allocate fd_set dynamically.
4202 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4203 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4204 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4205 * select(2) documents how to allocate fd_set dynamically.
4206 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4207 * - Solaris 8 has select_large_fdset
4208 * - Mac OS X 10.7 (Lion)
4209 * select(2) returns EINVAL if nfds is greater than FD_SET_SIZE and
4210 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4211 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4212 *
4213 * When fd_set is not big enough to hold big file descriptors,
4214 * it should be allocated dynamically.
4215 * Note that this assumes fd_set is structured as a bitmap.
4216 *
4217 * rb_fd_init allocates the memory.
4218 * rb_fd_term frees the memory.
4219 * rb_fd_set may re-allocate the bitmap.
4220 *
4221 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4222 */
4223
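/*
 * A minimal lifecycle sketch of the rb_fdset_t API defined below; fd 2000 is
 * a made-up descriptor chosen to exceed FD_SETSIZE:
 *
 *     rb_fdset_t fds;
 *     struct timeval tv = { 1, 0 };            // 1 second
 *
 *     rb_fd_init(&fds);                        // allocate the bitmap
 *     rb_fd_set(2000, &fds);                   // grows the bitmap as needed
 *     if (rb_fd_select(2001, &fds, NULL, NULL, &tv) > 0 &&
 *         rb_fd_isset(2000, &fds)) {
 *         // fd 2000 is readable
 *     }
 *     rb_fd_term(&fds);                        // release the bitmap
 */
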
4224void
4225rb_fd_init(rb_fdset_t *fds)
4226{
4227 fds->maxfd = 0;
4228 fds->fdset = ALLOC(fd_set);
4229 FD_ZERO(fds->fdset);
4230}
4231
4232void
4233rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4234{
4235 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4236
4237 if (size < sizeof(fd_set))
4238 size = sizeof(fd_set);
4239 dst->maxfd = src->maxfd;
4240 dst->fdset = xmalloc(size);
4241 memcpy(dst->fdset, src->fdset, size);
4242}
4243
4244void
4245rb_fd_term(rb_fdset_t *fds)
4246{
4247 xfree(fds->fdset);
4248 fds->maxfd = 0;
4249 fds->fdset = 0;
4250}
4251
4252void
4253rb_fd_zero(rb_fdset_t *fds)
4254{
4255 if (fds->fdset)
4256 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4257}
4258
4259static void
4260rb_fd_resize(int n, rb_fdset_t *fds)
4261{
4262 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4263 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4264
4265 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4266 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4267
4268 if (m > o) {
4269 fds->fdset = xrealloc(fds->fdset, m);
4270 memset((char *)fds->fdset + o, 0, m - o);
4271 }
4272 if (n >= fds->maxfd) fds->maxfd = n + 1;
4273}
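
/*
 * Sizing arithmetic in rb_fd_resize() above, worked through assuming
 * NFDBITS == 64 and sizeof(fd_mask) == 8 (typical LP64 values): for n == 1023,
 * howmany(1024, 64) * 8 == 128 bytes; for n == 1024, howmany(1025, 64) * 8 ==
 * 136 bytes. The result is clamped up to at least sizeof(fd_set), and only
 * growth triggers xrealloc, with the new tail zero-filled.
 */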
4274
4275void
4276rb_fd_set(int n, rb_fdset_t *fds)
4277{
4278 rb_fd_resize(n, fds);
4279 FD_SET(n, fds->fdset);
4280}
4281
4282void
4283rb_fd_clr(int n, rb_fdset_t *fds)
4284{
4285 if (n >= fds->maxfd) return;
4286 FD_CLR(n, fds->fdset);
4287}
4288
4289int
4290rb_fd_isset(int n, const rb_fdset_t *fds)
4291{
4292 if (n >= fds->maxfd) return 0;
4293 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4294}
4295
4296void
4297rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4298{
4299 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4300
4301 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4302 dst->maxfd = max;
4303 dst->fdset = xrealloc(dst->fdset, size);
4304 memcpy(dst->fdset, src, size);
4305}
4306
4307void
4308rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4309{
4310 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4311
4312 if (size < sizeof(fd_set))
4313 size = sizeof(fd_set);
4314 dst->maxfd = src->maxfd;
4315 dst->fdset = xrealloc(dst->fdset, size);
4316 memcpy(dst->fdset, src->fdset, size);
4317}
4318
4319int
4320rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4321{
4322 fd_set *r = NULL, *w = NULL, *e = NULL;
4323 if (readfds) {
4324 rb_fd_resize(n - 1, readfds);
4325 r = rb_fd_ptr(readfds);
4326 }
4327 if (writefds) {
4328 rb_fd_resize(n - 1, writefds);
4329 w = rb_fd_ptr(writefds);
4330 }
4331 if (exceptfds) {
4332 rb_fd_resize(n - 1, exceptfds);
4333 e = rb_fd_ptr(exceptfds);
4334 }
4335 return select(n, r, w, e, timeout);
4336}
4337
4338#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4339
4340#undef FD_ZERO
4341#undef FD_SET
4342#undef FD_CLR
4343#undef FD_ISSET
4344
4345#define FD_ZERO(f) rb_fd_zero(f)
4346#define FD_SET(i, f) rb_fd_set((i), (f))
4347#define FD_CLR(i, f) rb_fd_clr((i), (f))
4348#define FD_ISSET(i, f) rb_fd_isset((i), (f))
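
/* From this point on (in this configuration), the classic FD_* macro names in
 * this file operate on the growable rb_fdset_t instead of a raw fd_set, so the
 * code below need not care about FD_SETSIZE. */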
4349
4350#elif defined(_WIN32)
4351
4352void
4353rb_fd_init(rb_fdset_t *set)
4354{
4355 set->capa = FD_SETSIZE;
4356 set->fdset = ALLOC(fd_set);
4357 FD_ZERO(set->fdset);
4358}
4359
4360void
4361rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4362{
4363 rb_fd_init(dst);
4364 rb_fd_dup(dst, src);
4365}
4366
4367void
4368rb_fd_term(rb_fdset_t *set)
4369{
4370 xfree(set->fdset);
4371 set->fdset = NULL;
4372 set->capa = 0;
4373}
4374
4375void
4376rb_fd_set(int fd, rb_fdset_t *set)
4377{
4378 unsigned int i;
4379 SOCKET s = rb_w32_get_osfhandle(fd);
4380
4381 for (i = 0; i < set->fdset->fd_count; i++) {
4382 if (set->fdset->fd_array[i] == s) {
4383 return;
4384 }
4385 }
4386 if (set->fdset->fd_count >= (unsigned)set->capa) {
4387 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4388 set->fdset =
4389 rb_xrealloc_mul_add(
4390 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4391 }
4392 set->fdset->fd_array[set->fdset->fd_count++] = s;
4393}
4394
4395#undef FD_ZERO
4396#undef FD_SET
4397#undef FD_CLR
4398#undef FD_ISSET
4399
4400#define FD_ZERO(f) rb_fd_zero(f)
4401#define FD_SET(i, f) rb_fd_set((i), (f))
4402#define FD_CLR(i, f) rb_fd_clr((i), (f))
4403#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4404
4405#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4406
4407#endif
4408
4409#ifndef rb_fd_no_init
4410#define rb_fd_no_init(fds) (void)(fds)
4411#endif
4412
4413static int
4414wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4415{
4416 int r = *result;
4417 if (r < 0) {
4418 switch (errnum) {
4419 case EINTR:
4420#ifdef ERESTART
4421 case ERESTART:
4422#endif
4423 *result = 0;
4424 if (rel && hrtime_update_expire(rel, end)) {
4425 *rel = 0;
4426 }
4427 return TRUE;
4428 }
4429 return FALSE;
4430 }
4431 else if (r == 0) {
4432 /* check for spurious wakeup */
4433 if (rel) {
4434 return !hrtime_update_expire(rel, end);
4435 }
4436 return TRUE;
4437 }
4438 return FALSE;
4439}
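
/*
 * wait_retryable() is meant to drive a retry loop; a sketch of the pattern
 * used by do_select() and thread_io_wait() below (some_blocking_syscall is a
 * placeholder):
 *
 *     do {
 *         result = some_blocking_syscall(...);  // records errno in lerrno
 *         RUBY_VM_CHECK_INTS_BLOCKING(ec);      // may raise
 *     } while (wait_retryable(&result, lerrno, to, end));
 *
 * i.e. retry after EINTR/ERESTART and after spurious zero-result wakeups,
 * until hrtime_update_expire() reports that the deadline `end` has passed
 * (it also shrinks *rel to the time remaining on each pass).
 */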
4441struct select_set {
4442 int max;
4443 rb_thread_t *th;
4444 rb_fdset_t *rset;
4445 rb_fdset_t *wset;
4446 rb_fdset_t *eset;
4447 rb_fdset_t orig_rset;
4448 rb_fdset_t orig_wset;
4449 rb_fdset_t orig_eset;
4450 struct timeval *timeout;
4451};
4452
4453static VALUE
4454select_set_free(VALUE p)
4455{
4456 struct select_set *set = (struct select_set *)p;
4457
4458 rb_fd_term(&set->orig_rset);
4459 rb_fd_term(&set->orig_wset);
4460 rb_fd_term(&set->orig_eset);
4461
4462 return Qfalse;
4463}
4464
4465static VALUE
4466do_select(VALUE p)
4467{
4468 struct select_set *set = (struct select_set *)p;
4469 volatile int result = 0;
4470 int lerrno;
4471 rb_hrtime_t *to, rel, end = 0;
4472
4473 timeout_prepare(&to, &rel, &end, set->timeout);
4474 volatile rb_hrtime_t endtime = end;
4475#define restore_fdset(dst, src) \
4476 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4477#define do_select_update() \
4478 (restore_fdset(set->rset, &set->orig_rset), \
4479 restore_fdset(set->wset, &set->orig_wset), \
4480 restore_fdset(set->eset, &set->orig_eset), \
4481 TRUE)
4482
4483 do {
4484 lerrno = 0;
4485
4486 BLOCKING_REGION(set->th, {
4487 struct timeval tv;
4488
4489 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4490 result = native_fd_select(set->max,
4491 set->rset, set->wset, set->eset,
4492 rb_hrtime2timeval(&tv, to), set->th);
4493 if (result < 0) lerrno = errno;
4494 }
4495 }, ubf_select, set->th, TRUE);
4496
4497 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4498 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4499
4500 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4501
4502 if (result < 0) {
4503 errno = lerrno;
4504 }
4505
4506 return (VALUE)result;
4507}
4508
4509int
4510rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4511 struct timeval *timeout)
4512{
4513 struct select_set set;
4514
4515 set.th = GET_THREAD();
4516 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4517 set.max = max;
4518 set.rset = read;
4519 set.wset = write;
4520 set.eset = except;
4521 set.timeout = timeout;
4522
4523 if (!set.rset && !set.wset && !set.eset) {
4524 if (!timeout) {
4525 rb_thread_sleep_forever();
4526 return 0;
4527 }
4528 rb_thread_wait_for(*timeout);
4529 return 0;
4530 }
4531
4532#define fd_init_copy(f) do { \
4533 if (set.f) { \
4534 rb_fd_resize(set.max - 1, set.f); \
4535 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4536 rb_fd_init_copy(&set.orig_##f, set.f); \
4537 } \
4538 } \
4539 else { \
4540 rb_fd_no_init(&set.orig_##f); \
4541 } \
4542 } while (0)
4543 fd_init_copy(rset);
4544 fd_init_copy(wset);
4545 fd_init_copy(eset);
4546#undef fd_init_copy
4547
4548 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4549}
4550
4551#ifdef USE_POLL
4552
4553/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4554#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4555#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4556#define POLLEX_SET (POLLPRI)
4557
4558#ifndef POLLERR_SET /* defined for FreeBSD for now */
4559# define POLLERR_SET (0)
4560#endif
4561
4562static int
4563wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4564 rb_hrtime_t *const to, volatile int *lerrno)
4565{
4566 struct timespec ts;
4567 volatile int result = 0;
4568
4569 *lerrno = 0;
4570 BLOCKING_REGION(th, {
4571 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4572 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4573 if (result < 0) *lerrno = errno;
4574 }
4575 }, ubf_select, th, TRUE);
4576 return result;
4577}
4578
4579/*
4580 * returns a mask of events
4581 */
4582static int
4583thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4584{
4585 struct pollfd fds[1] = {{
4586 .fd = fd,
4587 .events = (short)events,
4588 .revents = 0,
4589 }};
4590 volatile int result = 0;
4591 nfds_t nfds;
4592 struct rb_io_blocking_operation blocking_operation;
4593 enum ruby_tag_type state;
4594 volatile int lerrno;
4595
4596 rb_execution_context_t *ec = GET_EC();
4597 rb_thread_t *th = rb_ec_thread_ptr(ec);
4598
4599 if (io) {
4600 blocking_operation.ec = ec;
4601 rb_io_blocking_operation_enter(io, &blocking_operation);
4602 }
4603
4604 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4605 // the fd is already ready; no need to block
4606 state = 0;
4607 fds[0].revents = events;
4608 errno = 0;
4609 }
4610 else {
4611 EC_PUSH_TAG(ec);
4612 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4613 rb_hrtime_t *to, rel, end = 0;
4614 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4615 timeout_prepare(&to, &rel, &end, timeout);
4616 do {
4617 nfds = numberof(fds);
4618 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4619
4620 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4621 } while (wait_retryable(&result, lerrno, to, end));
4622
4623 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4624 }
4625
4626 EC_POP_TAG();
4627 }
4628
4629 if (io) {
4630 rb_io_blocking_operation_exit(io, &blocking_operation);
4631 }
4632
4633 if (state) {
4634 EC_JUMP_TAG(ec, state);
4635 }
4636
4637 if (result < 0) {
4638 errno = lerrno;
4639 return -1;
4640 }
4641
4642 if (fds[0].revents & POLLNVAL) {
4643 errno = EBADF;
4644 return -1;
4645 }
4646
4647 /*
4648 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4649 * Therefore we need to convert them to the RB_WAITFD_* bits here.
4650 */
4651 result = 0;
4652 if (fds[0].revents & POLLIN_SET)
4653 result |= RB_WAITFD_IN;
4654 if (fds[0].revents & POLLOUT_SET)
4655 result |= RB_WAITFD_OUT;
4656 if (fds[0].revents & POLLEX_SET)
4657 result |= RB_WAITFD_PRI;
4658
4659 /* all requested events are ready if there is an error */
4660 if (fds[0].revents & POLLERR_SET)
4661 result |= events;
4662
4663 return result;
4664}
4665#else /* ! USE_POLL - implement thread_io_wait() using select() */
4666struct select_args {
4667 struct rb_io *io;
4668 struct rb_io_blocking_operation *blocking_operation;
4669
4670 union {
4671 int fd;
4672 int error;
4673 } as;
4674 rb_fdset_t *read;
4675 rb_fdset_t *write;
4676 rb_fdset_t *except;
4677 struct timeval *tv;
4678};
4679
4680static VALUE
4681select_single(VALUE ptr)
4682{
4683 struct select_args *args = (struct select_args *)ptr;
4684 int r;
4685
4686 r = rb_thread_fd_select(args->as.fd + 1,
4687 args->read, args->write, args->except, args->tv);
4688 if (r == -1)
4689 args->as.error = errno;
4690 if (r > 0) {
4691 r = 0;
4692 if (args->read && rb_fd_isset(args->as.fd, args->read))
4693 r |= RB_WAITFD_IN;
4694 if (args->write && rb_fd_isset(args->as.fd, args->write))
4695 r |= RB_WAITFD_OUT;
4696 if (args->except && rb_fd_isset(args->as.fd, args->except))
4697 r |= RB_WAITFD_PRI;
4698 }
4699 return (VALUE)r;
4700}
4701
4702static VALUE
4703select_single_cleanup(VALUE ptr)
4704{
4705 struct select_args *args = (struct select_args *)ptr;
4706
4707 if (args->blocking_operation) {
4708 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4709 }
4710
4711 if (args->read) rb_fd_term(args->read);
4712 if (args->write) rb_fd_term(args->write);
4713 if (args->except) rb_fd_term(args->except);
4714
4715 return (VALUE)-1;
4716}
4717
4718static rb_fdset_t *
4719init_set_fd(int fd, rb_fdset_t *fds)
4720{
4721 if (fd < 0) {
4722 return 0;
4723 }
4724 rb_fd_init(fds);
4725 rb_fd_set(fd, fds);
4726
4727 return fds;
4728}
4729
4730static int
4731thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4732{
4733 rb_fdset_t rfds, wfds, efds;
4734 struct select_args args;
4735 VALUE ptr = (VALUE)&args;
4736
4737 struct rb_io_blocking_operation blocking_operation;
4738 if (io) {
4739 args.io = io;
4740 blocking_operation.ec = GET_EC();
4741 rb_io_blocking_operation_enter(io, &blocking_operation);
4742 args.blocking_operation = &blocking_operation;
4743 }
4744 else {
4745 args.io = NULL;
4746 blocking_operation.ec = NULL;
4747 args.blocking_operation = NULL;
4748 }
4749
4750 args.as.fd = fd;
4751 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4752 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4753 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4754 args.tv = timeout;
4755
4756 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4757 if (result == -1)
4758 errno = args.as.error;
4759
4760 return result;
4761}
4762#endif /* ! USE_POLL */
4763
4764int
4765rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4766{
4767 return thread_io_wait(NULL, fd, events, timeout);
4768}
4769
4770int
4771rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
4772{
4773 return thread_io_wait(io, io->fd, events, timeout);
4774}
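
/*
 * A usage sketch for extension code (hedged; `sock` stands for any valid
 * descriptor): wait up to five seconds for readability without blocking other
 * Ruby threads.
 *
 *     struct timeval tv = { 5, 0 };
 *     int events = rb_thread_wait_for_single_fd(sock, RB_WAITFD_IN, &tv);
 *
 *     if (events < 0)                 rb_sys_fail("wait");  // errno is set
 *     else if (events == 0)           ;                     // timed out
 *     else if (events & RB_WAITFD_IN) ;                     // readable now
 */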
4775
4776/*
4777 * for GC
4778 */
4779
4780#ifdef USE_CONSERVATIVE_STACK_END
4781void
4782rb_gc_set_stack_end(VALUE **stack_end_p)
4783{
4784 VALUE stack_end;
4785COMPILER_WARNING_PUSH
4786#if RBIMPL_COMPILER_IS(GCC)
4787COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4788#endif
4789 *stack_end_p = &stack_end;
4790COMPILER_WARNING_POP
4791}
4792#endif
4793
4794/*
4795 *
4796 */
4797
4798void
4799rb_threadptr_check_signal(rb_thread_t *mth)
4800{
4801 /* mth must be main_thread */
4802 if (rb_signal_buff_size() > 0) {
4803 /* wakeup main thread */
4804 threadptr_trap_interrupt(mth);
4805 }
4806}
4807
4808static void
4809async_bug_fd(const char *mesg, int errno_arg, int fd)
4810{
4811 char buff[64];
4812 size_t n = strlcpy(buff, mesg, sizeof(buff));
4813 if (n < sizeof(buff)-3) {
4814 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4815 }
4816 rb_async_bug_errno(buff, errno_arg);
4817}
4818
4819/* VM-dependent APIs are not available in this function */
4820static int
4821consume_communication_pipe(int fd)
4822{
4823#if USE_EVENTFD
4824 uint64_t buff[1];
4825#else
4826 /* the buffer can be shared because no one else refers to it. */
4827 static char buff[1024];
4828#endif
4829 ssize_t result;
4830 int ret = FALSE; /* for rb_sigwait_sleep */
4831
4832 while (1) {
4833 result = read(fd, buff, sizeof(buff));
4834#if USE_EVENTFD
4835 RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4836#else
4837 RUBY_DEBUG_LOG("result:%d", (int)result);
4838#endif
4839 if (result > 0) {
4840 ret = TRUE;
4841 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4842 return ret;
4843 }
4844 }
4845 else if (result == 0) {
4846 return ret;
4847 }
4848 else if (result < 0) {
4849 int e = errno;
4850 switch (e) {
4851 case EINTR:
4852 continue; /* retry */
4853 case EAGAIN:
4854#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4855 case EWOULDBLOCK:
4856#endif
4857 return ret;
4858 default:
4859 async_bug_fd("consume_communication_pipe: read", e, fd);
4860 }
4861 }
4862 }
4863}
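
/*
 * Note on the two buffer shapes above: a read(2) from an eventfd must supply
 * at least 8 bytes and atomically returns-and-resets the counter, so a single
 * uint64_t is enough and one successful read drains it; a pipe can hold many
 * queued wakeup bytes, hence the 1024-byte buffer and the re-read loop.
 */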
4864
4865void
4866rb_thread_stop_timer_thread(void)
4867{
4868 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4869 native_reset_timer_thread();
4870 }
4871}
4872
4873void
4874rb_thread_reset_timer_thread(void)
4875{
4876 native_reset_timer_thread();
4877}
4878
4879void
4880rb_thread_start_timer_thread(void)
4881{
4882 system_working = 1;
4883 rb_thread_create_timer_thread();
4884}
4885
4886static int
4887clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4888{
4889 int i;
4890 VALUE coverage = (VALUE)val;
4891 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4892 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4893
4894 if (lines) {
4895 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4896 rb_ary_clear(lines);
4897 }
4898 else {
4899 int i;
4900 for (i = 0; i < RARRAY_LEN(lines); i++) {
4901 if (RARRAY_AREF(lines, i) != Qnil)
4902 RARRAY_ASET(lines, i, INT2FIX(0));
4903 }
4904 }
4905 }
4906 if (branches) {
4907 VALUE counters = RARRAY_AREF(branches, 1);
4908 for (i = 0; i < RARRAY_LEN(counters); i++) {
4909 RARRAY_ASET(counters, i, INT2FIX(0));
4910 }
4911 }
4912
4913 return ST_CONTINUE;
4914}
4915
4916void
4917rb_clear_coverages(void)
4918{
4919 VALUE coverages = rb_get_coverages();
4920 if (RTEST(coverages)) {
4921 rb_hash_foreach(coverages, clear_coverage_i, 0);
4922 }
4923}
4924
4925#if defined(HAVE_WORKING_FORK)
4926
4927static void
4928rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4929{
4930 rb_thread_t *i = 0;
4931 rb_vm_t *vm = th->vm;
4932 rb_ractor_t *r = th->ractor;
4933 vm->ractor.main_ractor = r;
4934 vm->ractor.main_thread = th;
4935 r->threads.main = th;
4936 r->status_ = ractor_created;
4937
4938 thread_sched_atfork(TH_SCHED(th));
4939 ubf_list_atfork();
4940 rb_signal_atfork();
4941
4942 // OK. Only this thread is running now, so it can safely access the other ractors and threads:
4943 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4944 if (r != vm->ractor.main_ractor) {
4945 rb_ractor_terminate_atfork(vm, r);
4946 }
4947 ccan_list_for_each(&r->threads.set, i, lt_node) {
4948 atfork(i, th);
4949 }
4950 }
4951 rb_vm_living_threads_init(vm);
4952
4953 rb_ractor_atfork(vm, th);
4954 rb_vm_postponed_job_atfork();
4955
4956 /* may be held by any thread in parent */
4957 rb_native_mutex_initialize(&th->interrupt_lock);
4958 ccan_list_head_init(&th->interrupt_exec_tasks);
4959
4960 vm->fork_gen++;
4961 rb_ractor_sleeper_threads_clear(th->ractor);
4962 rb_clear_coverages();
4963
4964 // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4965 rb_thread_reset_timer_thread();
4966 rb_thread_start_timer_thread();
4967
4968 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4969 VM_ASSERT(vm->ractor.cnt == 1);
4970}
4971
4972static void
4973terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4974{
4975 if (th != current_th) {
4976 rb_native_mutex_initialize(&th->interrupt_lock);
4977 rb_mutex_abandon_keeping_mutexes(th);
4978 rb_mutex_abandon_locking_mutex(th);
4979 thread_cleanup_func(th, TRUE);
4980 }
4981}
4982
4983void rb_fiber_atfork(rb_thread_t *);
4984void
4985rb_thread_atfork(void)
4986{
4987 rb_thread_t *th = GET_THREAD();
4988 rb_threadptr_pending_interrupt_clear(th);
4989 rb_thread_atfork_internal(th, terminate_atfork_i);
4990 th->join_list = NULL;
4991 rb_fiber_atfork(th);
4992
4993 /* We don't want to reproduce CVE-2003-0900. */
4994 rb_reset_random_seed();
4995}
4996
4997static void
4998terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4999{
5000 if (th != current_th) {
5001 thread_cleanup_func_before_exec(th);
5002 }
5003}
5004
5005void
5006rb_thread_atfork_before_exec(void)
5007{
5008 rb_thread_t *th = GET_THREAD();
5009 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
5010}
5011#else
5012void
5013rb_thread_atfork(void)
5014{
5015}
5016
5017void
5018rb_thread_atfork_before_exec(void)
5019{
5020}
5021#endif
5023struct thgroup {
5024 int enclosed;
5025};
5026
5027static const rb_data_type_t thgroup_data_type = {
5028 "thgroup",
5029 {
5030 0,
5031 RUBY_TYPED_DEFAULT_FREE,
5032 NULL, // No external memory to report
5033 },
5034 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5035};
5036
5037/*
5038 * Document-class: ThreadGroup
5039 *
5040 * ThreadGroup provides a means of keeping track of a number of threads as a
5041 * group.
5042 *
5043 * A given Thread object can only belong to one ThreadGroup at a time; adding
5044 * a thread to a new group will remove it from any previous group.
5045 *
5046 * Newly created threads belong to the same group as the thread from which they
5047 * were created.
5048 */
5049
5050/*
5051 * Document-const: Default
5052 *
5053 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5054 * by default.
5055 */
5056static VALUE
5057thgroup_s_alloc(VALUE klass)
5058{
5059 VALUE group;
5060 struct thgroup *data;
5061
5062 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5063 data->enclosed = 0;
5064
5065 return group;
5066}
5067
5068/*
5069 * call-seq:
5070 * thgrp.list -> array
5071 *
5072 * Returns an array of all existing Thread objects that belong to this group.
5073 *
5074 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5075 */
5076
5077static VALUE
5078thgroup_list(VALUE group)
5079{
5080 VALUE ary = rb_ary_new();
5081 rb_thread_t *th = 0;
5082 rb_ractor_t *r = GET_RACTOR();
5083
5084 ccan_list_for_each(&r->threads.set, th, lt_node) {
5085 if (th->thgroup == group) {
5086 rb_ary_push(ary, th->self);
5087 }
5088 }
5089 return ary;
5090}
5091
5092
5093/*
5094 * call-seq:
5095 * thgrp.enclose -> thgrp
5096 *
5097 * Prevents threads from being added to or removed from the receiving
5098 * ThreadGroup.
5099 *
5100 * New threads can still be started in an enclosed ThreadGroup.
5101 *
5102 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5103 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5104 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5105 * tg.add thr
5106 * #=> ThreadError: can't move from the enclosed thread group
5107 */
5108
5109static VALUE
5110thgroup_enclose(VALUE group)
5111{
5112 struct thgroup *data;
5113
5114 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5115 data->enclosed = 1;
5116
5117 return group;
5118}
5119
5120
5121/*
5122 * call-seq:
5123 * thgrp.enclosed? -> true or false
5124 *
5125 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5126 */
5127
5128static VALUE
5129thgroup_enclosed_p(VALUE group)
5130{
5131 struct thgroup *data;
5132
5133 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5134 return RBOOL(data->enclosed);
5135}
5136
5137
5138/*
5139 * call-seq:
5140 * thgrp.add(thread) -> thgrp
5141 *
5142 * Adds the given +thread+ to this group, removing it from any other
5143 * group to which it may have previously been a member.
5144 *
5145 * puts "Initial group is #{ThreadGroup::Default.list}"
5146 * tg = ThreadGroup.new
5147 * t1 = Thread.new { sleep }
5148 * t2 = Thread.new { sleep }
5149 * puts "t1 is #{t1}"
5150 * puts "t2 is #{t2}"
5151 * tg.add(t1)
5152 * puts "Initial group now #{ThreadGroup::Default.list}"
5153 * puts "tg group now #{tg.list}"
5154 *
5155 * This will produce:
5156 *
5157 * Initial group is #<Thread:0x401bdf4c>
5158 * t1 is #<Thread:0x401b3c90>
5159 * t2 is #<Thread:0x401b3c18>
5160 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5161 * tg group now #<Thread:0x401b3c90>
5162 */
5163
5164static VALUE
5165thgroup_add(VALUE group, VALUE thread)
5166{
5167 rb_thread_t *target_th = rb_thread_ptr(thread);
5168 struct thgroup *data;
5169
5170 if (OBJ_FROZEN(group)) {
5171 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5172 }
5173 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5174 if (data->enclosed) {
5175 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5176 }
5177
5178 if (OBJ_FROZEN(target_th->thgroup)) {
5179 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5180 }
5181 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5182 if (data->enclosed) {
5183 rb_raise(rb_eThreadError,
5184 "can't move from the enclosed thread group");
5185 }
5186
5187 target_th->thgroup = group;
5188 return group;
5189}
5190
5191/*
5192 * Document-class: ThreadShield
5193 */
5194static void
5195thread_shield_mark(void *ptr)
5196{
5197 rb_gc_mark((VALUE)ptr);
5198}
5199
5200static const rb_data_type_t thread_shield_data_type = {
5201 "thread_shield",
5202 {thread_shield_mark, 0, 0,},
5203 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5204};
5205
5206static VALUE
5207thread_shield_alloc(VALUE klass)
5208{
5209 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5210}
5211
5212#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5213#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5214#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5215#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5216STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
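
/*
 * Packing sketch: the waiting count lives in the object's FL_USER0..FL_USER19
 * flag bits. Assuming FL_USHIFT == 12 (its usual value), storing a count of 3
 * amounts to
 *
 *     flags = (flags & ~THREAD_SHIELD_WAITING_MASK) | ((VALUE)3 << 12);
 *
 * and reading it back is (flags & THREAD_SHIELD_WAITING_MASK) >> 12, which is
 * exactly what the inc/dec helpers below do via THREAD_SHIELD_WAITING_SHIFT.
 */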
5217static inline unsigned int
5218rb_thread_shield_waiting(VALUE b)
5219{
5220 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5221}
5222
5223static inline void
5224rb_thread_shield_waiting_inc(VALUE b)
5225{
5226 unsigned int w = rb_thread_shield_waiting(b);
5227 w++;
5228 if (w > THREAD_SHIELD_WAITING_MAX)
5229 rb_raise(rb_eRuntimeError, "waiting count overflow");
5230 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5231 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5232}
5233
5234static inline void
5235rb_thread_shield_waiting_dec(VALUE b)
5236{
5237 unsigned int w = rb_thread_shield_waiting(b);
5238 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5239 w--;
5240 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5241 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5242}
5243
5244VALUE
5245rb_thread_shield_new(void)
5246{
5247 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5248 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5249 return thread_shield;
5250}
5251
5252bool
5253rb_thread_shield_owned(VALUE self)
5254{
5255 VALUE mutex = GetThreadShieldPtr(self);
5256 if (!mutex) return false;
5257
5258 rb_mutex_t *m = mutex_ptr(mutex);
5259
5260 return m->fiber == GET_EC()->fiber_ptr;
5261}
5262
5263/*
5264 * Wait on a thread shield.
5265 *
5266 * Returns
5267 * true: acquired the thread shield
5268 * false: the thread shield was destroyed and no other threads waiting
5269 * nil: the thread shield was destroyed but still in use
5270 */
5271VALUE
5272rb_thread_shield_wait(VALUE self)
5273{
5274 VALUE mutex = GetThreadShieldPtr(self);
5275 rb_mutex_t *m;
5276
5277 if (!mutex) return Qfalse;
5278 m = mutex_ptr(mutex);
5279 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5280 rb_thread_shield_waiting_inc(self);
5281 rb_mutex_lock(mutex);
5282 rb_thread_shield_waiting_dec(self);
5283 if (DATA_PTR(self)) return Qtrue;
5284 rb_mutex_unlock(mutex);
5285 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5286}
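
/*
 * Lifecycle sketch (a hedged paraphrase of how callers such as the require
 * machinery use a shield, not a verbatim excerpt):
 *
 *     VALUE shield = rb_thread_shield_new();    // locked by the creator
 *     // ... other threads call rb_thread_shield_wait(shield) and block ...
 *     rb_thread_shield_destroy(shield);         // unlock; pending waiters
 *                                               // then see Qfalse or Qnil
 */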
5287
5288static VALUE
5289thread_shield_get_mutex(VALUE self)
5290{
5291 VALUE mutex = GetThreadShieldPtr(self);
5292 if (!mutex)
5293 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5294 return mutex;
5295}
5296
5297/*
5298 * Release a thread shield, and return true if it has waiting threads.
5299 */
5300VALUE
5301rb_thread_shield_release(VALUE self)
5302{
5303 VALUE mutex = thread_shield_get_mutex(self);
5304 rb_mutex_unlock(mutex);
5305 return RBOOL(rb_thread_shield_waiting(self) > 0);
5306}
5307
5308/*
5309 * Release and destroy a thread shield, and return true if it has waiting threads.
5310 */
5311VALUE
5312rb_thread_shield_destroy(VALUE self)
5313{
5314 VALUE mutex = thread_shield_get_mutex(self);
5315 DATA_PTR(self) = 0;
5316 rb_mutex_unlock(mutex);
5317 return RBOOL(rb_thread_shield_waiting(self) > 0);
5318}
5319
5320static VALUE
5321threadptr_recursive_hash(rb_thread_t *th)
5322{
5323 return th->ec->local_storage_recursive_hash;
5324}
5325
5326static void
5327threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5328{
5329 th->ec->local_storage_recursive_hash = hash;
5330}
5331
5332ID rb_frame_last_func(void);
5333
5334/*
5335 * Returns the current "recursive list" used to detect recursion.
5336 * This list is a hash table, unique for the current thread and for
5337 * the current __callee__.
5338 */
5339
5340static VALUE
5341recursive_list_access(VALUE sym)
5342{
5343 rb_thread_t *th = GET_THREAD();
5344 VALUE hash = threadptr_recursive_hash(th);
5345 VALUE list;
5346 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5347 hash = rb_ident_hash_new();
5348 threadptr_recursive_hash_set(th, hash);
5349 list = Qnil;
5350 }
5351 else {
5352 list = rb_hash_aref(hash, sym);
5353 }
5354 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5355 list = rb_ident_hash_new();
5356 rb_hash_aset(hash, sym, list);
5357 }
5358 return list;
5359}
5360
5361/*
5362 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5363 * in the recursion list.
5364 * Assumes the recursion list is valid.
5365 */
5366
5367static bool
5368recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5369{
5370#if SIZEOF_LONG == SIZEOF_VOIDP
5371 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5372#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5373 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5374 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5375#endif
5376
5377 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5378 if (UNDEF_P(pair_list))
5379 return false;
5380 if (paired_obj_id) {
5381 if (!RB_TYPE_P(pair_list, T_HASH)) {
5382 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5383 return false;
5384 }
5385 else {
5386 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5387 return false;
5388 }
5389 }
5390 return true;
5391}
5392
5393/*
5394 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5395 * For a single obj, it sets list[obj] to Qtrue.
5396 * For a pair, it sets list[obj] to paired_obj_id if possible,
5397 * otherwise list[obj] becomes a hash like:
5398 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5399 * Assumes the recursion list is valid.
5400 */
5401
5402static void
5403recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5404{
5405 VALUE pair_list;
5406
5407 if (!paired_obj) {
5408 rb_hash_aset(list, obj, Qtrue);
5409 }
5410 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5411 rb_hash_aset(list, obj, paired_obj);
5412 }
5413 else {
5414 if (!RB_TYPE_P(pair_list, T_HASH)){
5415 VALUE other_paired_obj = pair_list;
5416 pair_list = rb_hash_new();
5417 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5418 rb_hash_aset(list, obj, pair_list);
5419 }
5420 rb_hash_aset(pair_list, paired_obj, Qtrue);
5421 }
5422}
5423
5424/*
5425 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5426 * For a pair, if list[obj] is a hash, then paired_obj_id is
5427 * removed from the hash and no attempt is made to simplify
5428 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5429 * Assumes the recursion list is valid.
5430 */
5431
5432static int
5433recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5434{
5435 if (paired_obj) {
5436 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5437 if (UNDEF_P(pair_list)) {
5438 return 0;
5439 }
5440 if (RB_TYPE_P(pair_list, T_HASH)) {
5441 rb_hash_delete_entry(pair_list, paired_obj);
5442 if (!RHASH_EMPTY_P(pair_list)) {
5443 return 1; /* keep the hash until it is empty */
5444 }
5445 }
5446 }
5447 rb_hash_delete_entry(list, obj);
5448 return 1;
5449}
5451struct exec_recursive_params {
5452 VALUE (*func) (VALUE, VALUE, int);
5453 VALUE list;
5454 VALUE obj;
5455 VALUE pairid;
5456 VALUE arg;
5457};
5458
5459static VALUE
5460exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5461{
5462 struct exec_recursive_params *p = (void *)data;
5463 return (*p->func)(p->obj, p->arg, FALSE);
5464}
5465
5466/*
5467 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5468 * current method is called recursively on obj, or on the pair <obj, pairid>.
5469 * If outer is 0, then the innermost func will be called with recursive set
5470 * to true; otherwise the outermost func will be called. In the latter case,
5471 * all inner funcs are short-circuited by throw.
5472 * Implementation detail: the value thrown is the recursive list, which is
5473 * specific to the current method and unlikely to be caught anywhere else.
5474 * list[recursive_key] is used as a flag for the outermost call.
5475 */
5476
5477static VALUE
5478exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5479{
5480 VALUE result = Qundef;
5481 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5482 struct exec_recursive_params p;
5483 int outermost;
5484 p.list = recursive_list_access(sym);
5485 p.obj = obj;
5486 p.pairid = pairid;
5487 p.arg = arg;
5488 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5489
5490 if (recursive_check(p.list, p.obj, pairid)) {
5491 if (outer && !outermost) {
5492 rb_throw_obj(p.list, p.list);
5493 }
5494 return (*func)(obj, arg, TRUE);
5495 }
5496 else {
5497 enum ruby_tag_type state;
5498
5499 p.func = func;
5500
5501 if (outermost) {
5502 recursive_push(p.list, ID2SYM(recursive_key), 0);
5503 recursive_push(p.list, p.obj, p.pairid);
5504 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5505 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5506 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5507 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5508 if (result == p.list) {
5509 result = (*func)(obj, arg, TRUE);
5510 }
5511 }
5512 else {
5513 volatile VALUE ret = Qundef;
5514 recursive_push(p.list, p.obj, p.pairid);
5515 EC_PUSH_TAG(GET_EC());
5516 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5517 ret = (*func)(obj, arg, FALSE);
5518 }
5519 EC_POP_TAG();
5520 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5521 goto invalid;
5522 }
5523 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5524 result = ret;
5525 }
5526 }
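 /* the volatile access below presumably keeps `p` alive (and un-clobbered)
 * across the setjmp/longjmp paths taken via EC_EXEC_TAG() above */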
5527 *(volatile struct exec_recursive_params *)&p;
5528 return result;
5529
5530 invalid:
5531 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5532 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5533 sym, rb_thread_current());
5534 UNREACHABLE_RETURN(Qundef);
5535}
5536
5537/*
5538 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5539 * current method is called recursively on obj
5540 */
5541
5542VALUE
5543rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5544{
5545 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5546}
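
/*
 * A typical use of rb_exec_recursive(): guard a recursive, inspect-like walk
 * so that self-referential containers terminate. Sketch with hypothetical
 * names (my_inspect_body / my_inspect):
 *
 *     static VALUE
 *     my_inspect_body(VALUE obj, VALUE arg, int recursive)
 *     {
 *         if (recursive) return rb_str_new_cstr("[...]");
 *         return rb_inspect(obj);   // may re-enter for nested elements
 *     }
 *
 *     VALUE
 *     my_inspect(VALUE obj)
 *     {
 *         return rb_exec_recursive(my_inspect_body, obj, 0);
 *     }
 */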
5547
5548/*
5549 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5550 * current method is called recursively on the ordered pair <obj, paired_obj>
5551 */
5552
5553VALUE
5554rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5555{
5556 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5557}
5558
5559/*
5560 * If recursion is detected on the current method and obj, the outermost
5561 * func will be called with (obj, arg, true). All inner func will be
5562 * short-circuited using throw.
5563 */
5564
5565VALUE
5566rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5567{
5568 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5569}
5570
5571VALUE
5572rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5573{
5574 return exec_recursive(func, obj, 0, arg, 1, mid);
5575}
5576
5577/*
5578 * If recursion is detected on the current method, obj and paired_obj,
5579 * the outermost func will be called with (obj, arg, true). All inner
5580 * func will be short-circuited using throw.
5581 */
5582
5583VALUE
5584rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5585{
5586 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5587}
5588
5589/*
5590 * call-seq:
5591 * thread.backtrace -> array or nil
5592 *
5593 * Returns the current backtrace of the target thread.
5594 *
5595 */
5596
5597static VALUE
5598rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5599{
5600 return rb_vm_thread_backtrace(argc, argv, thval);
5601}
5602
5603/* call-seq:
5604 * thread.backtrace_locations(*args) -> array or nil
5605 *
5606 * Returns the execution stack for the target thread---an array containing
5607 * backtrace location objects.
5608 *
5609 * See Thread::Backtrace::Location for more information.
5610 *
5611 * This method behaves similarly to Kernel#caller_locations except it applies
5612 * to a specific thread.
5613 */
5614static VALUE
5615rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5616{
5617 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5618}
5619
5620void
5621Init_Thread_Mutex(void)
5622{
5623 rb_thread_t *th = GET_THREAD();
5624
5625 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5626 rb_native_mutex_initialize(&th->interrupt_lock);
5627}
5628
5629/*
5630 * Document-class: ThreadError
5631 *
5632 * Raised when an invalid operation is attempted on a thread.
5633 *
5634 * For example, when no other thread has been started:
5635 *
5636 * Thread.stop
5637 *
5638 * This will raise the following exception:
5639 *
5640 * ThreadError: stopping only thread
5641 * note: use sleep to stop forever
5642 */
5643
5644void
5645Init_Thread(void)
5646{
5647 rb_thread_t *th = GET_THREAD();
5648
5649 sym_never = ID2SYM(rb_intern_const("never"));
5650 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5651 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5652
5653 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5654 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5655 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5656 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5657 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5658 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5659 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5660 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5661 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5662 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5663 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5664 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5665 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5666 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5667 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5668 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5669 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5670 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5671 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5672
5673 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5674 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5675 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5676 rb_define_method(rb_cThread, "value", thread_value, 0);
5677 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5678 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5679 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5680 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5681 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5682 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5683 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5684 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5685 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5686 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5687 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5688 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5689 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5690 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5691 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5692 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5693 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5694 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5695 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5696 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5697 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5698 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5699 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5700 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5701 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5702 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5703
5704 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5705 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5706 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5707 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5708 rb_define_alias(rb_cThread, "inspect", "to_s");
5709
5710 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5711 "stream closed in another thread");
5712
5713 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5714 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5715 rb_define_method(cThGroup, "list", thgroup_list, 0);
5716 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5717 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5718 rb_define_method(cThGroup, "add", thgroup_add, 1);
5719
5720 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5721
5722 if (ptr) {
5723 long quantum = strtol(ptr, NULL, 0);
5724 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5725 thread_default_quantum_ms = (uint32_t)quantum;
5726 }
5727 else if (0) {
5728 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5729 }
5730 }
5731
5732 {
5733 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5734 rb_define_const(cThGroup, "Default", th->thgroup);
5735 }
5736
5737 recursive_key = rb_intern_const("__recursive_key__");
5738
5739 /* init thread core */
5740 {
5741 /* main thread setting */
5742 {
5743 /* acquire global vm lock */
5744#ifdef HAVE_PTHREAD_NP_H
5745 VM_ASSERT(TH_SCHED(th)->running == th);
5746#endif
5747 // thread_sched_to_running() should not be called because
5748 // it assumes blocked by thread_sched_to_waiting().
5749 // thread_sched_to_running(sched, th);
5750
5751 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5752 th->pending_interrupt_queue_checked = 0;
5753 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5754 }
5755 }
5756
5757 rb_thread_create_timer_thread();
5758
5759 Init_thread_sync();
5760
5761 // TODO: Suppress unused function warning for now
5762 // if (0) rb_thread_sched_destroy(NULL);
5763}
5764
5765int
5766ruby_native_thread_p(void)
5767{
5768 rb_thread_t *th = ruby_thread_from_native();
5769
5770 return th != 0;
5771}
5772
5773#ifdef NON_SCALAR_THREAD_ID
5774 #define thread_id_str(th) (NULL)
5775#else
5776 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5777#endif
5778
5779static void
5780debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5781{
5782 rb_thread_t *th = 0;
5783 VALUE sep = rb_str_new_cstr("\n ");
5784
5785 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5786 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5787 (void *)GET_THREAD(), (void *)r->threads.main);
5788
5789 ccan_list_for_each(&r->threads.set, th, lt_node) {
5790 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5791 "native:%p int:%u",
5792 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5793
5794 if (th->locking_mutex) {
5795 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5796 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5797 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5798 }
5799
5800 {
5801 struct rb_waiting_list *list = th->join_list;
5802 while (list) {
5803 rb_str_catf(msg, "\n depended by: rb_thread_t:%p", (void *)list->thread);
5804 list = list->next;
5805 }
5806 }
5807 rb_str_catf(msg, "\n ");
5808 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5809 rb_str_catf(msg, "\n");
5810 }
5811}
5812
5813static void
5814rb_check_deadlock(rb_ractor_t *r)
5815{
5816 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5817
5818#ifdef RUBY_THREAD_PTHREAD_H
5819 if (r->threads.sched.readyq_cnt > 0) return;
5820#endif
5821
5822 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5823 int ltnum = rb_ractor_living_thread_num(r);
5824
5825 if (ltnum > sleeper_num) return;
5826 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5827
5828 int found = 0;
5829 rb_thread_t *th = NULL;
5830
5831 ccan_list_for_each(&r->threads.set, th, lt_node) {
5832 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5833 found = 1;
5834 }
5835 else if (th->locking_mutex) {
5836 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5837 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5838 found = 1;
5839 }
5840 }
5841 if (found)
5842 break;
5843 }
5844
5845 if (!found) {
5846 VALUE argv[2];
5847 argv[0] = rb_eFatal;
5848 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5849 debug_deadlock_check(r, argv[1]);
5850 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5851 rb_threadptr_raise(r->threads.main, 2, argv);
5852 }
5853}
5854
5855static void
5856update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5857{
5858 const rb_control_frame_t *cfp = GET_EC()->cfp;
5859 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5860 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5861 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5862 if (lines) {
5863 long line = rb_sourceline() - 1;
5864 VM_ASSERT(line >= 0);
5865 long count;
5866 VALUE num;
5867 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5868 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5869 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5870 rb_ary_push(lines, LONG2FIX(line + 1));
5871 return;
5872 }
5873 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5874 return;
5875 }
5876 num = RARRAY_AREF(lines, line);
5877 if (!FIXNUM_P(num)) return;
5878 count = FIX2LONG(num) + 1;
5879 if (POSFIXABLE(count)) {
5880 RARRAY_ASET(lines, line, LONG2FIX(count));
5881 }
5882 }
5883 }
5884}
5885
5886static void
5887update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5888{
5889 const rb_control_frame_t *cfp = GET_EC()->cfp;
5890 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5891 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5892 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5893 if (branches) {
5894 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5895 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5896 VALUE counters = RARRAY_AREF(branches, 1);
5897 VALUE num = RARRAY_AREF(counters, idx);
5898 count = FIX2LONG(num) + 1;
5899 if (POSFIXABLE(count)) {
5900 RARRAY_ASET(counters, idx, LONG2FIX(count));
5901 }
5902 }
5903 }
5904}
5905
5906const rb_method_entry_t *
5907rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5908{
5909 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5910
5911 if (!me->def) return NULL; // negative cme
5912
5913 retry:
5914 switch (me->def->type) {
5915 case VM_METHOD_TYPE_ISEQ: {
5916 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5917 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5918 path = rb_iseq_path(iseq);
5919 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5920 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5921 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5922 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5923 break;
5924 }
5925 case VM_METHOD_TYPE_BMETHOD: {
5926 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5927 if (iseq) {
5928 rb_iseq_location_t *loc;
5929 rb_iseq_check(iseq);
5930 path = rb_iseq_path(iseq);
5931 loc = &ISEQ_BODY(iseq)->location;
5932 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5933 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5934 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5935 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5936 break;
5937 }
5938 return NULL;
5939 }
5940 case VM_METHOD_TYPE_ALIAS:
5941 me = me->def->body.alias.original_me;
5942 goto retry;
5943 case VM_METHOD_TYPE_REFINED:
5944 me = me->def->body.refined.orig_me;
5945 if (!me) return NULL;
5946 goto retry;
5947 default:
5948 return NULL;
5949 }
5950
5951 /* found */
5952 if (RB_TYPE_P(path, T_ARRAY)) {
5953 path = rb_ary_entry(path, 1);
5954 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just in case */
5955 }
5956 if (resolved_location) {
5957 resolved_location[0] = path;
5958 resolved_location[1] = beg_pos_lineno;
5959 resolved_location[2] = beg_pos_column;
5960 resolved_location[3] = end_pos_lineno;
5961 resolved_location[4] = end_pos_column;
5962 }
5963 return me;
5964}
5965
5966static void
5967update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5968{
5969 const rb_control_frame_t *cfp = GET_EC()->cfp;
5970 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5971 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5972 VALUE rcount;
5973 long count;
5974
5975 me = rb_resolve_me_location(me, 0);
5976 if (!me) return;
5977
5978 rcount = rb_hash_aref(me2counter, (VALUE) me);
5979 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5980 if (POSFIXABLE(count)) {
5981 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5982 }
5983}
5984
5985VALUE
5986rb_get_coverages(void)
5987{
5988 return GET_VM()->coverages;
5989}
5990
5991int
5992rb_get_coverage_mode(void)
5993{
5994 return GET_VM()->coverage_mode;
5995}
5996
5997void
5998rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5999{
6000 GET_VM()->coverages = coverages;
6001 GET_VM()->me2counter = me2counter;
6002 GET_VM()->coverage_mode = mode;
6003}
6004
6005void
6006rb_resume_coverages(void)
6007{
6008 int mode = GET_VM()->coverage_mode;
6009 VALUE me2counter = GET_VM()->me2counter;
6010 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6011 if (mode & COVERAGE_TARGET_BRANCHES) {
6012 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6013 }
6014 if (mode & COVERAGE_TARGET_METHODS) {
6015 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6016 }
6017}
6018
6019void
6020rb_suspend_coverages(void)
6021{
6022 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6023 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6024 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6025 }
6026 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6027 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6028 }
6029}
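/*
 * NOTE: suspending only removes the event hooks; the per-file coverage
 * arrays and the me2counter hash stay intact, so counting resumes from
 * the previous totals. This pair backs the Ruby-level Coverage.suspend
 * and Coverage.resume API.
 */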
6030
6031/* Make coverage arrays empty so old covered files are no longer tracked. */
6032void
6033rb_reset_coverages(void)
6034{
6035 rb_clear_coverages();
6036 rb_iseq_remove_coverage_all();
6037 GET_VM()->coverages = Qfalse;
6038}
6039
6040VALUE
6041rb_default_coverage(int n)
6042{
6043 VALUE coverage = rb_ary_hidden_new_fill(3);
6044 VALUE lines = Qfalse, branches = Qfalse;
6045 int mode = GET_VM()->coverage_mode;
6046
6047 if (mode & COVERAGE_TARGET_LINES) {
6048 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
6049 }
6050 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6051
6052 if (mode & COVERAGE_TARGET_BRANCHES) {
6053 branches = rb_ary_hidden_new_fill(2);
6054 /* internal data structures for branch coverage:
6055 *
6056 * { branch base node =>
6057 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
6058 * branch target id =>
6059 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
6060 * ...
6061 * }],
6062 * ...
6063 * }
6064 *
6065 * Example:
6066 * { NODE_CASE =>
6067 *     [1, 1, 0, 4, 3, {
6068 *       NODE_WHEN => [2, 2, 8, 2, 9, 0],
6069 *       NODE_WHEN => [3, 3, 8, 3, 9, 1],
6070 * ...
6071 * }],
6072 * ...
6073 * }
6074 */
6075 VALUE structure = rb_hash_new();
6076 rb_obj_hide(structure);
6077 RARRAY_ASET(branches, 0, structure);
6078 /* branch execution counters */
6079 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
6080 }
6081 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6082
6083 return coverage;
6084}
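/*
 * Shape sketch (illustrative, not compiled) for a file of n lines with
 * both line and branch modes enabled:
 *
 *   VALUE cov = rb_default_coverage(n);
 *   // cov is a hidden three-slot array:
 *   //   RARRAY_AREF(cov, COVERAGE_INDEX_LINES)    -> hidden n-slot array
 *   //   RARRAY_AREF(cov, COVERAGE_INDEX_BRANCHES) -> [structure hash, counters]
 *   // A disabled mode leaves Qfalse in its slot; method coverage needs
 *   // no per-file slot because it lives in the VM-global me2counter hash.
 */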
6085
6086static VALUE
6087uninterruptible_exit(VALUE v)
6088{
6089 rb_thread_t *cur_th = GET_THREAD();
6090 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6091
6092 cur_th->pending_interrupt_queue_checked = 0;
6093 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6094 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6095 }
6096 return Qnil;
6097}
6098
6099VALUE
6100rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6101{
6102 VALUE interrupt_mask = rb_ident_hash_new();
6103 rb_thread_t *cur_th = GET_THREAD();
6104
6105 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6106 OBJ_FREEZE(interrupt_mask);
6107 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6108
6109 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6110
6111 RUBY_VM_CHECK_INTS(cur_th->ec);
6112 return ret;
6113}
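/*
 * Usage sketch (illustrative, not compiled; do_critical_work is a
 * hypothetical callback): run C code with asynchronous interrupts
 * (Thread#raise, Thread#kill, signal-driven events) deferred until the
 * callback returns:
 *
 *   static VALUE
 *   do_critical_work(VALUE arg)
 *   {
 *       // executes with the { Object => :never } mask pushed
 *       return Qnil;
 *   }
 *
 *   VALUE result = rb_uninterruptible(do_critical_work, (VALUE)0);
 *   // any deferred interrupt fires in the trailing RUBY_VM_CHECK_INTS
 */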
6114
6115static void
6116thread_specific_storage_alloc(rb_thread_t *th)
6117{
6118 VM_ASSERT(th->specific_storage == NULL);
6119
6120 if (UNLIKELY(specific_key_count > 0)) {
6121 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6122 }
6123}
6124
6125rb_internal_thread_specific_key_t
6126rb_internal_thread_specific_key_create(void)
6127{
6128 rb_vm_t *vm = GET_VM();
6129
6130 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6131 rb_raise(rb_eThreadError, "the first rb_internal_thread_specific_key_create() must be called before any additional ractor is created");
6132 }
6133 else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6134 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() cannot be called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6135 }
6136 else {
6137 rb_internal_thread_specific_key_t key = specific_key_count++;
6138
6139 if (key == 0) {
6140 // allocate
6141 rb_ractor_t *cr = GET_RACTOR();
6142 rb_thread_t *th;
6143
6144 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6145 thread_specific_storage_alloc(th);
6146 }
6147 }
6148 return key;
6149 }
6150}
6151
6152// async and native thread safe.
6153void *
6154rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6155{
6156 rb_thread_t *th = DATA_PTR(thread_val);
6157
6158 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6159 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6160 VM_ASSERT(th->specific_storage);
6161
6162 return th->specific_storage[key];
6163}
6164
6165// async and native thread safe.
6166void
6167rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6168{
6169 rb_thread_t *th = DATA_PTR(thread_val);
6170
6171 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6172 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6173 VM_ASSERT(th->specific_storage);
6174
6175 th->specific_storage[key] = data;
6176}
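/*
 * Usage sketch (illustrative, not compiled; my_key/some_ptr are
 * hypothetical): the intended pattern for the three public hooks above,
 * e.g. from a profiling tool. The key must be created while only one
 * ractor exists:
 *
 *   static rb_internal_thread_specific_key_t my_key;
 *
 *   void my_tool_init(void) {
 *       my_key = rb_internal_thread_specific_key_create();
 *   }
 *
 *   // later, given a Thread VALUE (e.g. in a thread event callback):
 *   rb_internal_thread_specific_set(thread_val, my_key, some_ptr);
 *   void *p = rb_internal_thread_specific_get(thread_val, my_key);
 */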
6177
6178// interrupt_exec
6179
6180struct rb_interrupt_exec_task {
6181 struct ccan_list_node node;
6182
6183 rb_interrupt_exec_func_t *func;
6184 void *data;
6185 enum rb_interrupt_exec_flag flags;
6186};
6187
6188void
6189rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6190{
6191 struct rb_interrupt_exec_task *task;
6192
6193 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6194 if (task->flags & rb_interrupt_exec_flag_value_data) {
6195 rb_gc_mark((VALUE)task->data);
6196 }
6197 }
6198}
6199
6200// native thread safe
6201// th must remain valid for the duration of this call
6202void
6203rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6204{
6205 // TODO: should not use ALLOC here; ALLOC needs the GVL and can raise, but this function is documented as native thread safe
6206 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6207 *task = (struct rb_interrupt_exec_task) {
6208 .flags = flags,
6209 .func = func,
6210 .data = data,
6211 };
6212
6213 rb_native_mutex_lock(&th->interrupt_lock);
6214 {
6215 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6216 threadptr_set_interrupt_locked(th, true);
6217 }
6218 rb_native_mutex_unlock(&th->interrupt_lock);
6219}
6220
6221static void
6222threadptr_interrupt_exec_exec(rb_thread_t *th)
6223{
6224 while (1) {
6225 struct rb_interrupt_exec_task *task;
6226
6227 rb_native_mutex_lock(&th->interrupt_lock);
6228 {
6229 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6230 }
6231 rb_native_mutex_unlock(&th->interrupt_lock);
6232
6233 RUBY_DEBUG_LOG("task:%p", task);
6234
6235 if (task) {
6236 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6237 rb_thread_create(task->func, task->data);
6238 }
6239 else {
6240 (*task->func)(task->data);
6241 }
6242 ruby_xfree(task);
6243 }
6244 else {
6245 break;
6246 }
6247 }
6248}
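/*
 * NOTE: the queue lock is taken per pop rather than held across the whole
 * drain, so task->func runs without th->interrupt_lock held and may itself
 * enqueue further work via rb_threadptr_interrupt_exec().
 */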
6249
6250static void
6251threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6252{
6253 rb_native_mutex_lock(&th->interrupt_lock);
6254 {
6255 struct rb_interrupt_exec_task *task;
6256
6257 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6258 ruby_xfree(task);
6259 }
6260 }
6261 rb_native_mutex_unlock(&th->interrupt_lock);
6262}
6263
6264// native thread safe
6265// func/data should be native thread safe
6266void
6267rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6268 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6269{
6270 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6271
6272 rb_thread_t *main_th = target_r->threads.main;
6273 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
6274}
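/*
 * NOTE: the task is queued on the target ractor's main thread with
 * rb_interrupt_exec_flag_new_thread forced, so func ends up running on a
 * freshly created thread inside that ractor; the main thread is only
 * interrupted long enough to spawn it.
 */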
6275