Ruby 3.5.0dev (2025-10-06 revision 704677257ecb01c7ee10aa0dfc55ca1d4fc4636d)
thread.c (704677257ecb01c7ee10aa0dfc55ca1d4fc4636d)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15    Same as the traditional ruby thread: scheduling is done entirely at user level.
16
17  model 2: Native Thread with Global VM lock
18    Uses pthreads (or Windows threads); Ruby threads run concurrently, but not in parallel.
19
20  model 3: Native Thread with fine grain lock
21    Uses pthreads; Ruby threads run concurrently or in parallel.
22
23  model 4: M:N User:Native threads with Global VM lock
24    Combination of models 1 and 2.
25
26  model 5: M:N User:Native thread with fine grain lock
27    Combination of models 1 and 3.
28
29------------------------------------------------------------------------
30
31  model 2:
32    Only the thread that holds the mutex (GVL: Global VM Lock, or Giant VM Lock) can run.
33    When scheduling, the running thread releases the GVL. If the running
34    thread performs a blocking operation, it must release the GVL so that
35    another thread can continue. After the blocking operation, the thread
36    must check for interrupts (RUBY_VM_CHECK_INTS).
37
38    Each VM can run in parallel.
39
40    Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44  model 3:
45    All threads run concurrently or in parallel, and exclusive access
46    control is needed to access shared objects. For example, to access a
47    String or Array object, a fine-grained lock must be taken every time.
48 */
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later with _FORTIFY_SOURCE > 0.
54 * However, the implementation is wrong. Even though Linux's select(2)
55 * supports large fd sizes (> FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). Then, when HAVE_RB_FD_INIT is enabled,
57 * it doesn't work correctly and makes the program abort. Therefore we need
58 * to disable _FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/eval.h"
82#include "internal/gc.h"
83#include "internal/hash.h"
84#include "internal/io.h"
85#include "internal/object.h"
86#include "internal/proc.h"
88#include "internal/signal.h"
89#include "internal/thread.h"
90#include "internal/time.h"
91#include "internal/warnings.h"
92#include "iseq.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#include "ccan/list/list.h"
104
105#ifndef USE_NATIVE_THREAD_PRIORITY
106#define USE_NATIVE_THREAD_PRIORITY 0
107#define RUBY_THREAD_PRIORITY_MAX 3
108#define RUBY_THREAD_PRIORITY_MIN -3
109#endif
110
111static VALUE rb_cThreadShield;
112static VALUE cThGroup;
113
114static VALUE sym_immediate;
115static VALUE sym_on_blocking;
116static VALUE sym_never;
117
118static uint32_t thread_default_quantum_ms = 100;
119
120#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
121#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
122
123static inline VALUE
124rb_thread_local_storage(VALUE thread)
125{
126 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
127 rb_ivar_set(thread, idLocals, rb_hash_new());
128 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
129 }
130 return rb_ivar_get(thread, idLocals);
131}
132
133enum SLEEP_FLAGS {
134 SLEEP_DEADLOCKABLE = 0x01,
135 SLEEP_SPURIOUS_CHECK = 0x02,
136 SLEEP_ALLOW_SPURIOUS = 0x04,
137 SLEEP_NO_CHECKINTS = 0x08,
138};
139
140static void sleep_forever(rb_thread_t *th, unsigned int fl);
141static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
142
143static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
144static int rb_threadptr_dead(rb_thread_t *th);
145static void rb_check_deadlock(rb_ractor_t *r);
146static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
147static const char *thread_status_name(rb_thread_t *th, int detail);
148static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
149NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
150MAYBE_UNUSED(static int consume_communication_pipe(int fd));
151
152static rb_atomic_t system_working = 1;
153static rb_internal_thread_specific_key_t specific_key_count;
154
155/********************************************************************************/
156
157#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
158
159struct rb_blocking_region_buffer {
160    enum rb_thread_status prev_status;
161};
162
163static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
164static void unblock_function_clear(rb_thread_t *th);
165
166static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
167 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
168static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
169
170#define THREAD_BLOCKING_BEGIN(th) do { \
171 struct rb_thread_sched * const sched = TH_SCHED(th); \
172 RB_VM_SAVE_MACHINE_CONTEXT(th); \
173 thread_sched_to_waiting((sched), (th));
174
175#define THREAD_BLOCKING_END(th) \
176 thread_sched_to_running((sched), (th)); \
177 rb_ractor_thread_switch(th->ractor, th, false); \
178} while(0)
179
180#ifdef __GNUC__
181#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
182#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
183#else
184#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
185#endif
186#else
187#define only_if_constant(expr, notconst) notconst
188#endif
189#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
190 struct rb_blocking_region_buffer __region; \
191 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
192 /* always return true unless fail_if_interrupted */ \
193 !only_if_constant(fail_if_interrupted, TRUE)) { \
194 /* Important that this is inlined into the macro, and not part of \
195 * blocking_region_begin - see bug #20493 */ \
196 RB_VM_SAVE_MACHINE_CONTEXT(th); \
197 thread_sched_to_waiting(TH_SCHED(th), th); \
198 exec; \
199 blocking_region_end(th, &__region); \
200 }; \
201} while(0)
202
203/*
204 * returns true if this thread was spuriously interrupted, false otherwise
205 * (e.g. hit by Thread#run or ran a Ruby-level Signal.trap handler)
206 */
207#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
208static inline int
209vm_check_ints_blocking(rb_execution_context_t *ec)
210{
211#ifdef RUBY_ASSERT_CRITICAL_SECTION
212 VM_ASSERT(ruby_assert_critical_section_entered == 0);
213#endif
214
215 rb_thread_t *th = rb_ec_thread_ptr(ec);
216
217 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
218 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
219 }
220 else {
221 th->pending_interrupt_queue_checked = 0;
222 RUBY_VM_SET_INTERRUPT(ec);
223 }
224 return rb_threadptr_execute_interrupts(th, 1);
225}
226
227int
228rb_vm_check_ints_blocking(rb_execution_context_t *ec)
229{
230 return vm_check_ints_blocking(ec);
231}
232
233/*
234 * poll() is supported by many OSes, but so far Linux is the only
235 * one we know of that supports using poll() in all places select()
236 * would work.
237 */
238#if defined(HAVE_POLL)
239# if defined(__linux__)
240# define USE_POLL
241# endif
242# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
243# define USE_POLL
244 /* FreeBSD does not set POLLOUT when POLLHUP happens */
245# define POLLERR_SET (POLLHUP | POLLERR)
246# endif
247#endif
248
249static void
250timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
251 const struct timeval *timeout)
252{
253 if (timeout) {
254 *rel = rb_timeval2hrtime(timeout);
255 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
256 *to = rel;
257 }
258 else {
259 *to = 0;
260 }
261}
262
263MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
264MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
265MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
266
267#include THREAD_IMPL_SRC
268
269/*
270 * TODO: somebody with win32 knowledge should be able to get rid of
271 * timer-thread by busy-waiting on signals. And it should be possible
272 * to make the GVL in thread_pthread.c be platform-independent.
273 */
274#ifndef BUSY_WAIT_SIGNALS
275# define BUSY_WAIT_SIGNALS (0)
276#endif
277
278#ifndef USE_EVENTFD
279# define USE_EVENTFD (0)
280#endif
281
282#include "thread_sync.c"
283
284void
285rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
286{
287    rb_native_mutex_initialize(lock);
288}
289
290void
291rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
292{
293    rb_native_mutex_destroy(lock);
294}
295
296void
297rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
298{
299    rb_native_mutex_lock(lock);
300}
301
302void
303rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
304{
305    rb_native_mutex_unlock(lock);
306}
307
308static int
309unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
310{
311 do {
312 if (fail_if_interrupted) {
313 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
314 return FALSE;
315 }
316 }
317 else {
318 RUBY_VM_CHECK_INTS(th->ec);
319 }
320
321 rb_native_mutex_lock(&th->interrupt_lock);
322 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
323 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
324
325 VM_ASSERT(th->unblock.func == NULL);
326
327 th->unblock.func = func;
328 th->unblock.arg = arg;
329 rb_native_mutex_unlock(&th->interrupt_lock);
330
331 return TRUE;
332}
333
334static void
335unblock_function_clear(rb_thread_t *th)
336{
337 rb_native_mutex_lock(&th->interrupt_lock);
338 th->unblock.func = 0;
339 rb_native_mutex_unlock(&th->interrupt_lock);
340}
341
342static void
343threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
344{
345 // th->interrupt_lock should be acquired here
346
347 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
348
349 if (trap) {
350 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
351 }
352 else {
353 RUBY_VM_SET_INTERRUPT(th->ec);
354 }
355
356 if (th->unblock.func != NULL) {
357 (th->unblock.func)(th->unblock.arg);
358 }
359 else {
360 /* none */
361 }
362}
363
364static void
365threadptr_set_interrupt(rb_thread_t *th, int trap)
366{
367 rb_native_mutex_lock(&th->interrupt_lock);
368 {
369 threadptr_set_interrupt_locked(th, trap);
370 }
371 rb_native_mutex_unlock(&th->interrupt_lock);
372}
373
374/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
375void
376rb_threadptr_interrupt(rb_thread_t *th)
377{
378 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
379 threadptr_set_interrupt(th, false);
380}
381
382static void
383threadptr_trap_interrupt(rb_thread_t *th)
384{
385 threadptr_set_interrupt(th, true);
386}
387
388static void
389terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
390{
391 rb_thread_t *th = 0;
392
393 ccan_list_for_each(&r->threads.set, th, lt_node) {
394 if (th != main_thread) {
395 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
396
397 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
398 rb_threadptr_interrupt(th);
399
400 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
401 }
402 else {
403 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
404 }
405 }
406}
407
408static void
409rb_threadptr_join_list_wakeup(rb_thread_t *thread)
410{
411 while (thread->join_list) {
412 struct rb_waiting_list *join_list = thread->join_list;
413
414 // Consume the entry from the join list:
415 thread->join_list = join_list->next;
416
417 rb_thread_t *target_thread = join_list->thread;
418
419 if (target_thread->scheduler != Qnil && join_list->fiber) {
420 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
421 }
422 else {
423 rb_threadptr_interrupt(target_thread);
424
425 switch (target_thread->status) {
426 case THREAD_STOPPED:
427 case THREAD_STOPPED_FOREVER:
428 target_thread->status = THREAD_RUNNABLE;
429 break;
430 default:
431 break;
432 }
433 }
434 }
435}
436
437void
438rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
439{
440 while (th->keeping_mutexes) {
441 rb_mutex_t *mutex = th->keeping_mutexes;
442 th->keeping_mutexes = mutex->next_mutex;
443
444 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
445 VM_ASSERT(mutex->fiber);
446 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
447 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
448 }
449}
450
451void
452rb_thread_terminate_all(rb_thread_t *th)
453{
454 rb_ractor_t *cr = th->ractor;
455 rb_execution_context_t * volatile ec = th->ec;
456 volatile int sleeping = 0;
457
458 if (cr->threads.main != th) {
459 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
460 (void *)cr->threads.main, (void *)th);
461 }
462
463 /* unlock all locking mutexes */
464 rb_threadptr_unlock_all_locking_mutexes(th);
465
466 EC_PUSH_TAG(ec);
467 if (EC_EXEC_TAG() == TAG_NONE) {
468 retry:
469 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
470
471 terminate_all(cr, th);
472
473 while (rb_ractor_living_thread_num(cr) > 1) {
474 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
475            /*
476             * The thread-exit routine in thread_start_func_2 will notify
477             * us when the last sub-thread exits.
478             */
479 sleeping = 1;
480 native_sleep(th, &rel);
481 RUBY_VM_CHECK_INTS_BLOCKING(ec);
482 sleeping = 0;
483 }
484 }
485 else {
486 /*
487         * When an exception is caught (e.g. Ctrl+C), broadcast the
488         * kill request again to ensure that all threads are killed, even
489         * if they are blocked on sleep, mutex, etc.
490 */
491 if (sleeping) {
492 sleeping = 0;
493 goto retry;
494 }
495 }
496 EC_POP_TAG();
497}
498
499void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
500static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
501
502static void
503thread_cleanup_func_before_exec(void *th_ptr)
504{
505 rb_thread_t *th = th_ptr;
506 th->status = THREAD_KILLED;
507
508 // The thread stack doesn't exist in the forked process:
509 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
510
511 threadptr_interrupt_exec_cleanup(th);
512 rb_threadptr_root_fiber_terminate(th);
513}
514
515static void
516thread_cleanup_func(void *th_ptr, int atfork)
517{
518 rb_thread_t *th = th_ptr;
519
520 th->locking_mutex = Qfalse;
521 thread_cleanup_func_before_exec(th_ptr);
522
523 if (atfork) {
524 native_thread_destroy_atfork(th->nt);
525 th->nt = NULL;
526 return;
527 }
528
529 rb_native_mutex_destroy(&th->interrupt_lock);
530}
531
532void
533rb_thread_free_native_thread(void *th_ptr)
534{
535 rb_thread_t *th = th_ptr;
536
537 native_thread_destroy_atfork(th->nt);
538 th->nt = NULL;
539}
540
541static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
542static VALUE rb_thread_to_s(VALUE thread);
543
544void
545ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
546{
547 native_thread_init_stack(th, local_in_parent_frame);
548}
549
550const VALUE *
551rb_vm_proc_local_ep(VALUE proc)
552{
553 const VALUE *ep = vm_proc_ep(proc);
554
555 if (ep) {
556 return rb_vm_ep_local_ep(ep);
557 }
558 else {
559 return NULL;
560 }
561}
562
563// for ractor, defined in vm.c
564VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
565 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
566
567static VALUE
568thread_do_start_proc(rb_thread_t *th)
569{
570 VALUE args = th->invoke_arg.proc.args;
571 const VALUE *args_ptr;
572 int args_len;
573 VALUE procval = th->invoke_arg.proc.proc;
574 rb_proc_t *proc;
575 GetProcPtr(procval, proc);
576
577 th->ec->errinfo = Qnil;
578 th->ec->root_lep = rb_vm_proc_local_ep(procval);
579 th->ec->root_svar = Qfalse;
580
581 vm_check_ints_blocking(th->ec);
582
583 if (th->invoke_type == thread_invoke_type_ractor_proc) {
584 VALUE self = rb_ractor_self(th->ractor);
585 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
586
587 VM_ASSERT(FIXNUM_P(args));
588 args_len = FIX2INT(args);
589 args_ptr = ALLOCA_N(VALUE, args_len);
590 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
591 vm_check_ints_blocking(th->ec);
592
593 return rb_vm_invoke_proc_with_self(
594 th->ec, proc, self,
595 args_len, args_ptr,
596 th->invoke_arg.proc.kw_splat,
597 VM_BLOCK_HANDLER_NONE
598 );
599 }
600 else {
601 args_len = RARRAY_LENINT(args);
602 if (args_len < 8) {
603            /* free proc.args if the length is small enough */
604 args_ptr = ALLOCA_N(VALUE, args_len);
605 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
606 th->invoke_arg.proc.args = Qnil;
607 }
608 else {
609 args_ptr = RARRAY_CONST_PTR(args);
610 }
611
612 vm_check_ints_blocking(th->ec);
613
614 return rb_vm_invoke_proc(
615 th->ec, proc,
616 args_len, args_ptr,
617 th->invoke_arg.proc.kw_splat,
618 VM_BLOCK_HANDLER_NONE
619 );
620 }
621}
622
623static VALUE
624thread_do_start(rb_thread_t *th)
625{
626 native_set_thread_name(th);
627 VALUE result = Qundef;
628
629 switch (th->invoke_type) {
630 case thread_invoke_type_proc:
631 result = thread_do_start_proc(th);
632 break;
633
634 case thread_invoke_type_ractor_proc:
635 result = thread_do_start_proc(th);
636 rb_ractor_atexit(th->ec, result);
637 break;
638
639 case thread_invoke_type_func:
640 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
641 break;
642
643 case thread_invoke_type_none:
644 rb_bug("unreachable");
645 }
646
647 return result;
648}
649
650void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
651
652static int
653thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
654{
655 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
656 VM_ASSERT(th != th->vm->ractor.main_thread);
657
658 enum ruby_tag_type state;
659 VALUE errinfo = Qnil;
660 rb_thread_t *ractor_main_th = th->ractor->threads.main;
661
662 // setup ractor
663 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
664 RB_VM_LOCK();
665 {
666 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
667 rb_ractor_t *r = th->ractor;
668 r->r_stdin = rb_io_prep_stdin();
669 r->r_stdout = rb_io_prep_stdout();
670 r->r_stderr = rb_io_prep_stderr();
671 }
672 RB_VM_UNLOCK();
673 }
674
675 // Ensure that we are not joinable.
676 VM_ASSERT(UNDEF_P(th->value));
677
678 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
679 VALUE result = Qundef;
680
681 EC_PUSH_TAG(th->ec);
682
683 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
684 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
685
686 result = thread_do_start(th);
687 }
688
689 if (!fiber_scheduler_closed) {
690 fiber_scheduler_closed = 1;
691        rb_fiber_scheduler_set(Qnil);
692    }
693
694 if (!event_thread_end_hooked) {
695 event_thread_end_hooked = 1;
696 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
697 }
698
699 if (state == TAG_NONE) {
700 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
701 th->value = result;
702 }
703 else {
704 errinfo = th->ec->errinfo;
705
706 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
707 if (!NIL_P(exc)) errinfo = exc;
708
709 if (state == TAG_FATAL) {
710 if (th->invoke_type == thread_invoke_type_ractor_proc) {
711 rb_ractor_atexit(th->ec, Qnil);
712 }
713 /* fatal error within this thread, need to stop whole script */
714 }
715 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
716 if (th->invoke_type == thread_invoke_type_ractor_proc) {
717 rb_ractor_atexit_exception(th->ec);
718 }
719
720 /* exit on main_thread. */
721 }
722 else {
723 if (th->report_on_exception) {
724 VALUE mesg = rb_thread_to_s(th->self);
725 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
726 rb_write_error_str(mesg);
727 rb_ec_error_print(th->ec, errinfo);
728 }
729
730 if (th->invoke_type == thread_invoke_type_ractor_proc) {
731 rb_ractor_atexit_exception(th->ec);
732 }
733
734 if (th->vm->thread_abort_on_exception ||
735 th->abort_on_exception || RTEST(ruby_debug)) {
736 /* exit on main_thread */
737 }
738 else {
739 errinfo = Qnil;
740 }
741 }
742 th->value = Qnil;
743 }
744
745 // The thread is effectively finished and can be joined.
746 VM_ASSERT(!UNDEF_P(th->value));
747
748 rb_threadptr_join_list_wakeup(th);
749 rb_threadptr_unlock_all_locking_mutexes(th);
750
751 if (th->invoke_type == thread_invoke_type_ractor_proc) {
752 rb_thread_terminate_all(th);
753 rb_ractor_teardown(th->ec);
754 }
755
756 th->status = THREAD_KILLED;
757 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
758
759 if (th->vm->ractor.main_thread == th) {
760 ruby_stop(0);
761 }
762
763 if (RB_TYPE_P(errinfo, T_OBJECT)) {
764 /* treat with normal error object */
765 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
766 }
767
768 EC_POP_TAG();
769
770 rb_ec_clear_current_thread_trace_func(th->ec);
771
772 /* locking_mutex must be Qfalse */
773 if (th->locking_mutex != Qfalse) {
774 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
775 (void *)th, th->locking_mutex);
776 }
777
778 if (ractor_main_th->status == THREAD_KILLED &&
779 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
780 /* I'm last thread. wake up main thread from rb_thread_terminate_all */
781 rb_threadptr_interrupt(ractor_main_th);
782 }
783
784 rb_check_deadlock(th->ractor);
785
786 rb_fiber_close(th->ec->fiber_ptr);
787
788 thread_cleanup_func(th, FALSE);
789 VM_ASSERT(th->ec->vm_stack == NULL);
790
791 if (th->invoke_type == thread_invoke_type_ractor_proc) {
792 // after rb_ractor_living_threads_remove()
793 // GC will happen anytime and this ractor can be collected (and destroy GVL).
794 // So gvl_release() should be before it.
795 thread_sched_to_dead(TH_SCHED(th), th);
796 rb_ractor_living_threads_remove(th->ractor, th);
797 }
798 else {
799 rb_ractor_living_threads_remove(th->ractor, th);
800 thread_sched_to_dead(TH_SCHED(th), th);
801 }
802
803 return 0;
804}
805
806struct thread_create_params {
807    enum thread_invoke_type type;
808
809 // for normal proc thread
810 VALUE args;
811 VALUE proc;
812
813 // for ractor
814 rb_ractor_t *g;
815
816 // for func
817 VALUE (*fn)(void *);
818};
819
820static void thread_specific_storage_alloc(rb_thread_t *th);
821
822static VALUE
823thread_create_core(VALUE thval, struct thread_create_params *params)
824{
825 rb_execution_context_t *ec = GET_EC();
826 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
827 int err;
828
829 thread_specific_storage_alloc(th);
830
831 if (OBJ_FROZEN(current_th->thgroup)) {
832 rb_raise(rb_eThreadError,
833 "can't start a new thread (frozen ThreadGroup)");
834 }
835
836 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
837
838 switch (params->type) {
839 case thread_invoke_type_proc:
840 th->invoke_type = thread_invoke_type_proc;
841 th->invoke_arg.proc.args = params->args;
842 th->invoke_arg.proc.proc = params->proc;
843 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
844 break;
845
846 case thread_invoke_type_ractor_proc:
847#if RACTOR_CHECK_MODE > 0
848 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
849#endif
850 th->invoke_type = thread_invoke_type_ractor_proc;
851 th->ractor = params->g;
852 th->ractor->threads.main = th;
853 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc, Qnil);
854 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
855 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
856 rb_ractor_send_parameters(ec, params->g, params->args);
857 break;
858
859 case thread_invoke_type_func:
860 th->invoke_type = thread_invoke_type_func;
861 th->invoke_arg.func.func = params->fn;
862 th->invoke_arg.func.arg = (void *)params->args;
863 break;
864
865 default:
866 rb_bug("unreachable");
867 }
868
869 th->priority = current_th->priority;
870 th->thgroup = current_th->thgroup;
871
872 th->pending_interrupt_queue = rb_ary_hidden_new(0);
873 th->pending_interrupt_queue_checked = 0;
874 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
875 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
876
877 rb_native_mutex_initialize(&th->interrupt_lock);
878
879 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
880
881 rb_ractor_living_threads_insert(th->ractor, th);
882
883 /* kick thread */
884 err = native_thread_create(th);
885 if (err) {
886 th->status = THREAD_KILLED;
887 rb_ractor_living_threads_remove(th->ractor, th);
888 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
889 }
890 return thval;
891}
892
893#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
894
895/*
896 * call-seq:
897 * Thread.new { ... } -> thread
898 * Thread.new(*args, &proc) -> thread
899 * Thread.new(*args) { |args| ... } -> thread
900 *
901 * Creates a new thread executing the given block.
902 *
903 * Any +args+ given to ::new will be passed to the block:
904 *
905 * arr = []
906 * a, b, c = 1, 2, 3
907 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
908 * arr #=> [1, 2, 3]
909 *
910 * A ThreadError exception is raised if ::new is called without a block.
911 *
912 * If you're going to subclass Thread, be sure to call super in your
913 * +initialize+ method, otherwise a ThreadError will be raised.
914 */
915static VALUE
916thread_s_new(int argc, VALUE *argv, VALUE klass)
917{
918 rb_thread_t *th;
919 VALUE thread = rb_thread_alloc(klass);
920
921 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
922 rb_raise(rb_eThreadError, "can't alloc thread");
923 }
924
925 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
926 th = rb_thread_ptr(thread);
927 if (!threadptr_initialized(th)) {
928 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
929 klass);
930 }
931 return thread;
932}
933
934/*
935 * call-seq:
936 * Thread.start([args]*) {|args| block } -> thread
937 * Thread.fork([args]*) {|args| block } -> thread
938 *
939 * Basically the same as ::new. However, if class Thread is subclassed, then
940 * calling +start+ in that subclass will not invoke the subclass's
941 * +initialize+ method.
942 */
943
944static VALUE
945thread_start(VALUE klass, VALUE args)
946{
947 struct thread_create_params params = {
948 .type = thread_invoke_type_proc,
949 .args = args,
950 .proc = rb_block_proc(),
951 };
952 return thread_create_core(rb_thread_alloc(klass), &params);
953}
954
955static VALUE
956threadptr_invoke_proc_location(rb_thread_t *th)
957{
958 if (th->invoke_type == thread_invoke_type_proc) {
959 return rb_proc_location(th->invoke_arg.proc.proc);
960 }
961 else {
962 return Qnil;
963 }
964}
965
966/* :nodoc: */
967static VALUE
968thread_initialize(VALUE thread, VALUE args)
969{
970 rb_thread_t *th = rb_thread_ptr(thread);
971
972 if (!rb_block_given_p()) {
973 rb_raise(rb_eThreadError, "must be called with a block");
974 }
975 else if (th->invoke_type != thread_invoke_type_none) {
976 VALUE loc = threadptr_invoke_proc_location(th);
977 if (!NIL_P(loc)) {
978 rb_raise(rb_eThreadError,
979 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
980 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
981 }
982 else {
983 rb_raise(rb_eThreadError, "already initialized thread");
984 }
985 }
986 else {
987 struct thread_create_params params = {
988 .type = thread_invoke_type_proc,
989 .args = args,
990 .proc = rb_block_proc(),
991 };
992 return thread_create_core(thread, &params);
993 }
994}
995
996VALUE
997rb_thread_create(VALUE (*fn)(void *), void *arg)
998{
999 struct thread_create_params params = {
1000 .type = thread_invoke_type_func,
1001 .fn = fn,
1002 .args = (VALUE)arg,
1003 };
1004 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1005}
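/*
 * Illustrative sketch (not part of the upstream file, excluded from
 * compilation): how C extension code might use rb_thread_create() to run a
 * C function on a new Ruby thread. The example_* names are hypothetical.
 */
#if 0
static VALUE
example_thread_body(void *arg)
{
    /* Runs on the new thread with the GVL held, so Ruby APIs are safe here. */
    VALUE obj = (VALUE)arg;
    return rb_funcall(obj, rb_intern("to_s"), 0);
}

static VALUE
example_spawn_thread(VALUE obj)
{
    VALUE thread = rb_thread_create(example_thread_body, (void *)obj);
    /* Thread#value joins the new thread and returns example_thread_body's result. */
    return rb_funcall(thread, rb_intern("value"), 0);
}
#endif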
1006
1007VALUE
1008rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
1009{
1010 struct thread_create_params params = {
1011 .type = thread_invoke_type_ractor_proc,
1012 .g = r,
1013 .args = args,
1014 .proc = proc,
1015 };
1016    return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1017}
1018
1020struct join_arg {
1021 struct rb_waiting_list *waiter;
1022 rb_thread_t *target;
1023 VALUE timeout;
1024 rb_hrtime_t *limit;
1025};
1026
1027static VALUE
1028remove_from_join_list(VALUE arg)
1029{
1030 struct join_arg *p = (struct join_arg *)arg;
1031 rb_thread_t *target_thread = p->target;
1032
1033 if (target_thread->status != THREAD_KILLED) {
1034 struct rb_waiting_list **join_list = &target_thread->join_list;
1035
1036 while (*join_list) {
1037 if (*join_list == p->waiter) {
1038 *join_list = (*join_list)->next;
1039 break;
1040 }
1041
1042 join_list = &(*join_list)->next;
1043 }
1044 }
1045
1046 return Qnil;
1047}
1048
1049static int
1050thread_finished(rb_thread_t *th)
1051{
1052 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1053}
1054
1055static VALUE
1056thread_join_sleep(VALUE arg)
1057{
1058 struct join_arg *p = (struct join_arg *)arg;
1059 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1060 rb_hrtime_t end = 0, *limit = p->limit;
1061
1062 if (limit) {
1063 end = rb_hrtime_add(*limit, rb_hrtime_now());
1064 }
1065
1066 while (!thread_finished(target_th)) {
1067 VALUE scheduler = rb_fiber_scheduler_current();
1068
1069 if (!limit) {
1070 if (scheduler != Qnil) {
1071 rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
1072 }
1073 else {
1074 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1075 }
1076 }
1077 else {
1078 if (hrtime_update_expire(limit, end)) {
1079 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1080 return Qfalse;
1081 }
1082
1083 if (scheduler != Qnil) {
1084 VALUE timeout = rb_float_new(hrtime2double(*limit));
1085 rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
1086 }
1087 else {
1088 th->status = THREAD_STOPPED;
1089 native_sleep(th, limit);
1090 }
1091 }
1092 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1093 th->status = THREAD_RUNNABLE;
1094
1095 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1096 }
1097
1098 return Qtrue;
1099}
1100
1101static VALUE
1102thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1103{
1104 rb_execution_context_t *ec = GET_EC();
1105 rb_thread_t *th = ec->thread_ptr;
1106 rb_fiber_t *fiber = ec->fiber_ptr;
1107
1108 if (th == target_th) {
1109 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1110 }
1111
1112 if (th->ractor->threads.main == target_th) {
1113 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1114 }
1115
1116 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1117
1118 if (target_th->status != THREAD_KILLED) {
1119 struct rb_waiting_list waiter;
1120 waiter.next = target_th->join_list;
1121 waiter.thread = th;
1122 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1123 target_th->join_list = &waiter;
1124
1125 struct join_arg arg;
1126 arg.waiter = &waiter;
1127 arg.target = target_th;
1128 arg.timeout = timeout;
1129 arg.limit = limit;
1130
1131 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1132 return Qnil;
1133 }
1134 }
1135
1136 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1137
1138 if (target_th->ec->errinfo != Qnil) {
1139 VALUE err = target_th->ec->errinfo;
1140
1141 if (FIXNUM_P(err)) {
1142 switch (err) {
1143 case INT2FIX(TAG_FATAL):
1144 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1145
1146 /* OK. killed. */
1147 break;
1148 default:
1149 if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
1150 // root fiber killed in non-main thread
1151 break;
1152 }
1153 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1154 }
1155 }
1156 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1157 rb_bug("thread_join: THROW_DATA should not reach here.");
1158 }
1159 else {
1160 /* normal exception */
1161 rb_exc_raise(err);
1162 }
1163 }
1164 return target_th->self;
1165}
1166
1167/*
1168 * call-seq:
1169 * thr.join -> thr
1170 * thr.join(limit) -> thr
1171 *
1172 * The calling thread will suspend execution and run this +thr+.
1173 *
1174 * Does not return until +thr+ exits or until the given +limit+ seconds have
1175 * passed.
1176 *
1177 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1178 * returned.
1179 *
1180 * Any threads not joined will be killed when the main program exits.
1181 *
1182 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1183 * $DEBUG flags are not set, (so the exception has not yet been processed), it
1184 * will be processed at this time.
1185 *
1186 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1187 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1188 * x.join # Let thread x finish, thread a will be killed on exit.
1189 * #=> "axyz"
1190 *
1191 * The following example illustrates the +limit+ parameter.
1192 *
1193 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1194 * puts "Waiting" until y.join(0.15)
1195 *
1196 * This will produce:
1197 *
1198 * tick...
1199 * Waiting
1200 * tick...
1201 * Waiting
1202 * tick...
1203 * tick...
1204 */
1205
1206static VALUE
1207thread_join_m(int argc, VALUE *argv, VALUE self)
1208{
1209 VALUE timeout = Qnil;
1210 rb_hrtime_t rel = 0, *limit = 0;
1211
1212 if (rb_check_arity(argc, 0, 1)) {
1213 timeout = argv[0];
1214 }
1215
1216 // Convert the timeout eagerly, so it's always converted and deterministic
1217 /*
1218 * This supports INFINITY and negative values, so we can't use
1219 * rb_time_interval right now...
1220 */
1221 if (NIL_P(timeout)) {
1222 /* unlimited */
1223 }
1224 else if (FIXNUM_P(timeout)) {
1225 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1226 limit = &rel;
1227 }
1228 else {
1229 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1230 }
1231
1232 return thread_join(rb_thread_ptr(self), timeout, limit);
1233}
1234
1235/*
1236 * call-seq:
1237 * thr.value -> obj
1238 *
1239 * Waits for +thr+ to complete, using #join, and returns its value or raises
1240 * the exception which terminated the thread.
1241 *
1242 * a = Thread.new { 2 + 2 }
1243 * a.value #=> 4
1244 *
1245 * b = Thread.new { raise 'something went wrong' }
1246 * b.value #=> RuntimeError: something went wrong
1247 */
1248
1249static VALUE
1250thread_value(VALUE self)
1251{
1252 rb_thread_t *th = rb_thread_ptr(self);
1253 thread_join(th, Qnil, 0);
1254 if (UNDEF_P(th->value)) {
1255 // If the thread is dead because we forked th->value is still Qundef.
1256 return Qnil;
1257 }
1258 return th->value;
1259}
1260
1261/*
1262 * Thread Scheduling
1263 */
1264
1265static void
1266getclockofday(struct timespec *ts)
1267{
1268#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1269 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1270 return;
1271#endif
1272 rb_timespec_now(ts);
1273}
1274
1275/*
1276 * Don't inline this, since the library call is already time-consuming
1277 * and we don't want "struct timespec" on the stack too long for GC
1278 */
1279NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1280rb_hrtime_t
1281rb_hrtime_now(void)
1282{
1283 struct timespec ts;
1284
1285 getclockofday(&ts);
1286 return rb_timespec2hrtime(&ts);
1287}
1288
1289/*
1290 * At least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1291 * being uninitialized, maybe other versions, too.
1292 */
1293COMPILER_WARNING_PUSH
1294#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1295COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1296#endif
1297#ifndef PRIu64
1298#define PRIu64 PRI_64_PREFIX "u"
1299#endif
1300/*
1301 * @end is the absolute time when @timeout is set to expire.
1302 * Returns true if @end has passed;
1303 * updates @timeout and returns false otherwise.
1304 */
1305static int
1306hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1307{
1308 rb_hrtime_t now = rb_hrtime_now();
1309
1310 if (now > end) return 1;
1311
1312 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1313
1314 *timeout = end - now;
1315 return 0;
1316}
1317COMPILER_WARNING_POP
1318
1319static int
1320sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1321{
1322 enum rb_thread_status prev_status = th->status;
1323 int woke;
1324 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1325
1326 th->status = THREAD_STOPPED;
1327 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1328 while (th->status == THREAD_STOPPED) {
1329 native_sleep(th, &rel);
1330 woke = vm_check_ints_blocking(th->ec);
1331 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1332 break;
1333 if (hrtime_update_expire(&rel, end))
1334 break;
1335 woke = 1;
1336 }
1337 th->status = prev_status;
1338 return woke;
1339}
1340
1341static int
1342sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1343{
1344 enum rb_thread_status prev_status = th->status;
1345 int woke;
1346 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1347
1348 th->status = THREAD_STOPPED;
1349 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1350 while (th->status == THREAD_STOPPED) {
1351 native_sleep(th, &rel);
1352 woke = vm_check_ints_blocking(th->ec);
1353 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1354 break;
1355 if (hrtime_update_expire(&rel, end))
1356 break;
1357 woke = 1;
1358 }
1359 th->status = prev_status;
1360 return woke;
1361}
1362
1363static void
1364sleep_forever(rb_thread_t *th, unsigned int fl)
1365{
1366 enum rb_thread_status prev_status = th->status;
1367 enum rb_thread_status status;
1368 int woke;
1369
1370 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1371 th->status = status;
1372
1373 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1374
1375 while (th->status == status) {
1376 if (fl & SLEEP_DEADLOCKABLE) {
1377 rb_ractor_sleeper_threads_inc(th->ractor);
1378 rb_check_deadlock(th->ractor);
1379 }
1380 {
1381 native_sleep(th, 0);
1382 }
1383 if (fl & SLEEP_DEADLOCKABLE) {
1384 rb_ractor_sleeper_threads_dec(th->ractor);
1385 }
1386 if (fl & SLEEP_ALLOW_SPURIOUS) {
1387 break;
1388 }
1389
1390 woke = vm_check_ints_blocking(th->ec);
1391
1392 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1393 break;
1394 }
1395 }
1396 th->status = prev_status;
1397}
1398
1399void
1400rb_thread_sleep_forever(void)
1401{
1402 RUBY_DEBUG_LOG("forever");
1403 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1404}
1405
1406void
1407rb_thread_sleep_deadly(void)
1408{
1409 RUBY_DEBUG_LOG("deadly");
1410 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1411}
1412
1413static void
1414rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1415{
1416 VALUE scheduler = rb_fiber_scheduler_current();
1417 if (scheduler != Qnil) {
1418 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1419 }
1420 else {
1421 RUBY_DEBUG_LOG("...");
1422 if (end) {
1423 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1424 }
1425 else {
1426 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1427 }
1428 }
1429}
1430
1431void
1432rb_thread_wait_for(struct timeval time)
1433{
1434 rb_thread_t *th = GET_THREAD();
1435
1436 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1437}
1438
1439void
1440rb_ec_check_ints(rb_execution_context_t *ec)
1441{
1442 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1443}
1444
1445/*
1446 * CAUTION: This function causes thread switching.
1447 * rb_thread_check_ints() checks Ruby's pending interrupts.
1448 * Some interrupts require thread switching, invoking handlers,
1449 * and so on.
1450 */
1451
1452void
1453rb_thread_check_ints(void)
1454{
1455 rb_ec_check_ints(GET_EC());
1456}
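/*
 * Illustrative sketch (not part of the upstream file, excluded from
 * compilation): a long-running C loop that holds the GVL should call
 * rb_thread_check_ints() periodically so that Thread#kill, Thread#raise and
 * signal handlers are serviced. The example_* names are hypothetical.
 */
#if 0
static VALUE
example_busy_sum(VALUE self, VALUE count)
{
    long n = NUM2LONG(count), i;
    unsigned long sum = 0;

    for (i = 0; i < n; i++) {
        sum += (unsigned long)i;
        if ((i & 0xffff) == 0) {
            /* May switch threads and may raise (e.g. Interrupt on Ctrl+C). */
            rb_thread_check_ints();
        }
    }
    return ULONG2NUM(sum);
}
#endif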
1457
1458/*
1459 * Hidden API for tcl/tk wrapper.
1460 * There is no guarantee to perpetuate it.
1461 */
1462int
1463rb_thread_check_trap_pending(void)
1464{
1465 return rb_signal_buff_size() != 0;
1466}
1467
1468/* This function can be called in blocking region. */
1469int
1470rb_thread_interrupted(VALUE thval)
1471{
1472 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1473}
1474
1475void
1476rb_thread_sleep(int sec)
1477{
1478    rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1479}
1480
1481static void
1482rb_thread_schedule_limits(uint32_t limits_us)
1483{
1484 if (!rb_thread_alone()) {
1485 rb_thread_t *th = GET_THREAD();
1486 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1487
1488 if (th->running_time_us >= limits_us) {
1489 RUBY_DEBUG_LOG("switch %s", "start");
1490
1491 RB_VM_SAVE_MACHINE_CONTEXT(th);
1492 thread_sched_yield(TH_SCHED(th), th);
1493 rb_ractor_thread_switch(th->ractor, th, true);
1494
1495 RUBY_DEBUG_LOG("switch %s", "done");
1496 }
1497 }
1498}
1499
1500void
1501rb_thread_schedule(void)
1502{
1503 rb_thread_schedule_limits(0);
1504 RUBY_VM_CHECK_INTS(GET_EC());
1505}
1506
1507/* blocking region */
1508
1509static inline int
1510blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1511 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1512{
1513#ifdef RUBY_ASSERT_CRITICAL_SECTION
1514 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1515#endif
1516 VM_ASSERT(th == GET_THREAD());
1517
1518 region->prev_status = th->status;
1519 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1520 th->blocking_region_buffer = region;
1521 th->status = THREAD_STOPPED;
1522 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1523
1524 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1525 return TRUE;
1526 }
1527 else {
1528 return FALSE;
1529 }
1530}
1531
1532static inline void
1533blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1534{
1535 /* entry to ubf_list still permitted at this point, make it impossible: */
1536 unblock_function_clear(th);
1537 /* entry to ubf_list impossible at this point, so unregister is safe: */
1538 unregister_ubf_list(th);
1539
1540 thread_sched_to_running(TH_SCHED(th), th);
1541 rb_ractor_thread_switch(th->ractor, th, false);
1542
1543 th->blocking_region_buffer = 0;
1544 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1545 if (th->status == THREAD_STOPPED) {
1546 th->status = region->prev_status;
1547 }
1548
1549 RUBY_DEBUG_LOG("end");
1550
1551#ifndef _WIN32
1552 // GET_THREAD() clears WSAGetLastError()
1553 VM_ASSERT(th == GET_THREAD());
1554#endif
1555}
1556
1557/*
1558 * Resolve sentinel unblock function values to their actual function pointers
1559 * and appropriate data2 values. This centralizes the logic for handling
1560 * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
1561 *
1562 * @param unblock_function Pointer to unblock function pointer (modified in place)
1563 * @param data2 Pointer to data2 pointer (modified in place)
1564 * @param thread Thread context for resolving data2 when needed
1565 * @return true if sentinel values were resolved, false otherwise
1566 */
1567bool
1568rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
1569{
1570 rb_unblock_function_t *ubf = *unblock_function;
1571
1572 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1573 *unblock_function = ubf_select;
1574 *data2 = thread;
1575 return true;
1576 }
1577 return false;
1578}
1579
1580void *
1581rb_nogvl(void *(*func)(void *), void *data1,
1582 rb_unblock_function_t *ubf, void *data2,
1583 int flags)
1584{
1585 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1586 VALUE scheduler = rb_fiber_scheduler_current();
1587 if (scheduler != Qnil) {
1588            struct rb_fiber_scheduler_blocking_operation_state state = {0};
1589
1590 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1591
1592 if (!UNDEF_P(result)) {
1593 rb_errno_set(state.saved_errno);
1594 return state.result;
1595 }
1596 }
1597 }
1598
1599 void *val = 0;
1600 rb_execution_context_t *ec = GET_EC();
1601 rb_thread_t *th = rb_ec_thread_ptr(ec);
1602 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1603 bool is_main_thread = vm->ractor.main_thread == th;
1604 int saved_errno = 0;
1605
1606 rb_thread_resolve_unblock_function(&ubf, &data2, th);
1607
1608 if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1609 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1610 vm->ubf_async_safe = 1;
1611 }
1612 }
1613
1614 rb_vm_t *volatile saved_vm = vm;
1615 BLOCKING_REGION(th, {
1616 val = func(data1);
1617 saved_errno = rb_errno();
1618 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1619 vm = saved_vm;
1620
1621 if (is_main_thread) vm->ubf_async_safe = 0;
1622
1623 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1624 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1625 }
1626
1627 rb_errno_set(saved_errno);
1628
1629 return val;
1630}
1631
1632/*
1633 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1634 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1635 * without interrupt process.
1636 *
1637 * rb_thread_call_without_gvl() does:
1638 * (1) Check interrupts.
1639 * (2) release GVL.
1640 * Other Ruby threads may run in parallel.
1641 * (3) call func with data1
1642 * (4) acquire GVL.
1643 * Other Ruby threads can not run in parallel any more.
1644 * (5) Check interrupts.
1645 *
1646 * rb_thread_call_without_gvl2() does:
1647 * (1) Check interrupt and return if interrupted.
1648 * (2) release GVL.
1649 * (3) call func with data1 and a pointer to the flags.
1650 * (4) acquire GVL.
1651 *
1652 * If another thread interrupts this thread (Thread#kill, signal delivery,
1653 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1654 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1655 * toggling a cancellation flag, canceling the invocation of a call inside
1656 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1657 *
1658 * There are built-in ubfs and you can specify these ubfs:
1659 *
1660 * * RUBY_UBF_IO: ubf for IO operation
1661 * * RUBY_UBF_PROCESS: ubf for process operation
1662 *
1663 * However, we can not guarantee our built-in ubfs interrupt your `func()'
1664 * correctly. Be careful when using rb_thread_call_without_gvl(). If you don't
1665 * provide proper ubf(), your program will not stop for Control+C or other
1666 * shutdown events.
1667 *
1668 * "Check interrupts" in the list above means checking for asynchronous
1669 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1670 * request, and so on) and calling corresponding procedures
1671 * (such as `trap' handlers for signals, raising an exception for Thread#raise).
1672 * If `func()' has already finished its work when an interrupt arrives, you may
1673 * want to skip interrupt checking. For example, assume the following func(), which reads data from a file.
1674 *
1675 * read_func(...) {
1676 * // (a) before read
1677 * read(buffer); // (b) reading
1678 * // (c) after read
1679 * }
1680 *
1681 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1682 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1683 * at (c), after the *read* operation has completed, checking interrupts is
1684 * harmful because it causes an irrevocable side effect: the read data will
1685 * vanish. To avoid such a problem, `read_func()' should be used with
1686 * `rb_thread_call_without_gvl2()'.
1687 *
1688 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1689 * immediately. This function does not indicate when the execution was
1690 * interrupted: there are 4 possible timings, (a), (b), (c), and before calling
1691 * read_func(). You need to record the progress of read_func() and check
1692 * it after `rb_thread_call_without_gvl2()' returns. You may also need to call
1693 * `rb_thread_check_ints()' at appropriate points, or your program will not be
1694 * able to handle interrupt-driven processing such as `trap' handlers.
1695 *
1696 * NOTE: You can not use most of the Ruby C API or touch Ruby
1697 * objects in `func()' and `ubf()', including raising an
1698 * exception, because the current thread does not hold the GVL
1699 * (doing so causes synchronization problems). If you need to
1700 * call Ruby functions, either use rb_thread_call_with_gvl()
1701 * or read the source code of the C APIs and confirm their
1702 * safety yourself.
1703 *
1704 * NOTE: In short, this API is difficult to use safely. We recommend other
1705 * approaches when they are available; we lack experience with this API.
1706 * Please report any problems related to it.
1707 *
1708 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
1709 * for a short-running `func()'. Be sure to benchmark, and use this
1710 * mechanism only when `func()' takes enough time.
1711 *
1712 * Safe C API:
1713 * * rb_thread_interrupted() - check interrupt flag
1714 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1715 * they will work without GVL, and may acquire GVL when GC is needed.
1716 */
1717void *
1718rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1719 rb_unblock_function_t *ubf, void *data2)
1720{
1721 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1722}
1723
1724void *
1725rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1726 rb_unblock_function_t *ubf, void *data2)
1727{
1728 return rb_nogvl(func, data1, ubf, data2, 0);
1729}
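/*
 * Illustrative sketch (not part of the upstream file, excluded from
 * compilation): wrapping a blocking read(2) with rb_thread_call_without_gvl()
 * so other Ruby threads can run while we block, plus an unblocking function so
 * Thread#kill or Ctrl+C can interrupt the call. The example_* names are
 * hypothetical; a real extension could also pass RUBY_UBF_IO instead of a
 * custom ubf.
 */
#if 0
#include <unistd.h>

struct example_read_args {
    int fd;
    char *buf;
    size_t capacity;
    ssize_t result;
};

static void *
example_blocking_read(void *ptr)
{
    struct example_read_args *args = ptr;
    /* The GVL is released here: do not touch Ruby objects or raise. */
    args->result = read(args->fd, args->buf, args->capacity);
    return NULL;
}

static void
example_unblock_read(void *ptr)
{
    struct example_read_args *args = ptr;
    /* Called on interrupt, possibly from another native thread: force the
     * pending read(2) to return, e.g. by closing the descriptor. */
    close(args->fd);
}

static ssize_t
example_read_without_gvl(int fd, char *buf, size_t capacity)
{
    struct example_read_args args = { fd, buf, capacity, -1 };
    rb_thread_call_without_gvl(example_blocking_read, &args,
                               example_unblock_read, &args);
    return args.result;
}
#endif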
1730
1731static int
1732waitfd_to_waiting_flag(int wfd_event)
1733{
1734 return wfd_event << 1;
1735}
1736
1737static struct ccan_list_head *
1738rb_io_blocking_operations(struct rb_io *io)
1739{
1740 rb_serial_t fork_generation = GET_VM()->fork_gen;
1741
1742 // On fork, all existing entries in this list (which are stack allocated) become invalid.
1743 // Therefore, we re-initialize the list which clears it.
1744 if (io->fork_generation != fork_generation) {
1745 ccan_list_head_init(&io->blocking_operations);
1746 io->fork_generation = fork_generation;
1747 }
1748
1749 return &io->blocking_operations;
1750}
1751
1752/*
1753 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
1754 * that are currently blocked on this IO for reading, writing or other operations.
1755 *
1756 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
1757 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
1758 *
1759 * @parameter io The IO object on which the operation will block
1760 * @parameter blocking_operation The operation details including the execution context that will be blocked
1761 */
1762static void
1763rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1764{
1765 ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
1766}
1767
1768static void
1769rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1770{
1771 ccan_list_del(&blocking_operation->list);
1772}
1773
1774struct io_blocking_operation_arguments {
1775    struct rb_io *io;
1776 struct rb_io_blocking_operation *blocking_operation;
1777};
1778
1779static VALUE
1780io_blocking_operation_exit(VALUE _arguments)
1781{
1782 struct io_blocking_operation_arguments *arguments = (void*)_arguments;
1783 struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;
1784
1785 rb_io_blocking_operation_pop(arguments->io, blocking_operation);
1786
1787 rb_io_t *io = arguments->io;
1788 rb_thread_t *thread = io->closing_ec->thread_ptr;
1789 rb_fiber_t *fiber = io->closing_ec->fiber_ptr;
1790
1791 if (thread->scheduler != Qnil) {
1792 // This can cause spurious wakeups...
1793 rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
1794 }
1795 else {
1796 rb_thread_wakeup(thread->self);
1797 }
1798
1799 return Qnil;
1800}
1801
1802/*
1803 * Called when a blocking operation completes or is interrupted. Removes the operation from
1804 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
1805 *
1806 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
1807 * through that mutex to ensure proper coordination with the closing thread.
1808 *
1809 * @parameter io The IO object the operation was performed on
1810 * @parameter blocking_operation The completed operation to clean up
1811 */
1812static void
1813rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1814{
1815 VALUE wakeup_mutex = io->wakeup_mutex;
1816
1817 // Indicate that the blocking operation is no longer active:
1818 blocking_operation->ec = NULL;
1819
1820 if (RB_TEST(wakeup_mutex)) {
1821 struct io_blocking_operation_arguments arguments = {
1822 .io = io,
1823 .blocking_operation = blocking_operation
1824 };
1825
1826 rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
1827 }
1828 else {
1829 // If there's no wakeup_mutex, we can safely remove the operation directly:
1830 rb_io_blocking_operation_pop(io, blocking_operation);
1831 }
1832}
1833
1834static VALUE
1835rb_thread_io_blocking_operation_ensure(VALUE _argument)
1836{
1837 struct io_blocking_operation_arguments *arguments = (void*)_argument;
1838
1839 rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);
1840
1841 return Qnil;
1842}
1843
1844/*
1845 * Executes a function that performs a blocking IO operation, while properly tracking
1846 * the operation in the IO's blocking_operations list. This ensures proper cleanup
1847 * and interruption handling if the IO is closed while blocked.
1848 *
1849 * The operation is automatically removed from the blocking_operations list when the function
1850 * returns, whether normally or due to an exception.
1851 *
1852 * @parameter self The IO object
1853 * @parameter function The function to execute that will perform the blocking operation
1854 * @parameter argument The argument to pass to the function
1855 * @returns The result of the blocking operation function
1856 */
1857VALUE
1858rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
1859{
1860 struct rb_io *io;
1861 RB_IO_POINTER(self, io);
1862
1863 rb_execution_context_t *ec = GET_EC();
1864 struct rb_io_blocking_operation blocking_operation = {
1865 .ec = ec,
1866 };
1867 rb_io_blocking_operation_enter(io, &blocking_operation);
1868
1869    struct io_blocking_operation_arguments io_blocking_operation_arguments = {
1870        .io = io,
1871 .blocking_operation = &blocking_operation
1872 };
1873
1874 return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
1875}
1876
1877static bool
1878thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1879{
1880#if defined(USE_MN_THREADS) && USE_MN_THREADS
1881 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1882#else
1883 return false;
1884#endif
1885}
1886
1887// true if need retry
1888static bool
1889thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1890{
1891#if defined(USE_MN_THREADS) && USE_MN_THREADS
1892 if (thread_io_mn_schedulable(th, events, timeout)) {
1893 rb_hrtime_t rel, *prel;
1894
1895 if (timeout) {
1896 rel = rb_timeval2hrtime(timeout);
1897 prel = &rel;
1898 }
1899 else {
1900 prel = NULL;
1901 }
1902
1903 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1904
1905 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1906 // timeout
1907 return false;
1908 }
1909 else {
1910 return true;
1911 }
1912 }
1913#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1914 return false;
1915}
1916
1917// assume read/write
1918static bool
1919blocking_call_retryable_p(int r, int eno)
1920{
1921 if (r != -1) return false;
1922
1923 switch (eno) {
1924 case EAGAIN:
1925#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1926 case EWOULDBLOCK:
1927#endif
1928 return true;
1929 default:
1930 return false;
1931 }
1932}
1933
1934bool
1935rb_thread_mn_schedulable(VALUE thval)
1936{
1937 rb_thread_t *th = rb_thread_ptr(thval);
1938 return th->mn_schedulable;
1939}
1940
1941VALUE
1942rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
1943{
1944 rb_execution_context_t * volatile ec = GET_EC();
1945 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1946
1947 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
1948
1949 volatile VALUE val = Qundef; /* shouldn't be used */
1950 volatile int saved_errno = 0;
1951 enum ruby_tag_type state;
1952 volatile bool prev_mn_schedulable = th->mn_schedulable;
1953 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1954
1955 int fd = io->fd;
1956
1957 // `errno` is only valid when there is an actual error - but we can't
1958 // extract that from the return value of `func` alone, so we clear any
1959 // prior `errno` value here so that we can later check if it was set by
1960 // `func` or not (as opposed to some previously set value).
1961 errno = 0;
1962
1963 struct rb_io_blocking_operation blocking_operation = {
1964 .ec = ec,
1965 };
1966 rb_io_blocking_operation_enter(io, &blocking_operation);
1967
1968 {
1969 EC_PUSH_TAG(ec);
1970 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1971 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1972 retry:
1973 BLOCKING_REGION(th, {
1974 val = func(data1);
1975 saved_errno = errno;
1976 }, ubf_select, th, FALSE);
1977
1978 RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
1979 if (events &&
1980 blocking_call_retryable_p((int)val, saved_errno) &&
1981 thread_io_wait_events(th, fd, events, NULL)) {
1982 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1983 goto retry;
1984 }
1985
1986 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1987
1988 state = saved_state;
1989 }
1990 EC_POP_TAG();
1991
1992 th = rb_ec_thread_ptr(ec);
1993 th->mn_schedulable = prev_mn_schedulable;
1994 }
1995
1996 rb_io_blocking_operation_exit(io, &blocking_operation);
1997
1998 if (state) {
1999 EC_JUMP_TAG(ec, state);
2000 }
2001
2002 // If the error was a timeout, we raise a specific exception for that:
2003 if (saved_errno == ETIMEDOUT) {
2004 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
2005 }
2006
2007 errno = saved_errno;
2008
2009 return val;
2010}
2011
2012VALUE
2013rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
2014{
2015 return rb_thread_io_blocking_call(io, func, data1, 0);
2016}
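
/*
 * A hedged usage sketch: how an extension might drive a retryable read
 * through rb_thread_io_blocking_call(). `struct read_args`, `read_func`,
 * `args` and `io` are hypothetical; passing RB_WAITFD_IN allows the call to
 * wait for readability and retry when the read reports EAGAIN/EWOULDBLOCK
 * (see blocking_call_retryable_p above).
 *
 *     struct read_args { int fd; void *buffer; size_t length; };
 *
 *     static VALUE
 *     read_func(void *ptr)
 *     {
 *         struct read_args *args = ptr;
 *         return (VALUE)read(args->fd, args->buffer, args->length);
 *     }
 *
 *     VALUE result = rb_thread_io_blocking_call(io, read_func, &args, RB_WAITFD_IN);
 */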
2017
2018/*
2019 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
2020 *
2021 * After releasing the GVL using
2022 * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
2023 * methods. If you need to access Ruby again you must use this function,
2024 * rb_thread_call_with_gvl().
2025 *
2026 * This function rb_thread_call_with_gvl() does:
2027 * (1) acquire GVL.
2028 * (2) call passed function `func'.
2029 * (3) release GVL.
2030 * (4) return a value which is returned at (2).
2031 *
2032 * NOTE: You should not return a Ruby object at (2) because such an object
2033 * will not be marked.
2034 *
2035 * NOTE: If an exception is raised in `func', this function DOES NOT
2036 * protect (catch) the exception. If you have any resources
2037 * which should be freed before the exception propagates, you need to use
2038 * rb_protect() in `func' and return a value which indicates that an
2039 * exception was raised.
2040 *
2041 * NOTE: This function should not be called by a thread which was not
2042 * created as a Ruby thread (by Thread.new or similar). In other
2043 * words, this function *DOES NOT* associate or convert a NON-Ruby
2044 * thread to a Ruby thread.
2045 */
2046void *
2047rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
2048{
2049 rb_thread_t *th = ruby_thread_from_native();
2050 struct rb_blocking_region_buffer *brb;
2051 struct rb_unblock_callback prev_unblock;
2052 void *r;
2053
2054 if (th == 0) {
2055 /* Error has occurred, but we can't use rb_bug()
2056 * because this thread is not Ruby's thread.
2057 * What should we do?
2058 */
2059 bp();
2060 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2061 exit(EXIT_FAILURE);
2062 }
2063
2064 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
2065 prev_unblock = th->unblock;
2066
2067 if (brb == 0) {
2068 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
2069 }
2070
2071 blocking_region_end(th, brb);
2072    /* enter the Ruby world: you can access Ruby values, methods and so on. */
2073 r = (*func)(data1);
2074    /* leave the Ruby world: you cannot access Ruby values, etc. */
2075 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2076 RUBY_ASSERT_ALWAYS(released);
2077 RB_VM_SAVE_MACHINE_CONTEXT(th);
2078 thread_sched_to_waiting(TH_SCHED(th), th);
2079 return r;
2080}
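
/*
 * A hedged usage sketch of the intended pairing with
 * rb_thread_call_without_gvl(); everything except the two API functions and
 * RUBY_UBF_IO is hypothetical.
 *
 *     static void *
 *     report_progress(void *message)      // runs with the GVL re-acquired
 *     {
 *         rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                    rb_str_new_cstr((const char *)message));
 *         return NULL;
 *     }
 *
 *     static void *
 *     heavy_work(void *data)              // runs without the GVL
 *     {
 *         compute_first_half(data);       // no Ruby API may be used here
 *         rb_thread_call_with_gvl(report_progress, (void *)"halfway done");
 *         compute_second_half(data);
 *         return NULL;
 *     }
 *
 *     rb_thread_call_without_gvl(heavy_work, data, RUBY_UBF_IO, NULL);
 */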
2081
2082/*
2083 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2084 */
2085
2086int
2087ruby_thread_has_gvl_p(void)
2088{
2089 rb_thread_t *th = ruby_thread_from_native();
2090
2091 if (th && th->blocking_region_buffer == 0) {
2092 return 1;
2093 }
2094 else {
2095 return 0;
2096 }
2097}
2098
2099/*
2100 * call-seq:
2101 * Thread.pass -> nil
2102 *
2103 * Give the thread scheduler a hint to pass execution to another thread.
2104 * A running thread may or may not switch; whether it does depends on the OS and processor.
2105 */
2106
2107static VALUE
2108thread_s_pass(VALUE klass)
2109{
2110    rb_thread_schedule();
2111    return Qnil;
2112}
2113
2114/*****************************************************/
2115
2116/*
2117 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2118 *
2119 * Async events such as an exception thrown by Thread#raise,
2120 * Thread#kill and thread termination (after main thread termination)
2121 * will be queued to th->pending_interrupt_queue.
2122 * - clear: clear the queue.
2123 * - enque: enqueue err object into queue.
2124 * - deque: dequeue err object from queue.
2125 * - active_p: return 1 if the queue should be checked.
2126 *
2127 * All rb_threadptr_pending_interrupt_* functions are, of course, called by
2128 * a thread that holds the GVL.
2129 * Note that all "rb_"-prefixed APIs require the GVL to be held when called.
2130 */
2131
2132void
2133rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
2134{
2135 rb_ary_clear(th->pending_interrupt_queue);
2136}
2137
2138void
2139rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2140{
2141 rb_ary_push(th->pending_interrupt_queue, v);
2142 th->pending_interrupt_queue_checked = 0;
2143}
2144
2145static void
2146threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2147{
2148 if (!th->pending_interrupt_queue) {
2149 rb_raise(rb_eThreadError, "uninitialized thread");
2150 }
2151}
2152
2153enum handle_interrupt_timing {
2154 INTERRUPT_NONE,
2155 INTERRUPT_IMMEDIATE,
2156 INTERRUPT_ON_BLOCKING,
2157 INTERRUPT_NEVER
2158};
2159
2160static enum handle_interrupt_timing
2161rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2162{
2163 if (sym == sym_immediate) {
2164 return INTERRUPT_IMMEDIATE;
2165 }
2166 else if (sym == sym_on_blocking) {
2167 return INTERRUPT_ON_BLOCKING;
2168 }
2169 else if (sym == sym_never) {
2170 return INTERRUPT_NEVER;
2171 }
2172 else {
2173 rb_raise(rb_eThreadError, "unknown mask signature");
2174 }
2175}
2176
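// Decide how a pending error should be delivered by scanning the
// handle_interrupt mask stack from the innermost (most recently pushed)
// entry outward; the first entry covering `err` or one of its ancestors
// determines the timing, otherwise INTERRUPT_NONE is returned.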
2177static enum handle_interrupt_timing
2178rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2179{
2180 VALUE mask;
2181 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2182 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2183 VALUE mod;
2184 long i;
2185
2186 for (i=0; i<mask_stack_len; i++) {
2187 mask = mask_stack[mask_stack_len-(i+1)];
2188
2189 if (SYMBOL_P(mask)) {
2190 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2191 if (err != rb_cInteger) {
2192 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2193 }
2194 else {
2195 continue;
2196 }
2197 }
2198
2199 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2200 VALUE klass = mod;
2201 VALUE sym;
2202
2203 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2204 klass = RBASIC(mod)->klass;
2205 }
2206 else if (mod != RCLASS_ORIGIN(mod)) {
2207 continue;
2208 }
2209
2210 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2211 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2212 }
2213 }
2214        /* try the next mask */
2215 }
2216 return INTERRUPT_NONE;
2217}
2218
2219static int
2220rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2221{
2222 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2223}
2224
2225static int
2226rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2227{
2228 int i;
2229 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2230 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2231 if (rb_obj_is_kind_of(e, err)) {
2232 return TRUE;
2233 }
2234 }
2235 return FALSE;
2236}
2237
2238static VALUE
2239rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2240{
2241#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2242 int i;
2243
2244 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2245 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2246
2247 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2248
2249 switch (mask_timing) {
2250 case INTERRUPT_ON_BLOCKING:
2251 if (timing != INTERRUPT_ON_BLOCKING) {
2252 break;
2253 }
2254 /* fall through */
2255 case INTERRUPT_NONE: /* default: IMMEDIATE */
2256 case INTERRUPT_IMMEDIATE:
2257 rb_ary_delete_at(th->pending_interrupt_queue, i);
2258 return err;
2259 case INTERRUPT_NEVER:
2260 break;
2261 }
2262 }
2263
2264 th->pending_interrupt_queue_checked = 1;
2265 return Qundef;
2266#else
2267 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2268 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2269 th->pending_interrupt_queue_checked = 1;
2270 }
2271 return err;
2272#endif
2273}
2274
2275static int
2276threadptr_pending_interrupt_active_p(rb_thread_t *th)
2277{
2278 /*
2279 * For optimization, we don't check async errinfo queue
2280 * if the queue and the thread interrupt mask were not changed
2281 * since last check.
2282 */
2283 if (th->pending_interrupt_queue_checked) {
2284 return 0;
2285 }
2286
2287 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2288 return 0;
2289 }
2290
2291 return 1;
2292}
2293
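// rb_hash_foreach callback for Thread.handle_interrupt: validates each
// TimingSymbol and folds the user-supplied hash into *maskp. A single
// `Exception => symbol` entry collapses to the bare symbol; additional
// entries are accumulated into an identity hash keyed by exception class.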
2294static int
2295handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2296{
2297 VALUE *maskp = (VALUE *)args;
2298
2299 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2300 rb_raise(rb_eArgError, "unknown mask signature");
2301 }
2302
2303 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2304 *maskp = val;
2305 return ST_CONTINUE;
2306 }
2307
2308 if (RTEST(*maskp)) {
2309 if (!RB_TYPE_P(*maskp, T_HASH)) {
2310 VALUE prev = *maskp;
2311 *maskp = rb_ident_hash_new();
2312 if (SYMBOL_P(prev)) {
2313 rb_hash_aset(*maskp, rb_eException, prev);
2314 }
2315 }
2316 rb_hash_aset(*maskp, key, val);
2317 }
2318 else {
2319 *maskp = Qfalse;
2320 }
2321
2322 return ST_CONTINUE;
2323}
2324
2325/*
2326 * call-seq:
2327 * Thread.handle_interrupt(hash) { ... } -> result of the block
2328 *
2329 * Changes asynchronous interrupt timing.
2330 *
2331 * Here, _interrupt_ means an asynchronous event and its corresponding procedure,
2332 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2333 * or main thread termination (if the main thread terminates, then all
2334 * other threads will be killed).
2335 *
2336 * The given +hash+ has pairs like <code>ExceptionClass =>
2337 * :TimingSymbol</code>, where ExceptionClass is the interrupt handled by
2338 * the given block. The TimingSymbol can be one of the following symbols:
2339 *
2340 * [+:immediate+] Invoke interrupts immediately.
2341 * [+:on_blocking+] Invoke interrupts while _BlockingOperation_.
2342 * [+:never+]       Never invoke interrupts.
2343 *
2344 * _BlockingOperation_ means that the operation will block the calling thread,
2345 * such as read and write. On the CRuby implementation, _BlockingOperation_ is any
2346 * operation executed without the GVL.
2347 *
2348 * Masked asynchronous interrupts are delayed until they are enabled.
2349 * This method is similar to sigprocmask(3).
2350 *
2351 * === NOTE
2352 *
2353 * Asynchronous interrupts are difficult to use.
2354 *
2355 * If you need to communicate between threads, consider using another mechanism such as a Queue.
2356 *
2357 * Otherwise, use asynchronous interrupts only with a deep understanding of this method.
2358 *
2359 * === Usage
2360 *
2361 * In this example, we can guard from Thread#raise exceptions.
2362 *
2363 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2364 * ignored in the first block of the main thread. In the second
2365 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2366 *
2367 * th = Thread.new do
2368 * Thread.handle_interrupt(RuntimeError => :never) {
2369 * begin
2370 * # You can write resource allocation code safely.
2371 * Thread.handle_interrupt(RuntimeError => :immediate) {
2372 * # ...
2373 * }
2374 * ensure
2375 * # You can write resource deallocation code safely.
2376 * end
2377 * }
2378 * end
2379 * Thread.pass
2380 * # ...
2381 * th.raise "stop"
2382 *
2383 * While we are ignoring the RuntimeError exception, it's safe to write our
2384 * resource allocation code. Then, the ensure block is where we can safely
2385 * deallocate our resources.
2386 *
2387 * ==== Stack control settings
2388 *
2389 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2390 * to control more than one ExceptionClass and TimingSymbol at a time.
2391 *
2392 * Thread.handle_interrupt(FooError => :never) {
2393 * Thread.handle_interrupt(BarError => :never) {
2394 * # FooError and BarError are prohibited.
2395 * }
2396 * }
2397 *
2398 * ==== Inheritance with ExceptionClass
2399 *
2400 * All exceptions inherited from the ExceptionClass parameter will be considered.
2401 *
2402 * Thread.handle_interrupt(Exception => :never) {
2403 * # all exceptions inherited from Exception are prohibited.
2404 * }
2405 *
2406 * For handling all interrupts, use +Object+ and not +Exception+
2407 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2408 */
2409static VALUE
2410rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2411{
2412 VALUE mask = Qundef;
2413 rb_execution_context_t * volatile ec = GET_EC();
2414 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2415 volatile VALUE r = Qnil;
2416 enum ruby_tag_type state;
2417
2418 if (!rb_block_given_p()) {
2419 rb_raise(rb_eArgError, "block is needed.");
2420 }
2421
2422 mask_arg = rb_to_hash_type(mask_arg);
2423
2424 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2425 mask = Qnil;
2426 }
2427
2428 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2429
2430 if (UNDEF_P(mask)) {
2431 return rb_yield(Qnil);
2432 }
2433
2434 if (!RTEST(mask)) {
2435 mask = mask_arg;
2436 }
2437 else if (RB_TYPE_P(mask, T_HASH)) {
2438 OBJ_FREEZE(mask);
2439 }
2440
2441 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2442 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2443 th->pending_interrupt_queue_checked = 0;
2444 RUBY_VM_SET_INTERRUPT(th->ec);
2445 }
2446
2447 EC_PUSH_TAG(th->ec);
2448 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2449 r = rb_yield(Qnil);
2450 }
2451 EC_POP_TAG();
2452
2453 rb_ary_pop(th->pending_interrupt_mask_stack);
2454 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2455 th->pending_interrupt_queue_checked = 0;
2456 RUBY_VM_SET_INTERRUPT(th->ec);
2457 }
2458
2459 RUBY_VM_CHECK_INTS(th->ec);
2460
2461 if (state) {
2462 EC_JUMP_TAG(th->ec, state);
2463 }
2464
2465 return r;
2466}
2467
2468/*
2469 * call-seq:
2470 * target_thread.pending_interrupt?(error = nil) -> true/false
2471 *
2472 * Returns +true+ if the asynchronous queue of the target thread is not empty, that is, if it has pending interrupts.
2473 *
2474 * If +error+ is given, then check only for +error+ type deferred events.
2475 *
2476 * See ::pending_interrupt? for more information.
2477 */
2478static VALUE
2479rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2480{
2481 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2482
2483 if (!target_th->pending_interrupt_queue) {
2484 return Qfalse;
2485 }
2486 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2487 return Qfalse;
2488 }
2489 if (rb_check_arity(argc, 0, 1)) {
2490 VALUE err = argv[0];
2491 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2492 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2493 }
2494 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2495 }
2496 else {
2497 return Qtrue;
2498 }
2499}
2500
2501/*
2502 * call-seq:
2503 * Thread.pending_interrupt?(error = nil) -> true/false
2504 *
2505 * Returns +true+ if the asynchronous queue of the current thread is not empty, that is, if it has pending interrupts.
2506 *
2507 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2508 * this method can be used to determine if there are any deferred events.
2509 *
2510 * If this method returns true, you may want to finish the +:never+ block so the deferred events can be handled.
2511 *
2512 * For example, the following method processes deferred asynchronous events
2513 * immediately.
2514 *
2515 * def Thread.kick_interrupt_immediately
2516 * Thread.handle_interrupt(Object => :immediate) {
2517 * Thread.pass
2518 * }
2519 * end
2520 *
2521 * If +error+ is given, then check only for +error+ type deferred events.
2522 *
2523 * === Usage
2524 *
2525 * th = Thread.new{
2526 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2527 * while true
2528 * ...
2529 * # reach safe point to invoke interrupt
2530 * if Thread.pending_interrupt?
2531 * Thread.handle_interrupt(Object => :immediate){}
2532 * end
2533 * ...
2534 * end
2535 * }
2536 * }
2537 * ...
2538 * th.raise # stop thread
2539 *
2540 * This example can also be written as the following, which you should use to
2541 * avoid asynchronous interrupts.
2542 *
2543 * flag = true
2544 * th = Thread.new{
2545 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2546 * while true
2547 * ...
2548 * # reach safe point to invoke interrupt
2549 * break if flag == false
2550 * ...
2551 * end
2552 * }
2553 * }
2554 * ...
2555 * flag = false # stop thread
2556 */
2557
2558static VALUE
2559rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2560{
2561 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2562}
2563
2564NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2565
2566static void
2567rb_threadptr_to_kill(rb_thread_t *th)
2568{
2569 VM_ASSERT(GET_THREAD() == th);
2570 rb_threadptr_pending_interrupt_clear(th);
2571 th->status = THREAD_RUNNABLE;
2572 th->to_kill = 1;
2573 th->ec->errinfo = INT2FIX(TAG_FATAL);
2574 EC_JUMP_TAG(th->ec, TAG_FATAL);
2575}
2576
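// Atomically consume the pending interrupt bits that are not masked by
// ec->interrupt_mask: the CAS loop below replaces ec->interrupt_flag with
// only its masked bits and returns the bits that were cleared, so each
// unmasked interrupt is observed exactly once even if new bits are set
// concurrently.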
2577static inline rb_atomic_t
2578threadptr_get_interrupts(rb_thread_t *th)
2579{
2580 rb_execution_context_t *ec = th->ec;
2581 rb_atomic_t interrupt;
2582 rb_atomic_t old;
2583
2584 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2585 do {
2586 interrupt = old;
2587 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2588 } while (old != interrupt);
2589 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2590}
2591
2592static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2593
2594// Execute interrupts on currently running thread
2595// In certain situations, calling this function will raise an exception. Some examples are:
2596// * during VM shutdown (`rb_ractor_terminate_all`)
2597// * Call to Thread#exit for current thread (`rb_thread_kill`)
2598// * Call to Thread#raise for current thread
2599int
2600rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2601{
2602 rb_atomic_t interrupt;
2603 int postponed_job_interrupt = 0;
2604 int ret = FALSE;
2605
2606 VM_ASSERT(GET_THREAD() == th);
2607
2608 if (th->ec->raised_flag) return ret;
2609
2610 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2611 int sig;
2612 int timer_interrupt;
2613 int pending_interrupt;
2614 int trap_interrupt;
2615 int terminate_interrupt;
2616
2617 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2618 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2619 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2620 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2621 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2622
2623 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2624 RB_VM_LOCKING();
2625 }
2626
2627 if (postponed_job_interrupt) {
2628 rb_postponed_job_flush(th->vm);
2629 }
2630
2631 if (trap_interrupt) {
2632 /* signal handling */
2633 if (th == th->vm->ractor.main_thread) {
2634 enum rb_thread_status prev_status = th->status;
2635
2636 th->status = THREAD_RUNNABLE;
2637 {
2638 while ((sig = rb_get_next_signal()) != 0) {
2639 ret |= rb_signal_exec(th, sig);
2640 }
2641 }
2642 th->status = prev_status;
2643 }
2644
2645 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2646 enum rb_thread_status prev_status = th->status;
2647
2648 th->status = THREAD_RUNNABLE;
2649 {
2650 threadptr_interrupt_exec_exec(th);
2651 }
2652 th->status = prev_status;
2653 }
2654 }
2655
2656 /* exception from another thread */
2657 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2658 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2659 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2660 ret = TRUE;
2661
2662 if (UNDEF_P(err)) {
2663 /* no error */
2664 }
2665 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2666 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2667 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2668 terminate_interrupt = 1;
2669 }
2670 else {
2671 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2672                /* the only special exception to be queued across threads */
2673 err = ruby_vm_special_exception_copy(err);
2674 }
2675 /* set runnable if th was slept. */
2676 if (th->status == THREAD_STOPPED ||
2677 th->status == THREAD_STOPPED_FOREVER)
2678 th->status = THREAD_RUNNABLE;
2679 rb_exc_raise(err);
2680 }
2681 }
2682
2683 if (terminate_interrupt) {
2684 rb_threadptr_to_kill(th);
2685 }
2686
2687 if (timer_interrupt) {
2688 uint32_t limits_us = thread_default_quantum_ms * 1000;
2689
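            // Scale the time slice by priority: each positive priority level
            // doubles the quantum and each negative level halves it (e.g. a
            // 100ms quantum becomes 200ms at priority 1 and 50ms at priority -1).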
2690 if (th->priority > 0)
2691 limits_us <<= th->priority;
2692 else
2693 limits_us >>= -th->priority;
2694
2695 if (th->status == THREAD_RUNNABLE)
2696 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2697
2698 VM_ASSERT(th->ec->cfp);
2699 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2700 0, 0, 0, Qundef);
2701
2702 rb_thread_schedule_limits(limits_us);
2703 }
2704 }
2705 return ret;
2706}
2707
2708void
2709rb_thread_execute_interrupts(VALUE thval)
2710{
2711 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2712}
2713
2714static void
2715rb_threadptr_ready(rb_thread_t *th)
2716{
2717 rb_threadptr_interrupt(th);
2718}
2719
2720static VALUE
2721rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2722{
2723 if (rb_threadptr_dead(target_th)) {
2724 return Qnil;
2725 }
2726
2727 VALUE exception = rb_exception_setup(argc, argv);
2728
2729 /* making an exception object can switch thread,
2730 so we need to check thread deadness again */
2731 if (rb_threadptr_dead(target_th)) {
2732 return Qnil;
2733 }
2734
2735 rb_threadptr_pending_interrupt_enque(target_th, exception);
2736 rb_threadptr_interrupt(target_th);
2737
2738 return Qnil;
2739}
2740
2741void
2742rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2743{
2744 VALUE argv[2];
2745
2746 argv[0] = rb_eSignal;
2747 argv[1] = INT2FIX(sig);
2748 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2749}
2750
2751void
2752rb_threadptr_signal_exit(rb_thread_t *th)
2753{
2754 VALUE argv[2];
2755
2756 argv[0] = rb_eSystemExit;
2757 argv[1] = rb_str_new2("exit");
2758
2759    // TODO: check signal raise delivery
2760 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2761}
2762
2763int
2764rb_ec_set_raised(rb_execution_context_t *ec)
2765{
2766 if (ec->raised_flag & RAISED_EXCEPTION) {
2767 return 1;
2768 }
2769 ec->raised_flag |= RAISED_EXCEPTION;
2770 return 0;
2771}
2772
2773int
2774rb_ec_reset_raised(rb_execution_context_t *ec)
2775{
2776 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2777 return 0;
2778 }
2779 ec->raised_flag &= ~RAISED_EXCEPTION;
2780 return 1;
2781}
2782
2783/*
2784 * Thread-safe IO closing mechanism.
2785 *
2786 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2787 * 1. Track and notify all blocking operations through io->blocking_operations
2788 * 2. Ensure only one thread can close at a time using io->closing_ec
2789 * 3. Synchronize cleanup using wakeup_mutex
2790 *
2791 * The close process works as follows:
2792 * - First check if any thread is already closing (io->closing_ec)
2793 * - Set up wakeup_mutex for synchronization
2794 * - Iterate through all blocking operations in io->blocking_operations
2795 * - For each blocked fiber with a scheduler:
2796 * - Notify via rb_fiber_scheduler_fiber_interrupt
2797 * - For each blocked thread without a scheduler:
2798 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2799 * - Wake via rb_threadptr_interrupt
2800 * - Wait on wakeup_mutex until all operations are cleaned up
2801 * - Only then clear closing state and allow actual close to proceed
2802 */
2803static VALUE
2804thread_io_close_notify_all(VALUE _io)
2805{
2806 struct rb_io *io = (struct rb_io *)_io;
2807
2808 size_t count = 0;
2809 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2810 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2811
2812 struct rb_io_blocking_operation *blocking_operation;
2813 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2814 rb_execution_context_t *ec = blocking_operation->ec;
2815
2816 // If the operation is in progress, we need to interrupt it:
2817 if (ec) {
2818 rb_thread_t *thread = ec->thread_ptr;
2819
2820 VALUE result = RUBY_Qundef;
2821 if (thread->scheduler != Qnil) {
2822 result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
2823 }
2824
2825 if (result == RUBY_Qundef) {
2826 // If the thread is not the current thread, we need to enqueue an error:
2827 rb_threadptr_pending_interrupt_enque(thread, error);
2828 rb_threadptr_interrupt(thread);
2829 }
2830 }
2831
2832 count += 1;
2833 }
2834
2835 return (VALUE)count;
2836}
2837
2838size_t
2839rb_thread_io_close_interrupt(struct rb_io *io)
2840{
2841 // We guard this operation based on `io->closing_ec` -> only one thread will ever enter this function.
2842 if (io->closing_ec) {
2843 return 0;
2844 }
2845
2846 // If there are no blocking operations, we are done:
2847 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2848 return 0;
2849 }
2850
2851 // Otherwise, we are now closing the IO:
2852 rb_execution_context_t *ec = GET_EC();
2853 io->closing_ec = ec;
2854
2855 // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
2856 io->wakeup_mutex = rb_mutex_new();
2857 rb_mutex_allow_trap(io->wakeup_mutex, 1);
2858
2859 // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
2860 VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);
2861
2862 return (size_t)result;
2863}
2864
2865void
2866rb_thread_io_close_wait(struct rb_io* io)
2867{
2868 VALUE wakeup_mutex = io->wakeup_mutex;
2869
2870 if (!RB_TEST(wakeup_mutex)) {
2871 // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
2872 return;
2873 }
2874
2875 rb_mutex_lock(wakeup_mutex);
2876 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2877 rb_mutex_sleep(wakeup_mutex, Qnil);
2878 }
2879 rb_mutex_unlock(wakeup_mutex);
2880
2881 // We are done closing:
2882 io->wakeup_mutex = Qnil;
2883 io->closing_ec = NULL;
2884}
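
/*
 * A hedged sketch of the expected call sequence from an IO close path; the
 * surrounding code is hypothetical, only the two functions above are real.
 *
 *     // In the thread performing IO#close:
 *     rb_thread_io_close_interrupt(io);   // interrupt every blocked operation
 *     rb_thread_io_close_wait(io);        // wait until they have all exited
 *     // ... now the file descriptor can safely be closed ...
 */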
2885
2886void
2887rb_thread_fd_close(int fd)
2888{
2889 rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
2890}
2891
2892/*
2893 * call-seq:
2894 * thr.raise
2895 * thr.raise(string)
2896 * thr.raise(exception [, string [, array]])
2897 *
2898 * Raises an exception from the given thread. The caller does not have to be
2899 * +thr+. See Kernel#raise for more information.
2900 *
2901 * Thread.abort_on_exception = true
2902 * a = Thread.new { sleep(200) }
2903 * a.raise("Gotcha")
2904 *
2905 * This will produce:
2906 *
2907 * prog.rb:3: Gotcha (RuntimeError)
2908 * from prog.rb:2:in `initialize'
2909 * from prog.rb:2:in `new'
2910 * from prog.rb:2
2911 */
2912
2913static VALUE
2914thread_raise_m(int argc, VALUE *argv, VALUE self)
2915{
2916 rb_thread_t *target_th = rb_thread_ptr(self);
2917 const rb_thread_t *current_th = GET_THREAD();
2918
2919 threadptr_check_pending_interrupt_queue(target_th);
2920 rb_threadptr_raise(target_th, argc, argv);
2921
2922 /* To perform Thread.current.raise as Kernel.raise */
2923 if (current_th == target_th) {
2924 RUBY_VM_CHECK_INTS(target_th->ec);
2925 }
2926 return Qnil;
2927}
2928
2929
2930/*
2931 * call-seq:
2932 * thr.exit -> thr
2933 * thr.kill -> thr
2934 * thr.terminate -> thr
2935 *
2936 * Terminates +thr+ and schedules another thread to be run, returning
2937 * the terminated Thread. If this is the main thread, or the last
2938 * thread, exits the process.
2939 */
2940
2941VALUE
2942rb_thread_kill(VALUE thread)
2943{
2944 rb_thread_t *target_th = rb_thread_ptr(thread);
2945
2946 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2947 return thread;
2948 }
2949 if (target_th == target_th->vm->ractor.main_thread) {
2950 rb_exit(EXIT_SUCCESS);
2951 }
2952
2953 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2954
2955 if (target_th == GET_THREAD()) {
2956 /* kill myself immediately */
2957 rb_threadptr_to_kill(target_th);
2958 }
2959 else {
2960 threadptr_check_pending_interrupt_queue(target_th);
2961 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2962 rb_threadptr_interrupt(target_th);
2963 }
2964
2965 return thread;
2966}
2967
2968int
2969rb_thread_to_be_killed(VALUE thread)
2970{
2971 rb_thread_t *target_th = rb_thread_ptr(thread);
2972
2973 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2974 return TRUE;
2975 }
2976 return FALSE;
2977}
2978
2979/*
2980 * call-seq:
2981 * Thread.kill(thread) -> thread
2982 *
2983 * Causes the given +thread+ to exit; see also Thread::exit.
2984 *
2985 * count = 0
2986 * a = Thread.new { loop { count += 1 } }
2987 * sleep(0.1) #=> 0
2988 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2989 * count #=> 93947
2990 * a.alive? #=> false
2991 */
2992
2993static VALUE
2994rb_thread_s_kill(VALUE obj, VALUE th)
2995{
2996 return rb_thread_kill(th);
2997}
2998
2999
3000/*
3001 * call-seq:
3002 * Thread.exit -> thread
3003 *
3004 * Terminates the currently running thread and schedules another thread to be
3005 * run.
3006 *
3007 * If this thread is already marked to be killed, ::exit returns the Thread.
3008 *
3009 * If this is the main thread, or the last thread, exits the process.
3010 */
3011
3012static VALUE
3013rb_thread_exit(VALUE _)
3014{
3015 rb_thread_t *th = GET_THREAD();
3016 return rb_thread_kill(th->self);
3017}
3018
3019
3020/*
3021 * call-seq:
3022 * thr.wakeup -> thr
3023 *
3024 * Marks a given thread as eligible for scheduling; however, it may still
3025 * remain blocked on I/O.
3026 *
3027 * *Note:* This does not invoke the scheduler; see #run for more information.
3028 *
3029 * c = Thread.new { Thread.stop; puts "hey!" }
3030 * sleep 0.1 while c.status!='sleep'
3031 * c.wakeup
3032 * c.join
3033 * #=> "hey!"
3034 */
3035
3036VALUE
3037rb_thread_wakeup(VALUE thread)
3038{
3039 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3040 rb_raise(rb_eThreadError, "killed thread");
3041 }
3042 return thread;
3043}
3044
3045VALUE
3046rb_thread_wakeup_alive(VALUE thread)
3047{
3048 rb_thread_t *target_th = rb_thread_ptr(thread);
3049 if (target_th->status == THREAD_KILLED) return Qnil;
3050
3051 rb_threadptr_ready(target_th);
3052
3053 if (target_th->status == THREAD_STOPPED ||
3054 target_th->status == THREAD_STOPPED_FOREVER) {
3055 target_th->status = THREAD_RUNNABLE;
3056 }
3057
3058 return thread;
3059}
3060
3061
3062/*
3063 * call-seq:
3064 * thr.run -> thr
3065 *
3066 * Wakes up +thr+, making it eligible for scheduling.
3067 *
3068 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3069 * sleep 0.1 while a.status!='sleep'
3070 * puts "Got here"
3071 * a.run
3072 * a.join
3073 *
3074 * This will produce:
3075 *
3076 * a
3077 * Got here
3078 * c
3079 *
3080 * See also the instance method #wakeup.
3081 */
3082
3083VALUE
3084rb_thread_run(VALUE thread)
3085{
3086 rb_thread_wakeup(thread);
3087    rb_thread_schedule();
3088    return thread;
3089}
3090
3091
3092VALUE
3093rb_thread_stop(void)
3094{
3095 if (rb_thread_alone()) {
3096 rb_raise(rb_eThreadError,
3097 "stopping only thread\n\tnote: use sleep to stop forever");
3098 }
3099    rb_thread_sleep_deadly();
3100    return Qnil;
3101}
3102
3103/*
3104 * call-seq:
3105 * Thread.stop -> nil
3106 *
3107 * Stops execution of the current thread, putting it into a ``sleep'' state,
3108 * and schedules execution of another thread.
3109 *
3110 * a = Thread.new { print "a"; Thread.stop; print "c" }
3111 * sleep 0.1 while a.status!='sleep'
3112 * print "b"
3113 * a.run
3114 * a.join
3115 * #=> "abc"
3116 */
3117
3118static VALUE
3119thread_stop(VALUE _)
3120{
3121 return rb_thread_stop();
3122}
3123
3124/********************************************************************/
3125
3126VALUE
3127rb_thread_list(void)
3128{
3129 // TODO
3130 return rb_ractor_thread_list();
3131}
3132
3133/*
3134 * call-seq:
3135 * Thread.list -> array
3136 *
3137 * Returns an array of Thread objects for all threads that are either runnable
3138 * or stopped.
3139 *
3140 * Thread.new { sleep(200) }
3141 * Thread.new { 1000000.times {|i| i*i } }
3142 * Thread.new { Thread.stop }
3143 * Thread.list.each {|t| p t}
3144 *
3145 * This will produce:
3146 *
3147 * #<Thread:0x401b3e84 sleep>
3148 * #<Thread:0x401b3f38 run>
3149 * #<Thread:0x401b3fb0 sleep>
3150 * #<Thread:0x401bdf4c run>
3151 */
3152
3153static VALUE
3154thread_list(VALUE _)
3155{
3156 return rb_thread_list();
3157}
3158
3159VALUE
3160rb_thread_current(void)
3161{
3162 return GET_THREAD()->self;
3163}
3164
3165/*
3166 * call-seq:
3167 * Thread.current -> thread
3168 *
3169 * Returns the currently executing thread.
3170 *
3171 * Thread.current #=> #<Thread:0x401bdf4c run>
3172 */
3173
3174static VALUE
3175thread_s_current(VALUE klass)
3176{
3177 return rb_thread_current();
3178}
3179
3180VALUE
3181rb_thread_main(void)
3182{
3183 return GET_RACTOR()->threads.main->self;
3184}
3185
3186/*
3187 * call-seq:
3188 * Thread.main -> thread
3189 *
3190 * Returns the main thread.
3191 */
3192
3193static VALUE
3194rb_thread_s_main(VALUE klass)
3195{
3196 return rb_thread_main();
3197}
3198
3199
3200/*
3201 * call-seq:
3202 * Thread.abort_on_exception -> true or false
3203 *
3204 * Returns the status of the global ``abort on exception'' condition.
3205 *
3206 * The default is +false+.
3207 *
3208 * When set to +true+, if any thread is aborted by an exception, the
3209 * raised exception will be re-raised in the main thread.
3210 *
3211 * Can also be specified by the global $DEBUG flag or command line option
3212 * +-d+.
3213 *
3214 * See also ::abort_on_exception=.
3215 *
3216 * There is also an instance level method to set this for a specific thread,
3217 * see #abort_on_exception.
3218 */
3219
3220static VALUE
3221rb_thread_s_abort_exc(VALUE _)
3222{
3223 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3224}
3225
3226
3227/*
3228 * call-seq:
3229 * Thread.abort_on_exception= boolean -> true or false
3230 *
3231 * When set to +true+, if any thread is aborted by an exception, the
3232 * raised exception will be re-raised in the main thread.
3233 * Returns the new state.
3234 *
3235 * Thread.abort_on_exception = true
3236 * t1 = Thread.new do
3237 * puts "In new thread"
3238 * raise "Exception from thread"
3239 * end
3240 * sleep(1)
3241 * puts "not reached"
3242 *
3243 * This will produce:
3244 *
3245 * In new thread
3246 * prog.rb:4: Exception from thread (RuntimeError)
3247 * from prog.rb:2:in `initialize'
3248 * from prog.rb:2:in `new'
3249 * from prog.rb:2
3250 *
3251 * See also ::abort_on_exception.
3252 *
3253 * There is also an instance level method to set this for a specific thread,
3254 * see #abort_on_exception=.
3255 */
3256
3257static VALUE
3258rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3259{
3260 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3261 return val;
3262}
3263
3264
3265/*
3266 * call-seq:
3267 * thr.abort_on_exception -> true or false
3268 *
3269 * Returns the status of the thread-local ``abort on exception'' condition for
3270 * this +thr+.
3271 *
3272 * The default is +false+.
3273 *
3274 * See also #abort_on_exception=.
3275 *
3276 * There is also a class level method to set this for all threads, see
3277 * ::abort_on_exception.
3278 */
3279
3280static VALUE
3281rb_thread_abort_exc(VALUE thread)
3282{
3283 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3284}
3285
3286
3287/*
3288 * call-seq:
3289 * thr.abort_on_exception= boolean -> true or false
3290 *
3291 * When set to +true+, if this +thr+ is aborted by an exception, the
3292 * raised exception will be re-raised in the main thread.
3293 *
3294 * See also #abort_on_exception.
3295 *
3296 * There is also a class level method to set this for all threads, see
3297 * ::abort_on_exception=.
3298 */
3299
3300static VALUE
3301rb_thread_abort_exc_set(VALUE thread, VALUE val)
3302{
3303 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3304 return val;
3305}
3306
3307
3308/*
3309 * call-seq:
3310 * Thread.report_on_exception -> true or false
3311 *
3312 * Returns the status of the global ``report on exception'' condition.
3313 *
3314 * The default is +true+ since Ruby 2.5.
3315 *
3316 * All threads created when this flag is true will report
3317 * a message on $stderr if an exception kills the thread.
3318 *
3319 * Thread.new { 1.times { raise } }
3320 *
3321 * will produce this output on $stderr:
3322 *
3323 * #<Thread:...> terminated with exception (report_on_exception is true):
3324 * Traceback (most recent call last):
3325 * 2: from -e:1:in `block in <main>'
3326 * 1: from -e:1:in `times'
3327 *
3328 * This is done to catch errors in threads early.
3329 * In some cases, you might not want this output.
3330 * There are multiple ways to avoid the extra output:
3331 *
3332 * * If the exception is not intended, the best is to fix the cause of
3333 * the exception so it does not happen anymore.
3334 * * If the exception is intended, it might be better to rescue it closer to
3335 *   where it is raised rather than let it kill the Thread.
3336 * * If it is guaranteed the Thread will be joined with Thread#join or
3337 * Thread#value, then it is safe to disable this report with
3338 * <code>Thread.current.report_on_exception = false</code>
3339 * when starting the Thread.
3340 * However, this might handle the exception much later, or not at all
3341 * if the Thread is never joined due to the parent thread being blocked, etc.
3342 *
3343 * See also ::report_on_exception=.
3344 *
3345 * There is also an instance level method to set this for a specific thread,
3346 * see #report_on_exception=.
3347 *
3348 */
3349
3350static VALUE
3351rb_thread_s_report_exc(VALUE _)
3352{
3353 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3354}
3355
3356
3357/*
3358 * call-seq:
3359 * Thread.report_on_exception= boolean -> true or false
3360 *
3361 * Returns the new state.
3362 * When set to +true+, all threads created afterwards will inherit the
3363 * condition and report a message on $stderr if an exception kills a thread:
3364 *
3365 * Thread.report_on_exception = true
3366 * t1 = Thread.new do
3367 * puts "In new thread"
3368 * raise "Exception from thread"
3369 * end
3370 * sleep(1)
3371 * puts "In the main thread"
3372 *
3373 * This will produce:
3374 *
3375 * In new thread
3376 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3377 * Traceback (most recent call last):
3378 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3379 * In the main thread
3380 *
3381 * See also ::report_on_exception.
3382 *
3383 * There is also an instance level method to set this for a specific thread,
3384 * see #report_on_exception=.
3385 */
3386
3387static VALUE
3388rb_thread_s_report_exc_set(VALUE self, VALUE val)
3389{
3390 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3391 return val;
3392}
3393
3394
3395/*
3396 * call-seq:
3397 * Thread.ignore_deadlock -> true or false
3398 *
3399 * Returns the status of the global ``ignore deadlock'' condition.
3400 * The default is +false+, so that deadlock conditions are not ignored.
3401 *
3402 * See also ::ignore_deadlock=.
3403 *
3404 */
3405
3406static VALUE
3407rb_thread_s_ignore_deadlock(VALUE _)
3408{
3409 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3410}
3411
3412
3413/*
3414 * call-seq:
3415 * Thread.ignore_deadlock = boolean -> true or false
3416 *
3417 * Returns the new state.
3418 * When set to +true+, the VM will not check for deadlock conditions.
3419 * It is only useful to set this if your application can break a
3420 * deadlock condition via some other means, such as a signal.
3421 *
3422 * Thread.ignore_deadlock = true
3423 * queue = Thread::Queue.new
3424 *
3425 * trap(:SIGUSR1){queue.push "Received signal"}
3426 *
3427 * # raises fatal error unless ignoring deadlock
3428 * puts queue.pop
3429 *
3430 * See also ::ignore_deadlock.
3431 */
3432
3433static VALUE
3434rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3435{
3436 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3437 return val;
3438}
3439
3440
3441/*
3442 * call-seq:
3443 * thr.report_on_exception -> true or false
3444 *
3445 * Returns the status of the thread-local ``report on exception'' condition for
3446 * this +thr+.
3447 *
3448 * The default value when creating a Thread is the value of
3449 * the global flag Thread.report_on_exception.
3450 *
3451 * See also #report_on_exception=.
3452 *
3453 * There is also a class level method to set this for all new threads, see
3454 * ::report_on_exception=.
3455 */
3456
3457static VALUE
3458rb_thread_report_exc(VALUE thread)
3459{
3460 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3461}
3462
3463
3464/*
3465 * call-seq:
3466 * thr.report_on_exception= boolean -> true or false
3467 *
3468 * When set to +true+, a message is printed on $stderr if an exception
3469 * kills this +thr+. See ::report_on_exception for details.
3470 *
3471 * See also #report_on_exception.
3472 *
3473 * There is also a class level method to set this for all new threads, see
3474 * ::report_on_exception=.
3475 */
3476
3477static VALUE
3478rb_thread_report_exc_set(VALUE thread, VALUE val)
3479{
3480 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3481 return val;
3482}
3483
3484
3485/*
3486 * call-seq:
3487 * thr.group -> thgrp or nil
3488 *
3489 * Returns the ThreadGroup which contains the given thread.
3490 *
3491 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3492 */
3493
3494VALUE
3495rb_thread_group(VALUE thread)
3496{
3497 return rb_thread_ptr(thread)->thgroup;
3498}
3499
3500static const char *
3501thread_status_name(rb_thread_t *th, int detail)
3502{
3503 switch (th->status) {
3504 case THREAD_RUNNABLE:
3505 return th->to_kill ? "aborting" : "run";
3506 case THREAD_STOPPED_FOREVER:
3507 if (detail) return "sleep_forever";
3508 case THREAD_STOPPED:
3509 return "sleep";
3510 case THREAD_KILLED:
3511 return "dead";
3512 default:
3513 return "unknown";
3514 }
3515}
3516
3517static int
3518rb_threadptr_dead(rb_thread_t *th)
3519{
3520 return th->status == THREAD_KILLED;
3521}
3522
3523
3524/*
3525 * call-seq:
3526 * thr.status -> string, false or nil
3527 *
3528 * Returns the status of +thr+.
3529 *
3530 * [<tt>"sleep"</tt>]
3531 * Returned if this thread is sleeping or waiting on I/O
3532 * [<tt>"run"</tt>]
3533 * When this thread is executing
3534 * [<tt>"aborting"</tt>]
3535 * If this thread is aborting
3536 * [+false+]
3537 * When this thread is terminated normally
3538 * [+nil+]
3539 * If terminated with an exception.
3540 *
3541 * a = Thread.new { raise("die now") }
3542 * b = Thread.new { Thread.stop }
3543 * c = Thread.new { Thread.exit }
3544 * d = Thread.new { sleep }
3545 * d.kill #=> #<Thread:0x401b3678 aborting>
3546 * a.status #=> nil
3547 * b.status #=> "sleep"
3548 * c.status #=> false
3549 * d.status #=> "aborting"
3550 * Thread.current.status #=> "run"
3551 *
3552 * See also the instance methods #alive? and #stop?
3553 */
3554
3555static VALUE
3556rb_thread_status(VALUE thread)
3557{
3558 rb_thread_t *target_th = rb_thread_ptr(thread);
3559
3560 if (rb_threadptr_dead(target_th)) {
3561 if (!NIL_P(target_th->ec->errinfo) &&
3562 !FIXNUM_P(target_th->ec->errinfo)) {
3563 return Qnil;
3564 }
3565 else {
3566 return Qfalse;
3567 }
3568 }
3569 else {
3570 return rb_str_new2(thread_status_name(target_th, FALSE));
3571 }
3572}
3573
3574
3575/*
3576 * call-seq:
3577 * thr.alive? -> true or false
3578 *
3579 * Returns +true+ if +thr+ is running or sleeping.
3580 *
3581 * thr = Thread.new { }
3582 * thr.join #=> #<Thread:0x401b3fb0 dead>
3583 * Thread.current.alive? #=> true
3584 * thr.alive? #=> false
3585 *
3586 * See also #stop? and #status.
3587 */
3588
3589static VALUE
3590rb_thread_alive_p(VALUE thread)
3591{
3592 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3593}
3594
3595/*
3596 * call-seq:
3597 * thr.stop? -> true or false
3598 *
3599 * Returns +true+ if +thr+ is dead or sleeping.
3600 *
3601 * a = Thread.new { Thread.stop }
3602 * b = Thread.current
3603 * a.stop? #=> true
3604 * b.stop? #=> false
3605 *
3606 * See also #alive? and #status.
3607 */
3608
3609static VALUE
3610rb_thread_stop_p(VALUE thread)
3611{
3612 rb_thread_t *th = rb_thread_ptr(thread);
3613
3614 if (rb_threadptr_dead(th)) {
3615 return Qtrue;
3616 }
3617 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3618}
3619
3620/*
3621 * call-seq:
3622 * thr.name -> string
3623 *
3624 * Returns the name of the thread.
3625 */
3626
3627static VALUE
3628rb_thread_getname(VALUE thread)
3629{
3630 return rb_thread_ptr(thread)->name;
3631}
3632
3633/*
3634 * call-seq:
3635 * thr.name=(name) -> string
3636 *
3637 * Sets the name of the Ruby thread to the given string.
3638 * On some platforms, the name may also be set on the underlying native thread and/or be visible to the kernel.
3639 */
3640
3641static VALUE
3642rb_thread_setname(VALUE thread, VALUE name)
3643{
3644 rb_thread_t *target_th = rb_thread_ptr(thread);
3645
3646 if (!NIL_P(name)) {
3647 rb_encoding *enc;
3648 StringValueCStr(name);
3649 enc = rb_enc_get(name);
3650 if (!rb_enc_asciicompat(enc)) {
3651 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3652 rb_enc_name(enc));
3653 }
3654 name = rb_str_new_frozen(name);
3655 }
3656 target_th->name = name;
3657 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3658 native_set_another_thread_name(target_th->nt->thread_id, name);
3659 }
3660 return name;
3661}
3662
3663#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3664/*
3665 * call-seq:
3666 * thr.native_thread_id -> integer
3667 *
3668 * Return the native thread ID which is used by the Ruby thread.
3669 *
3670 * The ID depends on the OS (it is not the POSIX thread ID returned by pthread_self(3)):
3671 * * On Linux it is TID returned by gettid(2).
3672 * * On macOS it is the system-wide unique integral ID of thread returned
3673 * by pthread_threadid_np(3).
3674 * * On FreeBSD it is the unique integral ID of the thread returned by
3675 * pthread_getthreadid_np(3).
3676 * * On Windows it is the thread identifier returned by GetThreadId().
3677 * * On other platforms, it raises NotImplementedError.
3678 *
3679 * NOTE:
3680 * If the thread is not yet associated with, or has already been dissociated
3681 * from, a native thread, it returns _nil_.
3682 * If the Ruby implementation uses an M:N thread model, the ID may change
3683 * depending on timing.
3684 */
3685
3686static VALUE
3687rb_thread_native_thread_id(VALUE thread)
3688{
3689 rb_thread_t *target_th = rb_thread_ptr(thread);
3690 if (rb_threadptr_dead(target_th)) return Qnil;
3691 return native_thread_native_thread_id(target_th);
3692}
3693#else
3694# define rb_thread_native_thread_id rb_f_notimplement
3695#endif
3696
3697/*
3698 * call-seq:
3699 * thr.to_s -> string
3700 *
3701 * Dump the name, id, and status of _thr_ to a string.
3702 */
3703
3704static VALUE
3705rb_thread_to_s(VALUE thread)
3706{
3707 VALUE cname = rb_class_path(rb_obj_class(thread));
3708 rb_thread_t *target_th = rb_thread_ptr(thread);
3709 const char *status;
3710 VALUE str, loc;
3711
3712 status = thread_status_name(target_th, TRUE);
3713 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3714 if (!NIL_P(target_th->name)) {
3715 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3716 }
3717 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3718 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3719 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3720 }
3721 rb_str_catf(str, " %s>", status);
3722
3723 return str;
3724}
3725
3726/* variables for recursive traversals */
3727#define recursive_key id__recursive_key__
3728
3729static VALUE
3730threadptr_local_aref(rb_thread_t *th, ID id)
3731{
3732 if (id == recursive_key) {
3733 return th->ec->local_storage_recursive_hash;
3734 }
3735 else {
3736 VALUE val;
3737 struct rb_id_table *local_storage = th->ec->local_storage;
3738
3739 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3740 return val;
3741 }
3742 else {
3743 return Qnil;
3744 }
3745 }
3746}
3747
3748VALUE
3749rb_thread_local_aref(VALUE thread, ID id)
3750{
3751 return threadptr_local_aref(rb_thread_ptr(thread), id);
3752}
3753
3754/*
3755 * call-seq:
3756 * thr[sym] -> obj or nil
3757 *
3758 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3759 * if not explicitly inside a Fiber), using either a symbol or a string name.
3760 * If the specified variable does not exist, returns +nil+.
3761 *
3762 * [
3763 * Thread.new { Thread.current["name"] = "A" },
3764 * Thread.new { Thread.current[:name] = "B" },
3765 * Thread.new { Thread.current["name"] = "C" }
3766 * ].each do |th|
3767 * th.join
3768 * puts "#{th.inspect}: #{th[:name]}"
3769 * end
3770 *
3771 * This will produce:
3772 *
3773 * #<Thread:0x00000002a54220 dead>: A
3774 * #<Thread:0x00000002a541a8 dead>: B
3775 * #<Thread:0x00000002a54130 dead>: C
3776 *
3777 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3778 * This confusion did not exist in Ruby 1.8 because
3779 * fibers have only been available since Ruby 1.9.
3780 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3781 * the following idiom for dynamic scope.
3782 *
3783 * def meth(newvalue)
3784 * begin
3785 * oldvalue = Thread.current[:name]
3786 * Thread.current[:name] = newvalue
3787 * yield
3788 * ensure
3789 * Thread.current[:name] = oldvalue
3790 * end
3791 * end
3792 *
3793 * The idiom may not work as dynamic scope if the methods are thread-local
3794 * and a given block switches fibers.
3795 *
3796 * f = Fiber.new {
3797 * meth(1) {
3798 * Fiber.yield
3799 * }
3800 * }
3801 * meth(2) {
3802 * f.resume
3803 * }
3804 * f.resume
3805 * p Thread.current[:name]
3806 * #=> nil if fiber-local
3807 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3808 *
3809 * For thread-local variables, please see #thread_variable_get and
3810 * #thread_variable_set.
3811 *
3812 */
3813
3814static VALUE
3815rb_thread_aref(VALUE thread, VALUE key)
3816{
3817 ID id = rb_check_id(&key);
3818 if (!id) return Qnil;
3819 return rb_thread_local_aref(thread, id);
3820}
3821
3822/*
3823 * call-seq:
3824 * thr.fetch(sym) -> obj
3825 * thr.fetch(sym) { } -> obj
3826 * thr.fetch(sym, default) -> obj
3827 *
3828 * Returns a fiber-local for the given key. If the key can't be
3829 * found, there are several options: With no other arguments, it will
3830 * raise a KeyError exception; if <i>default</i> is given, then that
3831 * will be returned; if the optional code block is specified, then
3832 * that will be run and its result returned. See Thread#[] and
3833 * Hash#fetch.
3834 */
3835static VALUE
3836rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3837{
3838 VALUE key, val;
3839 ID id;
3840 rb_thread_t *target_th = rb_thread_ptr(self);
3841 int block_given;
3842
3843 rb_check_arity(argc, 1, 2);
3844 key = argv[0];
3845
3846 block_given = rb_block_given_p();
3847 if (block_given && argc == 2) {
3848 rb_warn("block supersedes default value argument");
3849 }
3850
3851 id = rb_check_id(&key);
3852
3853 if (id == recursive_key) {
3854 return target_th->ec->local_storage_recursive_hash;
3855 }
3856 else if (id && target_th->ec->local_storage &&
3857 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3858 return val;
3859 }
3860 else if (block_given) {
3861 return rb_yield(key);
3862 }
3863 else if (argc == 1) {
3864 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3865 }
3866 else {
3867 return argv[1];
3868 }
3869}
3870
3871static VALUE
3872threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3873{
3874 if (id == recursive_key) {
3875 th->ec->local_storage_recursive_hash = val;
3876 return val;
3877 }
3878 else {
3879 struct rb_id_table *local_storage = th->ec->local_storage;
3880
3881 if (NIL_P(val)) {
3882 if (!local_storage) return Qnil;
3883 rb_id_table_delete(local_storage, id);
3884 return Qnil;
3885 }
3886 else {
3887 if (local_storage == NULL) {
3888 th->ec->local_storage = local_storage = rb_id_table_create(0);
3889 }
3890 rb_id_table_insert(local_storage, id, val);
3891 return val;
3892 }
3893 }
3894}
3895
3896VALUE
3897rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3898{
3899 if (OBJ_FROZEN(thread)) {
3900 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3901 }
3902
3903 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3904}
3905
3906/*
3907 * call-seq:
3908 * thr[sym] = obj -> obj
3909 *
3910 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3911 * using either a symbol or a string.
3912 *
3913 * See also Thread#[].
3914 *
3915 * For thread-local variables, please see #thread_variable_set and
3916 * #thread_variable_get.
3917 */
3918
3919static VALUE
3920rb_thread_aset(VALUE self, VALUE id, VALUE val)
3921{
3922 return rb_thread_local_aset(self, rb_to_id(id), val);
3923}
3924
3925/*
3926 * call-seq:
3927 * thr.thread_variable_get(key) -> obj or nil
3928 *
3929 * Returns the value of a thread local variable that has been set. Note that
3930 * these are different than fiber local values. For fiber local values,
3931 * please see Thread#[] and Thread#[]=.
3932 *
3933 * Thread local values are carried along with threads, and do not respect
3934 * fibers. For example:
3935 *
3936 * Thread.new {
3937 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3938 * Thread.current["foo"] = "bar" # set a fiber local
3939 *
3940 * Fiber.new {
3941 * Fiber.yield [
3942 * Thread.current.thread_variable_get("foo"), # get the thread local
3943 * Thread.current["foo"], # get the fiber local
3944 * ]
3945 * }.resume
3946 * }.join.value # => ['bar', nil]
3947 *
3948 * The value "bar" is returned for the thread local, whereas nil is returned
3949 * for the fiber local. The fiber is executed in the same thread, so the
3950 * thread local values are available.
3951 */
3952
3953static VALUE
3954rb_thread_variable_get(VALUE thread, VALUE key)
3955{
3956 VALUE locals;
3957 VALUE symbol = rb_to_symbol(key);
3958
3959 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3960 return Qnil;
3961 }
3962 locals = rb_thread_local_storage(thread);
3963 return rb_hash_aref(locals, symbol);
3964}
3965
3966/*
3967 * call-seq:
3968 * thr.thread_variable_set(key, value)
3969 *
3970 * Sets a thread local with +key+ to +value+. Note that these are local to
3971 * threads, and not to fibers. Please see Thread#thread_variable_get and
3972 * Thread#[] for more information.
3973 */
3974
3975static VALUE
3976rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3977{
3978 VALUE locals;
3979
3980 if (OBJ_FROZEN(thread)) {
3981 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3982 }
3983
3984 locals = rb_thread_local_storage(thread);
3985 return rb_hash_aset(locals, rb_to_symbol(key), val);
3986}
3987
3988/*
3989 * call-seq:
3990 * thr.key?(sym) -> true or false
3991 *
3992 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3993 * variable.
3994 *
3995 * me = Thread.current
3996 * me[:oliver] = "a"
3997 * me.key?(:oliver) #=> true
3998 * me.key?(:stanley) #=> false
3999 */
4000
4001static VALUE
4002rb_thread_key_p(VALUE self, VALUE key)
4003{
4004 VALUE val;
4005 ID id = rb_check_id(&key);
4006 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4007
4008 if (!id || local_storage == NULL) {
4009 return Qfalse;
4010 }
4011 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
4012}
4013
4014static enum rb_id_table_iterator_result
4015thread_keys_i(ID key, VALUE value, void *ary)
4016{
4017 rb_ary_push((VALUE)ary, ID2SYM(key));
4018 return ID_TABLE_CONTINUE;
4019}
4020
4021int
4022rb_thread_alone(void)
4023{
4024 // TODO
4025 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4026}
4027
4028/*
4029 * call-seq:
4030 * thr.keys -> array
4031 *
4032 * Returns an array of the names of the fiber-local variables (as Symbols).
4033 *
4034 * thr = Thread.new do
4035 * Thread.current[:cat] = 'meow'
4036 * Thread.current["dog"] = 'woof'
4037 * end
4038 * thr.join #=> #<Thread:0x401b3f10 dead>
4039 * thr.keys #=> [:dog, :cat]
4040 */
4041
4042static VALUE
4043rb_thread_keys(VALUE self)
4044{
4045 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4046 VALUE ary = rb_ary_new();
4047
4048 if (local_storage) {
4049 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4050 }
4051 return ary;
4052}
4053
4054static int
4055keys_i(VALUE key, VALUE value, VALUE ary)
4056{
4057 rb_ary_push(ary, key);
4058 return ST_CONTINUE;
4059}
4060
4061/*
4062 * call-seq:
4063 * thr.thread_variables -> array
4064 *
4065 * Returns an array of the names of the thread-local variables (as Symbols).
4066 *
4067 * thr = Thread.new do
4068 * Thread.current.thread_variable_set(:cat, 'meow')
4069 * Thread.current.thread_variable_set("dog", 'woof')
4070 * end
4071 * thr.join #=> #<Thread:0x401b3f10 dead>
4072 * thr.thread_variables #=> [:dog, :cat]
4073 *
4074 * Note that these are not fiber local variables. Please see Thread#[] and
4075 * Thread#thread_variable_get for more details.
4076 */
4077
4078static VALUE
4079rb_thread_variables(VALUE thread)
4080{
4081 VALUE locals;
4082 VALUE ary;
4083
4084 ary = rb_ary_new();
4085 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4086 return ary;
4087 }
4088 locals = rb_thread_local_storage(thread);
4089 rb_hash_foreach(locals, keys_i, ary);
4090
4091 return ary;
4092}
4093
4094/*
4095 * call-seq:
4096 * thr.thread_variable?(key) -> true or false
4097 *
4098 * Returns +true+ if the given string (or symbol) exists as a thread-local
4099 * variable.
4100 *
4101 * me = Thread.current
4102 * me.thread_variable_set(:oliver, "a")
4103 * me.thread_variable?(:oliver) #=> true
4104 * me.thread_variable?(:stanley) #=> false
4105 *
4106 * Note that these are not fiber local variables. Please see Thread#[] and
4107 * Thread#thread_variable_get for more details.
4108 */
4109
4110static VALUE
4111rb_thread_variable_p(VALUE thread, VALUE key)
4112{
4113 VALUE locals;
4114 VALUE symbol = rb_to_symbol(key);
4115
4116 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4117 return Qfalse;
4118 }
4119 locals = rb_thread_local_storage(thread);
4120
4121 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4122}
4123
4124/*
4125 * call-seq:
4126 * thr.priority -> integer
4127 *
4128 * Returns the priority of <i>thr</i>. The default is inherited from the
4129 * thread that created the new thread, or zero for the initial main thread;
4130 * higher-priority threads will run more frequently than lower-priority
4131 * threads (but lower-priority threads can also run).
4132 *
4133 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4134 * some platforms.
4135 *
4136 * Thread.current.priority #=> 0
4137 */
4138
4139static VALUE
4140rb_thread_priority(VALUE thread)
4141{
4142 return INT2NUM(rb_thread_ptr(thread)->priority);
4143}
4144
4145
4146/*
4147 * call-seq:
4148 * thr.priority= integer -> thr
4149 *
4150 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4151 * will run more frequently than lower-priority threads (but lower-priority
4152 * threads can also run).
4153 *
4154 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4155 * some platforms.
4156 *
4157 * count1 = count2 = 0
4158 * a = Thread.new do
4159 * loop { count1 += 1 }
4160 * end
4161 * a.priority = -1
4162 *
4163 * b = Thread.new do
4164 * loop { count2 += 1 }
4165 * end
4166 * b.priority = -2
4167 * sleep 1 #=> 1
4168 * count1 #=> 622504
4169 * count2 #=> 5832
4170 */
4171
4172static VALUE
4173rb_thread_priority_set(VALUE thread, VALUE prio)
4174{
4175 rb_thread_t *target_th = rb_thread_ptr(thread);
4176 int priority;
4177
4178#if USE_NATIVE_THREAD_PRIORITY
4179 target_th->priority = NUM2INT(prio);
4180    native_thread_apply_priority(target_th);
4181#else
4182 priority = NUM2INT(prio);
4183 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4184 priority = RUBY_THREAD_PRIORITY_MAX;
4185 }
4186 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4187 priority = RUBY_THREAD_PRIORITY_MIN;
4188 }
4189 target_th->priority = (int8_t)priority;
4190#endif
4191 return INT2NUM(target_th->priority);
4192}
4193
4194/* for IO */
4195
4196#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4197
4198/*
4199 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4200 * in select(2) system call.
4201 *
4202 * - Linux 2.2.12 (?)
4203 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4204 * select(2) documents how to allocate fd_set dynamically.
4205 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4206 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4207 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4208 * select(2) documents how to allocate fd_set dynamically.
4209 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4210 * - Solaris 8 has select_large_fdset
4211 * - Mac OS X 10.7 (Lion)
4212 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4213 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4214 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4215 *
4216 * When fd_set is not big enough to hold big file descriptors,
4217 * it should be allocated dynamically.
4218 * Note that this assumes fd_set is structured as a bitmap.
4219 *
4220 * rb_fd_init allocates the memory.
4221 * rb_fd_term frees the memory.
4222 * rb_fd_set may re-allocate the bitmap.
4223 *
4224 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4225 */
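/*
 * A rough usage sketch of the rb_fdset_t API described above; the descriptor
 * numbers are illustrative only:
 *
 *   rb_fdset_t fds;
 *   rb_fd_init(&fds);
 *   rb_fd_set(42, &fds);                   // ordinary descriptor
 *   rb_fd_set(3000, &fds);                 // above FD_SETSIZE; the bitmap grows
 *   int is_set = rb_fd_isset(3000, &fds);  // non-zero: still set after growth
 *   rb_fd_clr(42, &fds);
 *   rb_fd_term(&fds);                      // release the dynamically allocated bitmap
 */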
4226
4227void
4228rb_fd_init(rb_fdset_t *fds)
4229{
4230 fds->maxfd = 0;
4231 fds->fdset = ALLOC(fd_set);
4232 FD_ZERO(fds->fdset);
4233}
4234
4235void
4236rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4237{
4238 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4239
4240 if (size < sizeof(fd_set))
4241 size = sizeof(fd_set);
4242 dst->maxfd = src->maxfd;
4243 dst->fdset = xmalloc(size);
4244 memcpy(dst->fdset, src->fdset, size);
4245}
4246
4247void
4248rb_fd_term(rb_fdset_t *fds)
4249{
4250 xfree(fds->fdset);
4251 fds->maxfd = 0;
4252 fds->fdset = 0;
4253}
4254
4255void
4256rb_fd_zero(rb_fdset_t *fds)
4257{
4258 if (fds->fdset)
4259 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4260}
4261
4262static void
4263rb_fd_resize(int n, rb_fdset_t *fds)
4264{
4265 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4266 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4267
4268 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4269 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4270
4271 if (m > o) {
4272 fds->fdset = xrealloc(fds->fdset, m);
4273 memset((char *)fds->fdset + o, 0, m - o);
4274 }
4275 if (n >= fds->maxfd) fds->maxfd = n + 1;
4276}
4277
4278void
4279rb_fd_set(int n, rb_fdset_t *fds)
4280{
4281 rb_fd_resize(n, fds);
4282 FD_SET(n, fds->fdset);
4283}
4284
4285void
4286rb_fd_clr(int n, rb_fdset_t *fds)
4287{
4288 if (n >= fds->maxfd) return;
4289 FD_CLR(n, fds->fdset);
4290}
4291
4292int
4293rb_fd_isset(int n, const rb_fdset_t *fds)
4294{
4295 if (n >= fds->maxfd) return 0;
4296 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4297}
4298
4299void
4300rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4301{
4302 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4303
4304 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4305 dst->maxfd = max;
4306 dst->fdset = xrealloc(dst->fdset, size);
4307 memcpy(dst->fdset, src, size);
4308}
4309
4310void
4311rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4312{
4313 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4314
4315 if (size < sizeof(fd_set))
4316 size = sizeof(fd_set);
4317 dst->maxfd = src->maxfd;
4318 dst->fdset = xrealloc(dst->fdset, size);
4319 memcpy(dst->fdset, src->fdset, size);
4320}
4321
4322int
4323rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4324{
4325 fd_set *r = NULL, *w = NULL, *e = NULL;
4326 if (readfds) {
4327 rb_fd_resize(n - 1, readfds);
4328 r = rb_fd_ptr(readfds);
4329 }
4330 if (writefds) {
4331 rb_fd_resize(n - 1, writefds);
4332 w = rb_fd_ptr(writefds);
4333 }
4334 if (exceptfds) {
4335 rb_fd_resize(n - 1, exceptfds);
4336 e = rb_fd_ptr(exceptfds);
4337 }
4338 return select(n, r, w, e, timeout);
4339}
4340
4341#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4342
4343#undef FD_ZERO
4344#undef FD_SET
4345#undef FD_CLR
4346#undef FD_ISSET
4347
4348#define FD_ZERO(f) rb_fd_zero(f)
4349#define FD_SET(i, f) rb_fd_set((i), (f))
4350#define FD_CLR(i, f) rb_fd_clr((i), (f))
4351#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4352
4353#elif defined(_WIN32)
4354
4355void
4356rb_fd_init(rb_fdset_t *set)
4357{
4358 set->capa = FD_SETSIZE;
4359 set->fdset = ALLOC(fd_set);
4360 FD_ZERO(set->fdset);
4361}
4362
4363void
4364rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4365{
4366 rb_fd_init(dst);
4367 rb_fd_dup(dst, src);
4368}
4369
4370void
4371rb_fd_term(rb_fdset_t *set)
4372{
4373 xfree(set->fdset);
4374 set->fdset = NULL;
4375 set->capa = 0;
4376}
4377
4378void
4379rb_fd_set(int fd, rb_fdset_t *set)
4380{
4381 unsigned int i;
4382 SOCKET s = rb_w32_get_osfhandle(fd);
4383
4384 for (i = 0; i < set->fdset->fd_count; i++) {
4385 if (set->fdset->fd_array[i] == s) {
4386 return;
4387 }
4388 }
4389 if (set->fdset->fd_count >= (unsigned)set->capa) {
4390 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4391 set->fdset =
4392 rb_xrealloc_mul_add(
4393 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4394 }
4395 set->fdset->fd_array[set->fdset->fd_count++] = s;
4396}
4397
4398#undef FD_ZERO
4399#undef FD_SET
4400#undef FD_CLR
4401#undef FD_ISSET
4402
4403#define FD_ZERO(f) rb_fd_zero(f)
4404#define FD_SET(i, f) rb_fd_set((i), (f))
4405#define FD_CLR(i, f) rb_fd_clr((i), (f))
4406#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4407
4408#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4409
4410#endif
4411
4412#ifndef rb_fd_no_init
4413#define rb_fd_no_init(fds) (void)(fds)
4414#endif
4415
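/*
 * Decides whether an interrupted or prematurely returned wait should be
 * retried: EINTR/ERESTART reset *result and restart the wait (shrinking the
 * relative timeout against the absolute deadline), and a zero result is
 * treated as a spurious wakeup unless the deadline has already expired.
 */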
4416static int
4417wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4418{
4419 int r = *result;
4420 if (r < 0) {
4421 switch (errnum) {
4422 case EINTR:
4423#ifdef ERESTART
4424 case ERESTART:
4425#endif
4426 *result = 0;
4427 if (rel && hrtime_update_expire(rel, end)) {
4428 *rel = 0;
4429 }
4430 return TRUE;
4431 }
4432 return FALSE;
4433 }
4434 else if (r == 0) {
4435 /* check for spurious wakeup */
4436 if (rel) {
4437 return !hrtime_update_expire(rel, end);
4438 }
4439 return TRUE;
4440 }
4441 return FALSE;
4442}
4444struct select_set {
4445 int max;
4446 rb_thread_t *th;
4447 rb_fdset_t *rset;
4448 rb_fdset_t *wset;
4449 rb_fdset_t *eset;
4450 rb_fdset_t orig_rset;
4451 rb_fdset_t orig_wset;
4452 rb_fdset_t orig_eset;
4453 struct timeval *timeout;
4454};
4455
4456static VALUE
4457select_set_free(VALUE p)
4458{
4459 struct select_set *set = (struct select_set *)p;
4460
4461 rb_fd_term(&set->orig_rset);
4462 rb_fd_term(&set->orig_wset);
4463 rb_fd_term(&set->orig_eset);
4464
4465 return Qfalse;
4466}
4467
4468static VALUE
4469do_select(VALUE p)
4470{
4471 struct select_set *set = (struct select_set *)p;
4472 volatile int result = 0;
4473 int lerrno;
4474 rb_hrtime_t *to, rel, end = 0;
4475
4476 timeout_prepare(&to, &rel, &end, set->timeout);
4477 volatile rb_hrtime_t endtime = end;
4478#define restore_fdset(dst, src) \
4479 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4480#define do_select_update() \
4481 (restore_fdset(set->rset, &set->orig_rset), \
4482 restore_fdset(set->wset, &set->orig_wset), \
4483 restore_fdset(set->eset, &set->orig_eset), \
4484 TRUE)
4485
4486 do {
4487 lerrno = 0;
4488
4489 BLOCKING_REGION(set->th, {
4490 struct timeval tv;
4491
4492 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4493 result = native_fd_select(set->max,
4494 set->rset, set->wset, set->eset,
4495 rb_hrtime2timeval(&tv, to), set->th);
4496 if (result < 0) lerrno = errno;
4497 }
4498 }, ubf_select, set->th, TRUE);
4499
4500 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4501 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4502
4503 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4504
4505 if (result < 0) {
4506 errno = lerrno;
4507 }
4508
4509 return (VALUE)result;
4510}
4511
4511
4512int
4513rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4514 struct timeval *timeout)
4515{
4516 struct select_set set;
4517
4518 set.th = GET_THREAD();
4519 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4520 set.max = max;
4521 set.rset = read;
4522 set.wset = write;
4523 set.eset = except;
4524 set.timeout = timeout;
4525
4526 if (!set.rset && !set.wset && !set.eset) {
4527 if (!timeout) {
4528            rb_thread_sleep_forever();
4529            return 0;
4530 }
4531 rb_thread_wait_for(*timeout);
4532 return 0;
4533 }
4534
4535#define fd_init_copy(f) do { \
4536 if (set.f) { \
4537 rb_fd_resize(set.max - 1, set.f); \
4538 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4539 rb_fd_init_copy(&set.orig_##f, set.f); \
4540 } \
4541 } \
4542 else { \
4543 rb_fd_no_init(&set.orig_##f); \
4544 } \
4545 } while (0)
4546 fd_init_copy(rset);
4547 fd_init_copy(wset);
4548 fd_init_copy(eset);
4549#undef fd_init_copy
4550
4551 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4552}
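/*
 * A hedged sketch of calling rb_thread_fd_select() from C, mirroring how the
 * no-poll fallback below uses it; `fd` and the timeout values are illustrative:
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = { 1, 500000 };   // 1.5 seconds
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);
 *   int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
 *   if (n > 0 && rb_fd_isset(fd, &rfds)) {
 *       // fd is readable
 *   }
 *   rb_fd_term(&rfds);
 */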
4553
4554#ifdef USE_POLL
4555
4556/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4557#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4558#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4559#define POLLEX_SET (POLLPRI)
4560
4561#ifndef POLLERR_SET /* defined for FreeBSD for now */
4562# define POLLERR_SET (0)
4563#endif
4564
4565static int
4566wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4567 rb_hrtime_t *const to, volatile int *lerrno)
4568{
4569 struct timespec ts;
4570 volatile int result = 0;
4571
4572 *lerrno = 0;
4573 BLOCKING_REGION(th, {
4574 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4575 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4576 if (result < 0) *lerrno = errno;
4577 }
4578 }, ubf_select, th, TRUE);
4579 return result;
4580}
4581
4582/*
4583 * returns a mask of events
4584 */
4585static int
4586thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4587{
4588 struct pollfd fds[1] = {{
4589 .fd = fd,
4590 .events = (short)events,
4591 .revents = 0,
4592 }};
4593 volatile int result = 0;
4594 nfds_t nfds;
4595 struct rb_io_blocking_operation blocking_operation;
4596 enum ruby_tag_type state;
4597 volatile int lerrno;
4598
4599 rb_execution_context_t *ec = GET_EC();
4600 rb_thread_t *th = rb_ec_thread_ptr(ec);
4601
4602 if (io) {
4603 blocking_operation.ec = ec;
4604 rb_io_blocking_operation_enter(io, &blocking_operation);
4605 }
4606
4607 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4608 // fd is readable
4609 state = 0;
4610 fds[0].revents = events;
4611 errno = 0;
4612 }
4613 else {
4614 EC_PUSH_TAG(ec);
4615 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4616 rb_hrtime_t *to, rel, end = 0;
4617 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4618 timeout_prepare(&to, &rel, &end, timeout);
4619 do {
4620 nfds = numberof(fds);
4621 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4622
4623 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4624 } while (wait_retryable(&result, lerrno, to, end));
4625
4626 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4627 }
4628
4629 EC_POP_TAG();
4630 }
4631
4632 if (io) {
4633 rb_io_blocking_operation_exit(io, &blocking_operation);
4634 }
4635
4636 if (state) {
4637 EC_JUMP_TAG(ec, state);
4638 }
4639
4640 if (result < 0) {
4641 errno = lerrno;
4642 return -1;
4643 }
4644
4645 if (fds[0].revents & POLLNVAL) {
4646 errno = EBADF;
4647 return -1;
4648 }
4649
4650 /*
4651 * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4652 * Therefore we need to fix it up.
4653 */
4654 result = 0;
4655 if (fds[0].revents & POLLIN_SET)
4656 result |= RB_WAITFD_IN;
4657 if (fds[0].revents & POLLOUT_SET)
4658 result |= RB_WAITFD_OUT;
4659 if (fds[0].revents & POLLEX_SET)
4660 result |= RB_WAITFD_PRI;
4661
4662 /* all requested events are ready if there is an error */
4663 if (fds[0].revents & POLLERR_SET)
4664 result |= events;
4665
4666 return result;
4667}
4668#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4669struct select_args {
4670 struct rb_io *io;
4671 struct rb_io_blocking_operation *blocking_operation;
4672
4673 union {
4674 int fd;
4675 int error;
4676 } as;
4677 rb_fdset_t *read;
4678 rb_fdset_t *write;
4679 rb_fdset_t *except;
4680 struct timeval *tv;
4681};
4682
4683static VALUE
4684select_single(VALUE ptr)
4685{
4686 struct select_args *args = (struct select_args *)ptr;
4687 int r;
4688
4689 r = rb_thread_fd_select(args->as.fd + 1,
4690 args->read, args->write, args->except, args->tv);
4691 if (r == -1)
4692 args->as.error = errno;
4693 if (r > 0) {
4694 r = 0;
4695 if (args->read && rb_fd_isset(args->as.fd, args->read))
4696 r |= RB_WAITFD_IN;
4697 if (args->write && rb_fd_isset(args->as.fd, args->write))
4698 r |= RB_WAITFD_OUT;
4699 if (args->except && rb_fd_isset(args->as.fd, args->except))
4700 r |= RB_WAITFD_PRI;
4701 }
4702 return (VALUE)r;
4703}
4704
4705static VALUE
4706select_single_cleanup(VALUE ptr)
4707{
4708 struct select_args *args = (struct select_args *)ptr;
4709
4710 if (args->blocking_operation) {
4711 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4712 }
4713
4714 if (args->read) rb_fd_term(args->read);
4715 if (args->write) rb_fd_term(args->write);
4716 if (args->except) rb_fd_term(args->except);
4717
4718 return (VALUE)-1;
4719}
4720
4721static rb_fdset_t *
4722init_set_fd(int fd, rb_fdset_t *fds)
4723{
4724 if (fd < 0) {
4725 return 0;
4726 }
4727 rb_fd_init(fds);
4728 rb_fd_set(fd, fds);
4729
4730 return fds;
4731}
4732
4733static int
4734thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4735{
4736 rb_fdset_t rfds, wfds, efds;
4737 struct select_args args;
4738 VALUE ptr = (VALUE)&args;
4739
4740 struct rb_io_blocking_operation blocking_operation;
4741 if (io) {
4742 args.io = io;
4743 blocking_operation.ec = GET_EC();
4744 rb_io_blocking_operation_enter(io, &blocking_operation);
4745 args.blocking_operation = &blocking_operation;
4746 }
4747 else {
4748 args.io = NULL;
4749 blocking_operation.ec = NULL;
4750 args.blocking_operation = NULL;
4751 }
4752
4753 args.as.fd = fd;
4754 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4755 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4756 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4757 args.tv = timeout;
4758
4759 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4760 if (result == -1)
4761 errno = args.as.error;
4762
4763 return result;
4764}
4765#endif /* ! USE_POLL */
4766
4767int
4768rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4769{
4770 return thread_io_wait(NULL, fd, events, timeout);
4771}
4772
4773int
4774rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
4775{
4776 return thread_io_wait(io, io->fd, events, timeout);
4777}
4778
4779/*
4780 * for GC
4781 */
4782
4783#ifdef USE_CONSERVATIVE_STACK_END
4784void
4785rb_gc_set_stack_end(VALUE **stack_end_p)
4786{
4787 VALUE stack_end;
4788COMPILER_WARNING_PUSH
4789#if RBIMPL_COMPILER_IS(GCC)
4790COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4791#endif
4792 *stack_end_p = &stack_end;
4793COMPILER_WARNING_POP
4794}
4795#endif
4796
4797/*
4798 *
4799 */
4800
4801void
4802rb_threadptr_check_signal(rb_thread_t *mth)
4803{
4804 /* mth must be main_thread */
4805 if (rb_signal_buff_size() > 0) {
4806 /* wakeup main thread */
4807 threadptr_trap_interrupt(mth);
4808 }
4809}
4810
4811static void
4812async_bug_fd(const char *mesg, int errno_arg, int fd)
4813{
4814 char buff[64];
4815 size_t n = strlcpy(buff, mesg, sizeof(buff));
4816 if (n < sizeof(buff)-3) {
4817 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4818 }
4819 rb_async_bug_errno(buff, errno_arg);
4820}
4821
4822/* VM-dependent API is not available for this function */
4823static int
4824consume_communication_pipe(int fd)
4825{
4826#if USE_EVENTFD
4827 uint64_t buff[1];
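    /* eventfd(2) reads must be given a buffer of at least 8 bytes and yield the 64-bit counter value */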
4828#else
4829    /* buffer can be shared because no one refers to it. */
4830 static char buff[1024];
4831#endif
4832 ssize_t result;
4833 int ret = FALSE; /* for rb_sigwait_sleep */
4834
4835 while (1) {
4836 result = read(fd, buff, sizeof(buff));
4837#if USE_EVENTFD
4838        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4839#else
4840 RUBY_DEBUG_LOG("result:%d", (int)result);
4841#endif
4842 if (result > 0) {
4843 ret = TRUE;
4844 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4845 return ret;
4846 }
4847 }
4848 else if (result == 0) {
4849 return ret;
4850 }
4851 else if (result < 0) {
4852 int e = errno;
4853 switch (e) {
4854 case EINTR:
4855 continue; /* retry */
4856 case EAGAIN:
4857#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4858 case EWOULDBLOCK:
4859#endif
4860 return ret;
4861 default:
4862 async_bug_fd("consume_communication_pipe: read", e, fd);
4863 }
4864 }
4865 }
4866}
4867
4868void
4869rb_thread_stop_timer_thread(void)
4870{
4871 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4872 native_reset_timer_thread();
4873 }
4874}
4875
4876void
4877rb_thread_reset_timer_thread(void)
4878{
4879 native_reset_timer_thread();
4880}
4881
4882void
4883rb_thread_start_timer_thread(void)
4884{
4885 system_working = 1;
4886 rb_thread_create_timer_thread();
4887}
4888
4889static int
4890clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4891{
4892 int i;
4893 VALUE coverage = (VALUE)val;
4894 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4895 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4896
4897 if (lines) {
4898 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4899 rb_ary_clear(lines);
4900 }
4901 else {
4902 int i;
4903 for (i = 0; i < RARRAY_LEN(lines); i++) {
4904 if (RARRAY_AREF(lines, i) != Qnil)
4905 RARRAY_ASET(lines, i, INT2FIX(0));
4906 }
4907 }
4908 }
4909 if (branches) {
4910 VALUE counters = RARRAY_AREF(branches, 1);
4911 for (i = 0; i < RARRAY_LEN(counters); i++) {
4912 RARRAY_ASET(counters, i, INT2FIX(0));
4913 }
4914 }
4915
4916 return ST_CONTINUE;
4917}
4918
4919void
4920rb_clear_coverages(void)
4921{
4922 VALUE coverages = rb_get_coverages();
4923 if (RTEST(coverages)) {
4924 rb_hash_foreach(coverages, clear_coverage_i, 0);
4925 }
4926}
4927
4928#if defined(HAVE_WORKING_FORK)
4929
4930static void
4931rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4932{
4933 rb_thread_t *i = 0;
4934 rb_vm_t *vm = th->vm;
4935 rb_ractor_t *r = th->ractor;
4936 vm->ractor.main_ractor = r;
4937 vm->ractor.main_thread = th;
4938 r->threads.main = th;
4939 r->status_ = ractor_created;
4940
4941 thread_sched_atfork(TH_SCHED(th));
4942 ubf_list_atfork();
4943 rb_signal_atfork();
4944
4945 // OK. Only this thread accesses:
4946 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4947 if (r != vm->ractor.main_ractor) {
4948 rb_ractor_terminate_atfork(vm, r);
4949 }
4950 ccan_list_for_each(&r->threads.set, i, lt_node) {
4951 atfork(i, th);
4952 }
4953 }
4954 rb_vm_living_threads_init(vm);
4955
4956 rb_ractor_atfork(vm, th);
4957 rb_vm_postponed_job_atfork();
4958
4959 /* may be held by any thread in parent */
4960 rb_native_mutex_initialize(&th->interrupt_lock);
4961 ccan_list_head_init(&th->interrupt_exec_tasks);
4962
4963 vm->fork_gen++;
4964 rb_ractor_sleeper_threads_clear(th->ractor);
4965 rb_clear_coverages();
4966
4967    // restart timer thread (timer threads access `vm->waitpid_lock` and so on).
4968 rb_thread_reset_timer_thread();
4969 rb_thread_start_timer_thread();
4970
4971 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4972 VM_ASSERT(vm->ractor.cnt == 1);
4973}
4974
4975static void
4976terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4977{
4978 if (th != current_th) {
4979 rb_native_mutex_initialize(&th->interrupt_lock);
4980 rb_mutex_abandon_keeping_mutexes(th);
4981 rb_mutex_abandon_locking_mutex(th);
4982 thread_cleanup_func(th, TRUE);
4983 }
4984}
4985
4986void rb_fiber_atfork(rb_thread_t *);
4987void
4988rb_thread_atfork(void)
4989{
4990 rb_thread_t *th = GET_THREAD();
4991 rb_threadptr_pending_interrupt_clear(th);
4992 rb_thread_atfork_internal(th, terminate_atfork_i);
4993 th->join_list = NULL;
4994 rb_fiber_atfork(th);
4995
4996 /* We don't want reproduce CVE-2003-0900. */
4997    rb_reset_random_seed();
4998}
4999
5000static void
5001terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
5002{
5003 if (th != current_th) {
5004 thread_cleanup_func_before_exec(th);
5005 }
5006}
5007
5008void
5009rb_thread_atfork_before_exec(void)
5010{
5011 rb_thread_t *th = GET_THREAD();
5012 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
5013}
5014#else
5015void
5016rb_thread_atfork(void)
5017{
5018}
5019
5020void
5021rb_thread_atfork_before_exec(void)
5022{
5023}
5024#endif
5026struct thgroup {
5027 int enclosed;
5028};
5029
5030static const rb_data_type_t thgroup_data_type = {
5031 "thgroup",
5032 {
5033 0,
5035 NULL, // No external memory to report
5036 },
5037 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5038};
5039
5040/*
5041 * Document-class: ThreadGroup
5042 *
5043 * ThreadGroup provides a means of keeping track of a number of threads as a
5044 * group.
5045 *
5046 * A given Thread object can only belong to one ThreadGroup at a time; adding
5047 * a thread to a new group will remove it from any previous group.
5048 *
5049 * Newly created threads belong to the same group as the thread from which they
5050 * were created.
5051 */
5052
5053/*
5054 * Document-const: Default
5055 *
5056 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5057 * by default.
5058 */
5059static VALUE
5060thgroup_s_alloc(VALUE klass)
5061{
5062 VALUE group;
5063 struct thgroup *data;
5064
5065 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5066 data->enclosed = 0;
5067
5068 return group;
5069}
5070
5071/*
5072 * call-seq:
5073 * thgrp.list -> array
5074 *
5075 * Returns an array of all existing Thread objects that belong to this group.
5076 *
5077 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5078 */
5079
5080static VALUE
5081thgroup_list(VALUE group)
5082{
5083 VALUE ary = rb_ary_new();
5084 rb_thread_t *th = 0;
5085 rb_ractor_t *r = GET_RACTOR();
5086
5087 ccan_list_for_each(&r->threads.set, th, lt_node) {
5088 if (th->thgroup == group) {
5089 rb_ary_push(ary, th->self);
5090 }
5091 }
5092 return ary;
5093}
5094
5095
5096/*
5097 * call-seq:
5098 * thgrp.enclose -> thgrp
5099 *
5100 * Prevents threads from being added to or removed from the receiving
5101 * ThreadGroup.
5102 *
5103 * New threads can still be started in an enclosed ThreadGroup.
5104 *
5105 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5106 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5107 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5108 * tg.add thr
5109 * #=> ThreadError: can't move from the enclosed thread group
5110 */
5111
5112static VALUE
5113thgroup_enclose(VALUE group)
5114{
5115 struct thgroup *data;
5116
5117 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5118 data->enclosed = 1;
5119
5120 return group;
5121}
5122
5123
5124/*
5125 * call-seq:
5126 * thgrp.enclosed? -> true or false
5127 *
5128 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5129 */
5130
5131static VALUE
5132thgroup_enclosed_p(VALUE group)
5133{
5134 struct thgroup *data;
5135
5136 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5137 return RBOOL(data->enclosed);
5138}
5139
5140
5141/*
5142 * call-seq:
5143 * thgrp.add(thread) -> thgrp
5144 *
5145 * Adds the given +thread+ to this group, removing it from any other
5146 * group to which it may have previously been a member.
5147 *
5148 * puts "Initial group is #{ThreadGroup::Default.list}"
5149 * tg = ThreadGroup.new
5150 * t1 = Thread.new { sleep }
5151 * t2 = Thread.new { sleep }
5152 * puts "t1 is #{t1}"
5153 * puts "t2 is #{t2}"
5154 * tg.add(t1)
5155 * puts "Initial group now #{ThreadGroup::Default.list}"
5156 * puts "tg group now #{tg.list}"
5157 *
5158 * This will produce:
5159 *
5160 * Initial group is #<Thread:0x401bdf4c>
5161 * t1 is #<Thread:0x401b3c90>
5162 * t2 is #<Thread:0x401b3c18>
5163 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5164 * tg group now #<Thread:0x401b3c90>
5165 */
5166
5167static VALUE
5168thgroup_add(VALUE group, VALUE thread)
5169{
5170 rb_thread_t *target_th = rb_thread_ptr(thread);
5171 struct thgroup *data;
5172
5173 if (OBJ_FROZEN(group)) {
5174 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5175 }
5176 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5177 if (data->enclosed) {
5178 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5179 }
5180
5181 if (OBJ_FROZEN(target_th->thgroup)) {
5182 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5183 }
5184 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5185 if (data->enclosed) {
5186 rb_raise(rb_eThreadError,
5187 "can't move from the enclosed thread group");
5188 }
5189
5190 target_th->thgroup = group;
5191 return group;
5192}
5193
5194/*
5195 * Document-class: ThreadShield
5196 */
5197static void
5198thread_shield_mark(void *ptr)
5199{
5200 rb_gc_mark((VALUE)ptr);
5201}
5202
5203static const rb_data_type_t thread_shield_data_type = {
5204 "thread_shield",
5205 {thread_shield_mark, 0, 0,},
5206 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5207};
5208
5209static VALUE
5210thread_shield_alloc(VALUE klass)
5211{
5212 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5213}
5214
5215#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5216#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5217#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5218#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5219STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
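/*
 * The waiting count is packed into the FL_USER0..FL_USER19 bits of the shield
 * object's flags, so no separate field is needed: rb_thread_shield_waiting()
 * extracts it by masking and shifting, and the inc/dec helpers below rewrite
 * only that bit range.
 */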
5220static inline unsigned int
5221rb_thread_shield_waiting(VALUE b)
5222{
5223 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5224}
5225
5226static inline void
5227rb_thread_shield_waiting_inc(VALUE b)
5228{
5229 unsigned int w = rb_thread_shield_waiting(b);
5230 w++;
5231 if (w > THREAD_SHIELD_WAITING_MAX)
5232 rb_raise(rb_eRuntimeError, "waiting count overflow");
5233 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5234 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5235}
5236
5237static inline void
5238rb_thread_shield_waiting_dec(VALUE b)
5239{
5240 unsigned int w = rb_thread_shield_waiting(b);
5241 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5242 w--;
5243 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5244 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5245}
5246
5247VALUE
5248rb_thread_shield_new(void)
5249{
5250 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5251 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5252 return thread_shield;
5253}
5254
5255bool
5256rb_thread_shield_owned(VALUE self)
5257{
5258 VALUE mutex = GetThreadShieldPtr(self);
5259 if (!mutex) return false;
5260
5261 rb_mutex_t *m = mutex_ptr(mutex);
5262
5263 return m->fiber == GET_EC()->fiber_ptr;
5264}
5265
5266/*
5267 * Wait on a thread shield.
5268 *
5269 * Returns
5270 * true: acquired the thread shield
5271 * false: the thread shield was destroyed and no other threads waiting
5272 * nil: the thread shield was destroyed but still in use
5273 */
5274VALUE
5275rb_thread_shield_wait(VALUE self)
5276{
5277 VALUE mutex = GetThreadShieldPtr(self);
5278 rb_mutex_t *m;
5279
5280 if (!mutex) return Qfalse;
5281 m = mutex_ptr(mutex);
5282 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5283 rb_thread_shield_waiting_inc(self);
5284 rb_mutex_lock(mutex);
5285 rb_thread_shield_waiting_dec(self);
5286 if (DATA_PTR(self)) return Qtrue;
5287 rb_mutex_unlock(mutex);
5288 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5289}
5290
5291static VALUE
5292thread_shield_get_mutex(VALUE self)
5293{
5294 VALUE mutex = GetThreadShieldPtr(self);
5295 if (!mutex)
5296 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5297 return mutex;
5298}
5299
5300/*
5301 * Release a thread shield, and return true if it has waiting threads.
5302 */
5303VALUE
5304rb_thread_shield_release(VALUE self)
5305{
5306 VALUE mutex = thread_shield_get_mutex(self);
5307 rb_mutex_unlock(mutex);
5308 return RBOOL(rb_thread_shield_waiting(self) > 0);
5309}
5310
5311/*
5312 * Release and destroy a thread shield, and return true if it has waiting threads.
5313 */
5314VALUE
5315rb_thread_shield_destroy(VALUE self)
5316{
5317 VALUE mutex = thread_shield_get_mutex(self);
5318 DATA_PTR(self) = 0;
5319 rb_mutex_unlock(mutex);
5320 return RBOOL(rb_thread_shield_waiting(self) > 0);
5321}
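/*
 * A hedged sketch of the intended ThreadShield pattern (roughly how require
 * guards a file being loaded): the creating thread gets the shield already
 * locked, other threads block in rb_thread_shield_wait(), and the owner wakes
 * them by releasing or destroying the shield when the work is done.
 *
 *   VALUE shield = rb_thread_shield_new();   // locked by the creating thread
 *   // perform the guarded work here
 *   rb_thread_shield_destroy(shield);        // Qtrue if any thread was waiting
 *
 *   // in another thread:
 *   VALUE r = rb_thread_shield_wait(shield); // Qtrue/Qfalse/Qnil, see above
 */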
5322
5323static VALUE
5324threadptr_recursive_hash(rb_thread_t *th)
5325{
5326 return th->ec->local_storage_recursive_hash;
5327}
5328
5329static void
5330threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5331{
5332 th->ec->local_storage_recursive_hash = hash;
5333}
5334
5335ID rb_frame_last_func(void);
5336
5337/*
5338 * Returns the current "recursive list" used to detect recursion.
5339 * This list is a hash table, unique for the current thread and for
5340 * the current __callee__.
5341 */
5342
5343static VALUE
5344recursive_list_access(VALUE sym)
5345{
5346 rb_thread_t *th = GET_THREAD();
5347 VALUE hash = threadptr_recursive_hash(th);
5348 VALUE list;
5349 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5350 hash = rb_ident_hash_new();
5351 threadptr_recursive_hash_set(th, hash);
5352 list = Qnil;
5353 }
5354 else {
5355 list = rb_hash_aref(hash, sym);
5356 }
5357 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5358 list = rb_ident_hash_new();
5359 rb_hash_aset(hash, sym, list);
5360 }
5361 return list;
5362}
5363
5364/*
5365 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5366 * in the recursion list.
5367 * Assumes the recursion list is valid.
5368 */
5369
5370static bool
5371recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5372{
5373#if SIZEOF_LONG == SIZEOF_VOIDP
5374 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5375#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5376 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5377 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5378#endif
5379
5380 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5381 if (UNDEF_P(pair_list))
5382 return false;
5383 if (paired_obj_id) {
5384 if (!RB_TYPE_P(pair_list, T_HASH)) {
5385 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5386 return false;
5387 }
5388 else {
5389 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5390 return false;
5391 }
5392 }
5393 return true;
5394}
5395
5396/*
5397 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5398 * For a single obj, it sets list[obj] to Qtrue.
5399 * For a pair, it sets list[obj] to paired_obj_id if possible,
5400 * otherwise list[obj] becomes a hash like:
5401 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5402 * Assumes the recursion list is valid.
5403 */
5404
5405static void
5406recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5407{
5408 VALUE pair_list;
5409
5410 if (!paired_obj) {
5411 rb_hash_aset(list, obj, Qtrue);
5412 }
5413 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5414 rb_hash_aset(list, obj, paired_obj);
5415 }
5416 else {
5417 if (!RB_TYPE_P(pair_list, T_HASH)){
5418 VALUE other_paired_obj = pair_list;
5419 pair_list = rb_hash_new();
5420 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5421 rb_hash_aset(list, obj, pair_list);
5422 }
5423 rb_hash_aset(pair_list, paired_obj, Qtrue);
5424 }
5425}
5426
5427/*
5428 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5429 * For a pair, if list[obj] is a hash, then paired_obj_id is
5430 * removed from the hash and no attempt is made to simplify
5431 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5432 * Assumes the recursion list is valid.
5433 */
5434
5435static int
5436recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5437{
5438 if (paired_obj) {
5439 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5440 if (UNDEF_P(pair_list)) {
5441 return 0;
5442 }
5443 if (RB_TYPE_P(pair_list, T_HASH)) {
5444 rb_hash_delete_entry(pair_list, paired_obj);
5445 if (!RHASH_EMPTY_P(pair_list)) {
5446                return 1; /* keep the hash until it is empty */
5447 }
5448 }
5449 }
5450 rb_hash_delete_entry(list, obj);
5451 return 1;
5452}
5454struct exec_recursive_params {
5455 VALUE (*func) (VALUE, VALUE, int);
5456 VALUE list;
5457 VALUE obj;
5458 VALUE pairid;
5459 VALUE arg;
5460};
5461
5462static VALUE
5463exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5464{
5465 struct exec_recursive_params *p = (void *)data;
5466 return (*p->func)(p->obj, p->arg, FALSE);
5467}
5468
5469/*
5470 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5471 * current method is called recursively on obj, or on the pair <obj, pairid>
5472 * If outer is 0, then the innermost func will be called with recursive set
5473 * to true, otherwise the outermost func will be called. In the latter case,
5474 * all inner func are short-circuited by throw.
5475 * Implementation details: the value thrown is the recursive list which is
5476 * proper to the current method and unlikely to be caught anywhere else.
5477 * list[recursive_key] is used as a flag for the outermost call.
5478 */
5479
5480static VALUE
5481exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5482{
5483 VALUE result = Qundef;
5484 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5485 struct exec_recursive_params p;
5486 int outermost;
5487 p.list = recursive_list_access(sym);
5488 p.obj = obj;
5489 p.pairid = pairid;
5490 p.arg = arg;
5491 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5492
5493 if (recursive_check(p.list, p.obj, pairid)) {
5494 if (outer && !outermost) {
5495 rb_throw_obj(p.list, p.list);
5496 }
5497 return (*func)(obj, arg, TRUE);
5498 }
5499 else {
5500 enum ruby_tag_type state;
5501
5502 p.func = func;
5503
5504 if (outermost) {
5505 recursive_push(p.list, ID2SYM(recursive_key), 0);
5506 recursive_push(p.list, p.obj, p.pairid);
5507 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5508 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5509 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5510 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5511 if (result == p.list) {
5512 result = (*func)(obj, arg, TRUE);
5513 }
5514 }
5515 else {
5516 volatile VALUE ret = Qundef;
5517 recursive_push(p.list, p.obj, p.pairid);
5518 EC_PUSH_TAG(GET_EC());
5519 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5520 ret = (*func)(obj, arg, FALSE);
5521 }
5522 EC_POP_TAG();
5523 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5524 goto invalid;
5525 }
5526 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5527 result = ret;
5528 }
5529 }
5530 *(volatile struct exec_recursive_params *)&p;
5531 return result;
5532
5533 invalid:
5534 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5535 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5536 sym, rb_thread_current());
5537    UNREACHABLE_RETURN(Qundef);
5538}
5539
5540/*
5541 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5542 * current method is called recursively on obj
5543 */
5544
5545VALUE
5546rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5547{
5548 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5549}
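/*
 * A hedged sketch of the usual rb_exec_recursive() calling pattern, e.g. an
 * inspect-like helper that must not loop on self-referential containers; the
 * function names here are illustrative, not part of this file:
 *
 *   static VALUE
 *   my_inspect_body(VALUE obj, VALUE arg, int recursive)
 *   {
 *       if (recursive) return rb_str_new_cstr("[...]");
 *       return my_real_inspect(obj, arg);
 *   }
 *
 *   // caller: rb_exec_recursive(my_inspect_body, obj, arg);
 */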
5550
5551/*
5552 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5553 * current method is called recursively on the ordered pair <obj, paired_obj>
5554 */
5555
5556VALUE
5557rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5558{
5559 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5560}
5561
5562/*
5563 * If recursion is detected on the current method and obj, the outermost
5564 * func will be called with (obj, arg, true). All inner func will be
5565 * short-circuited using throw.
5566 */
5567
5568VALUE
5569rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5570{
5571 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5572}
5573
5574VALUE
5575rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5576{
5577 return exec_recursive(func, obj, 0, arg, 1, mid);
5578}
5579
5580/*
5581 * If recursion is detected on the current method, obj and paired_obj,
5582 * the outermost func will be called with (obj, arg, true). All inner
5583 * func will be short-circuited using throw.
5584 */
5585
5586VALUE
5587rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5588{
5589 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5590}
5591
5592/*
5593 * call-seq:
5594 * thread.backtrace -> array or nil
5595 *
5596 * Returns the current backtrace of the target thread.
5597 *
5598 */
5599
5600static VALUE
5601rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5602{
5603 return rb_vm_thread_backtrace(argc, argv, thval);
5604}
5605
5606/* call-seq:
5607 * thread.backtrace_locations(*args) -> array or nil
5608 *
5609 * Returns the execution stack for the target thread---an array containing
5610 * backtrace location objects.
5611 *
5612 * See Thread::Backtrace::Location for more information.
5613 *
5614 * This method behaves similarly to Kernel#caller_locations except it applies
5615 * to a specific thread.
5616 */
5617static VALUE
5618rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5619{
5620 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5621}
5622
5623void
5624Init_Thread_Mutex(void)
5625{
5626 rb_thread_t *th = GET_THREAD();
5627
5628 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5629 rb_native_mutex_initialize(&th->interrupt_lock);
5630}
5631
5632/*
5633 * Document-class: ThreadError
5634 *
5635 * Raised when an invalid operation is attempted on a thread.
5636 *
5637 * For example, when no other thread has been started:
5638 *
5639 * Thread.stop
5640 *
5641 * This will raise the following exception:
5642 *
5643 * ThreadError: stopping only thread
5644 * note: use sleep to stop forever
5645 */
5646
5647void
5648Init_Thread(void)
5649{
5650 rb_thread_t *th = GET_THREAD();
5651
5652 sym_never = ID2SYM(rb_intern_const("never"));
5653 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5654 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5655
5656 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5657 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5658 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5659 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5660 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5661 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5662 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5663 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5664 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5665 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5666 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5667 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5668 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5669 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5670 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5671 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5672 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5673 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5674 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5675
5676 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5677 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5678 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5679 rb_define_method(rb_cThread, "value", thread_value, 0);
5680 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5681 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5682 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5683 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5684 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5685 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5686 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5687 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5688 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5689 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5690 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5691 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5692 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5693 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5694 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5695 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5696 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5697 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5698 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5699 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5700 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5701 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5702 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5703 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5704 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5705 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5706
5707 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5708 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5709 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5710 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5711 rb_define_alias(rb_cThread, "inspect", "to_s");
5712
5713 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5714 "stream closed in another thread");
5715
5716 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5717 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5718 rb_define_method(cThGroup, "list", thgroup_list, 0);
5719 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5720 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5721 rb_define_method(cThGroup, "add", thgroup_add, 1);
5722
5723 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5724
5725 if (ptr) {
5726 long quantum = strtol(ptr, NULL, 0);
5727 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5728 thread_default_quantum_ms = (uint32_t)quantum;
5729 }
5730 else if (0) {
5731 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5732 }
5733 }
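    /*
     * Example invocation (the value is taken as milliseconds, matching
     * thread_default_quantum_ms): RUBY_THREAD_TIMESLICE=50 ruby script.rb
     */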
5734
5735 {
5736 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5737 rb_define_const(cThGroup, "Default", th->thgroup);
5738 }
5739
5741
5742 /* init thread core */
5743 {
5744 /* main thread setting */
5745 {
5746 /* acquire global vm lock */
5747#ifdef HAVE_PTHREAD_NP_H
5748 VM_ASSERT(TH_SCHED(th)->running == th);
5749#endif
5750 // thread_sched_to_running() should not be called because
5751 // it assumes blocked by thread_sched_to_waiting().
5752 // thread_sched_to_running(sched, th);
5753
5754 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5755 th->pending_interrupt_queue_checked = 0;
5756 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5757 }
5758 }
5759
5760 rb_thread_create_timer_thread();
5761
5762 Init_thread_sync();
5763
5764 // TODO: Suppress unused function warning for now
5765 // if (0) rb_thread_sched_destroy(NULL);
5766}
5767
5768int
5769ruby_native_thread_p(void)
5770{
5771 rb_thread_t *th = ruby_thread_from_native();
5772
5773 return th != 0;
5774}
5775
5776#ifdef NON_SCALAR_THREAD_ID
5777 #define thread_id_str(th) (NULL)
5778#else
5779 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5780#endif
5781
5782static void
5783debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5784{
5785 rb_thread_t *th = 0;
5786 VALUE sep = rb_str_new_cstr("\n ");
5787
5788 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5789 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5790 (void *)GET_THREAD(), (void *)r->threads.main);
5791
5792 ccan_list_for_each(&r->threads.set, th, lt_node) {
5793 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5794 "native:%p int:%u",
5795 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5796
5797 if (th->locking_mutex) {
5798 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5799 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5800 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5801 }
5802
5803 {
5804 struct rb_waiting_list *list = th->join_list;
5805 while (list) {
5806                rb_str_catf(msg, "\n   depended by: rb_thread_id:%p", (void *)list->thread);
5807 list = list->next;
5808 }
5809 }
5810 rb_str_catf(msg, "\n ");
5811 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5812 rb_str_catf(msg, "\n");
5813 }
5814}
5815
5816static void
5817rb_check_deadlock(rb_ractor_t *r)
5818{
5819 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5820
5821#ifdef RUBY_THREAD_PTHREAD_H
5822 if (r->threads.sched.readyq_cnt > 0) return;
5823#endif
5824
5825 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5826 int ltnum = rb_ractor_living_thread_num(r);
5827
5828 if (ltnum > sleeper_num) return;
5829 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5830
5831 int found = 0;
5832 rb_thread_t *th = NULL;
5833
5834 ccan_list_for_each(&r->threads.set, th, lt_node) {
5835 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5836 found = 1;
5837 }
5838 else if (th->locking_mutex) {
5839 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5840 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5841 found = 1;
5842 }
5843 }
5844 if (found)
5845 break;
5846 }
5847
5848 if (!found) {
5849 VALUE argv[2];
5850 argv[0] = rb_eFatal;
5851 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5852 debug_deadlock_check(r, argv[1]);
5853 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5854 rb_threadptr_raise(r->threads.main, 2, argv);
5855 }
5856}
5857
5858static void
5859update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5860{
5861 const rb_control_frame_t *cfp = GET_EC()->cfp;
5862 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5863 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5864 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5865 if (lines) {
5866 long line = rb_sourceline() - 1;
5867 VM_ASSERT(line >= 0);
5868 long count;
5869 VALUE num;
5870 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5871 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5872 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5873 rb_ary_push(lines, LONG2FIX(line + 1));
5874 return;
5875 }
5876 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5877 return;
5878 }
5879 num = RARRAY_AREF(lines, line);
5880 if (!FIXNUM_P(num)) return;
5881 count = FIX2LONG(num) + 1;
5882 if (POSFIXABLE(count)) {
5883 RARRAY_ASET(lines, line, LONG2FIX(count));
5884 }
5885 }
5886 }
5887}
5888
5889static void
5890update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5891{
5892 const rb_control_frame_t *cfp = GET_EC()->cfp;
5893 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5894 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5895 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5896 if (branches) {
5897 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5898 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5899 VALUE counters = RARRAY_AREF(branches, 1);
5900 VALUE num = RARRAY_AREF(counters, idx);
5901 count = FIX2LONG(num) + 1;
5902 if (POSFIXABLE(count)) {
5903 RARRAY_ASET(counters, idx, LONG2FIX(count));
5904 }
5905 }
5906 }
5907}
5908
5909const rb_method_entry_t *
5910rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5911{
5912 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5913
5914 if (!me->def) return NULL; // negative cme
5915
5916 retry:
5917 switch (me->def->type) {
5918 case VM_METHOD_TYPE_ISEQ: {
5919 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5920 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5921 path = rb_iseq_path(iseq);
5922 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5923 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5924 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5925 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5926 break;
5927 }
5928 case VM_METHOD_TYPE_BMETHOD: {
5929 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5930 if (iseq) {
5931 rb_iseq_location_t *loc;
5932 rb_iseq_check(iseq);
5933 path = rb_iseq_path(iseq);
5934 loc = &ISEQ_BODY(iseq)->location;
5935 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5936 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5937 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5938 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5939 break;
5940 }
5941 return NULL;
5942 }
5943 case VM_METHOD_TYPE_ALIAS:
5944 me = me->def->body.alias.original_me;
5945 goto retry;
5946 case VM_METHOD_TYPE_REFINED:
5947 me = me->def->body.refined.orig_me;
5948 if (!me) return NULL;
5949 goto retry;
5950 default:
5951 return NULL;
5952 }
5953
5954 /* found */
5955 if (RB_TYPE_P(path, T_ARRAY)) {
5956 path = rb_ary_entry(path, 1);
5957 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5958 }
5959 if (resolved_location) {
5960 resolved_location[0] = path;
5961 resolved_location[1] = beg_pos_lineno;
5962 resolved_location[2] = beg_pos_column;
5963 resolved_location[3] = end_pos_lineno;
5964 resolved_location[4] = end_pos_column;
5965 }
5966 return me;
5967}
5968
5969static void
5970update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5971{
5972 const rb_control_frame_t *cfp = GET_EC()->cfp;
5973 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5974 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5975 VALUE rcount;
5976 long count;
5977
5978 me = rb_resolve_me_location(me, 0);
5979 if (!me) return;
5980
5981 rcount = rb_hash_aref(me2counter, (VALUE) me);
5982 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5983 if (POSFIXABLE(count)) {
5984 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5985 }
5986}
5987
5988VALUE
5989rb_get_coverages(void)
5990{
5991 return GET_VM()->coverages;
5992}
5993
5994int
5995rb_get_coverage_mode(void)
5996{
5997 return GET_VM()->coverage_mode;
5998}
5999
6000void
6001rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
6002{
6003 GET_VM()->coverages = coverages;
6004 GET_VM()->me2counter = me2counter;
6005 GET_VM()->coverage_mode = mode;
6006}
6007
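/* Installs the coverage event hooks according to the current coverage
 * mode: the line hook is always installed, while branch and method hooks
 * are installed only when COVERAGE_TARGET_BRANCHES / COVERAGE_TARGET_METHODS
 * are set. rb_suspend_coverages() below removes the same hooks. */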
6008void
6009rb_resume_coverages(void)
6010{
6011 int mode = GET_VM()->coverage_mode;
6012 VALUE me2counter = GET_VM()->me2counter;
6013 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6014 if (mode & COVERAGE_TARGET_BRANCHES) {
6015 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6016 }
6017 if (mode & COVERAGE_TARGET_METHODS) {
6018 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6019 }
6020}
6021
6022void
6023rb_suspend_coverages(void)
6024{
6025 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6026 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6027 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6028 }
6029 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6030 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6031 }
6032}
6033
6034/* Make coverage arrays empty so old covered files are no longer tracked. */
6035void
6036rb_reset_coverages(void)
6037{
6038 rb_clear_coverages();
6039 rb_iseq_remove_coverage_all();
6040 GET_VM()->coverages = Qfalse;
6041}
6042
6043VALUE
6044rb_default_coverage(int n)
6045{
6046 VALUE coverage = rb_ary_hidden_new_fill(3);
6047 VALUE lines = Qfalse, branches = Qfalse;
6048 int mode = GET_VM()->coverage_mode;
6049
6050 if (mode & COVERAGE_TARGET_LINES) {
6051 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
6052 }
6053 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6054
6055 if (mode & COVERAGE_TARGET_BRANCHES) {
6056 branches = rb_ary_hidden_new_fill(2);
6057 /* internal data structures for branch coverage:
6058 *
6059 * { branch base node =>
6060 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
6061 * branch target id =>
6062 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
6063 * ...
6064 * }],
6065 * ...
6066 * }
6067 *
6068 * Example:
6069 * { NODE_CASE =>
6070 * [1, 0, 4, 3, {
6071 * NODE_WHEN => [2, 8, 2, 9, 0],
6072 * NODE_WHEN => [3, 8, 3, 9, 1],
6073 * ...
6074 * }],
6075 * ...
6076 * }
6077 */
6078 VALUE structure = rb_hash_new();
6079 rb_obj_hide(structure);
6080 RARRAY_ASET(branches, 0, structure);
6081 /* branch execution counters */
6082 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
6083 }
6084 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6085
6086 return coverage;
6087}
6088
6089static VALUE
6090uninterruptible_exit(VALUE v)
6091{
6092 rb_thread_t *cur_th = GET_THREAD();
6093 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6094
6095 cur_th->pending_interrupt_queue_checked = 0;
6096 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6097 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6098 }
6099 return Qnil;
6100}
6101
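/* Runs b_proc(data) with asynchronous interrupts (e.g. exceptions queued by
 * Thread#raise or Thread#kill) deferred, by pushing a { Object => :never }
 * mask onto the pending-interrupt mask stack, then re-checks interrupts once
 * the block returns.
 *
 * Minimal usage sketch from hypothetical extension code (the callback name
 * `critical_section` is illustrative, not part of this API):
 *
 *     static VALUE
 *     critical_section(VALUE arg)
 *     {
 *         // work that must not be cut short by an async exception
 *         return Qnil;
 *     }
 *
 *     VALUE result = rb_uninterruptible(critical_section, Qnil);
 */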
6102VALUE
6103rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6104{
6105 VALUE interrupt_mask = rb_ident_hash_new();
6106 rb_thread_t *cur_th = GET_THREAD();
6107
6108 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6109 OBJ_FREEZE(interrupt_mask);
6110 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6111
6112 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6113
6114 RUBY_VM_CHECK_INTS(cur_th->ec);
6115 return ret;
6116}
6117
6118static void
6119thread_specific_storage_alloc(rb_thread_t *th)
6120{
6121 VM_ASSERT(th->specific_storage == NULL);
6122
6123 if (UNLIKELY(specific_key_count > 0)) {
6124 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6125 }
6126}
6127
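/* Internal per-thread storage for tools built on the C API. The first key
 * must be created while only the main ractor exists (it also allocates the
 * storage for that ractor's existing threads), and only a bounded number of
 * keys (RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) may be created; the get/set
 * accessors below are async and native thread safe.
 *
 * Hedged usage sketch (tool-side pseudocode; `my_state` is illustrative):
 *
 *     static rb_internal_thread_specific_key_t key;
 *     key = rb_internal_thread_specific_key_create();
 *
 *     rb_internal_thread_specific_set(thread_val, key, my_state);
 *     struct my_state *st = rb_internal_thread_specific_get(thread_val, key);
 */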
6128rb_internal_thread_specific_key_t
6129 rb_internal_thread_specific_key_create(void)
6130{
6131 rb_vm_t *vm = GET_VM();
6132
6133 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6134 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6135 }
6136 else if (specific_key_count > RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) {
6137 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6138 }
6139 else {
6140 rb_internal_thread_specific_key_t key = specific_key_count++;
6141
6142 if (key == 0) {
6143 // allocate
6144 rb_ractor_t *cr = GET_RACTOR();
6145 rb_thread_t *th;
6146
6147 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6148 thread_specific_storage_alloc(th);
6149 }
6150 }
6151 return key;
6152 }
6153}
6154
6155// async and native thread safe.
6156void *
6157rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6158{
6159 rb_thread_t *th = DATA_PTR(thread_val);
6160
6161 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6162 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6163 VM_ASSERT(th->specific_storage);
6164
6165 return th->specific_storage[key];
6166}
6167
6168// async and native thread safe.
6169void
6170rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6171{
6172 rb_thread_t *th = DATA_PTR(thread_val);
6173
6174 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6175 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6176 VM_ASSERT(th->specific_storage);
6177
6178 th->specific_storage[key] = data;
6179}
6180
6181// interrupt_exec
6182
6183 struct rb_interrupt_exec_task {
6184 struct ccan_list_node node;
6185
6186 rb_interrupt_exec_func_t *func;
6187 void *data;
6188 enum rb_interrupt_exec_flag flags;
6189};
6190
6191void
6192rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6193{
6194 struct rb_interrupt_exec_task *task;
6195
6196 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6197 if (task->flags & rb_interrupt_exec_flag_value_data) {
6198 rb_gc_mark((VALUE)task->data);
6199 }
6200 }
6201}
6202
6203// native thread safe
6204// th should be available
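// Queues a task on th->interrupt_exec_tasks under th->interrupt_lock and
// flags the thread as interrupted; the task runs later on th when it
// processes its interrupts (threadptr_interrupt_exec_exec), or is freed
// unexecuted by threadptr_interrupt_exec_cleanup if th exits first.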
6205void
6206rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6207{
6208 // should not use ALLOC
6209 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6210 *task = (struct rb_interrupt_exec_task) {
6211 .flags = flags,
6212 .func = func,
6213 .data = data,
6214 };
6215
6216 rb_native_mutex_lock(&th->interrupt_lock);
6217 {
6218 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6219 threadptr_set_interrupt_locked(th, true);
6220 }
6221 rb_native_mutex_unlock(&th->interrupt_lock);
6222}
6223
6224static void
6225threadptr_interrupt_exec_exec(rb_thread_t *th)
6226{
6227 while (1) {
6228 struct rb_interrupt_exec_task *task;
6229
6230 rb_native_mutex_lock(&th->interrupt_lock);
6231 {
6232 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6233 }
6234 rb_native_mutex_unlock(&th->interrupt_lock);
6235
6236 RUBY_DEBUG_LOG("task:%p", task);
6237
6238 if (task) {
6239 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6240 rb_thread_create(task->func, task->data);
6241 }
6242 else {
6243 (*task->func)(task->data);
6244 }
6245 ruby_xfree(task);
6246 }
6247 else {
6248 break;
6249 }
6250 }
6251}
6252
6253static void
6254threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6255{
6256 rb_native_mutex_lock(&th->interrupt_lock);
6257 {
6258 struct rb_interrupt_exec_task *task;
6259
6260 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6261 ruby_xfree(task);
6262 }
6263 }
6264 rb_native_mutex_unlock(&th->interrupt_lock);
6265}
6266
6267// native thread safe
6268// func/data should be native thread safe
6269void
6270rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6271 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6272{
6273 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6274
6275 rb_thread_t *main_th = target_r->threads.main;
6276 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
6277}
6278