Ruby 3.5.0dev (2025-08-13 revision a9230e76ee19716c7d2e035be7bd1be9bdca2b59)
thread.c (a9230e76ee19716c7d2e035be7bd1be9bdca2b59)
1/**********************************************************************
2
3 thread.c -
4
5 $Author$
6
7 Copyright (C) 2004-2007 Koichi Sasada
8
9**********************************************************************/
10
11/*
12 YARV Thread Design
13
14 model 1: Userlevel Thread
15 Same as the traditional ruby thread.
16
17 model 2: Native Thread with Global VM lock
18 Uses pthreads (or Windows threads); Ruby threads run concurrently.
19
20 model 3: Native Thread with fine grain lock
21 Uses pthreads; Ruby threads run concurrently or in parallel.
22
23 model 4: M:N User:Native threads with Global VM lock
24 Combination of models 1 and 2.
25
26 model 5: M:N User:Native thread with fine grain lock
27 Combination of models 1 and 3.
28
29------------------------------------------------------------------------
30
31 model 2:
32 Only a thread which holds the mutex (GVL: Global VM Lock or Giant
33 VM Lock) can run. When scheduling, the running thread releases the
34 GVL. If the running thread attempts a blocking operation, it must
35 release the GVL so that another thread can continue. After the blocking
36 operation, the thread must check for interrupts (RUBY_VM_CHECK_INTS).
37
38 Separate VMs can run in parallel.
39
40 Ruby threads are scheduled by the OS thread scheduler.
41
42------------------------------------------------------------------------
43
44 model 3:
45 All threads run concurrently or in parallel, and exclusive access
46 control is needed to access shared objects. For example, to access a
47 String or Array object, a fine grain lock must be taken every time.
48 */
49
50
51/*
52 * FD_SET, FD_CLR and FD_ISSET have a small sanity check when using glibc
53 * 2.15 or later with _FORTIFY_SOURCE > 0.
54 * However, the check is wrong. Even though Linux's select(2)
55 * supports large fd sizes (>FD_SETSIZE), it wrongly assumes fd is always
56 * less than FD_SETSIZE (i.e. 1024). And then, when HAVE_RB_FD_INIT is
57 * enabled, it doesn't work correctly and makes the program abort. Therefore
58 * we need to disable FORTIFY_SOURCE until glibc fixes it.
59 */
60#undef _FORTIFY_SOURCE
61#undef __USE_FORTIFY_LEVEL
62#define __USE_FORTIFY_LEVEL 0
63
64/* for model 2 */
65
66#include "ruby/internal/config.h"
67
68#ifdef __linux__
69// Normally, gcc(1) translates calls to alloca() with inlined code. This is not done when either the -ansi, -std=c89, -std=c99, or the -std=c11 option is given and the header <alloca.h> is not included.
70# include <alloca.h>
71#endif
72
73#define TH_SCHED(th) (&(th)->ractor->threads.sched)
74
75#include "eval_intern.h"
76#include "hrtime.h"
77#include "internal.h"
78#include "internal/class.h"
79#include "internal/cont.h"
80#include "internal/error.h"
81#include "internal/eval.h"
82#include "internal/gc.h"
83#include "internal/hash.h"
84#include "internal/io.h"
85#include "internal/object.h"
86#include "internal/proc.h"
87#include "ruby/fiber/scheduler.h"
88#include "internal/signal.h"
89#include "internal/thread.h"
90#include "internal/time.h"
91#include "internal/warnings.h"
92#include "iseq.h"
93#include "ruby/debug.h"
94#include "ruby/io.h"
95#include "ruby/thread.h"
96#include "ruby/thread_native.h"
97#include "timev.h"
98#include "vm_core.h"
99#include "ractor_core.h"
100#include "vm_debug.h"
101#include "vm_sync.h"
102
103#include "ccan/list/list.h"
104
105#ifndef USE_NATIVE_THREAD_PRIORITY
106#define USE_NATIVE_THREAD_PRIORITY 0
107#define RUBY_THREAD_PRIORITY_MAX 3
108#define RUBY_THREAD_PRIORITY_MIN -3
109#endif
110
111static VALUE rb_cThreadShield;
112static VALUE cThGroup;
113
114static VALUE sym_immediate;
115static VALUE sym_on_blocking;
116static VALUE sym_never;
117
118static uint32_t thread_default_quantum_ms = 100;
119
120#define THREAD_LOCAL_STORAGE_INITIALISED FL_USER13
121#define THREAD_LOCAL_STORAGE_INITIALISED_P(th) RB_FL_TEST_RAW((th), THREAD_LOCAL_STORAGE_INITIALISED)
122
123static inline VALUE
124rb_thread_local_storage(VALUE thread)
125{
126 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
127 rb_ivar_set(thread, idLocals, rb_hash_new());
128 RB_FL_SET_RAW(thread, THREAD_LOCAL_STORAGE_INITIALISED);
129 }
130 return rb_ivar_get(thread, idLocals);
131}
132
133enum SLEEP_FLAGS {
134 SLEEP_DEADLOCKABLE = 0x01,
135 SLEEP_SPURIOUS_CHECK = 0x02,
136 SLEEP_ALLOW_SPURIOUS = 0x04,
137 SLEEP_NO_CHECKINTS = 0x08,
138};
139
140static void sleep_forever(rb_thread_t *th, unsigned int fl);
141static int sleep_hrtime(rb_thread_t *, rb_hrtime_t, unsigned int fl);
142
143static void rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end);
144static int rb_threadptr_dead(rb_thread_t *th);
145static void rb_check_deadlock(rb_ractor_t *r);
146static int rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th);
147static const char *thread_status_name(rb_thread_t *th, int detail);
148static int hrtime_update_expire(rb_hrtime_t *, const rb_hrtime_t);
149NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
150MAYBE_UNUSED(static int consume_communication_pipe(int fd));
151
152static rb_atomic_t system_working = 1;
153static rb_internal_thread_specific_key_t specific_key_count;
154
155/********************************************************************************/
156
157#define THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
158
159struct rb_blocking_region_buffer {
160 enum rb_thread_status prev_status;
161};
162
163static int unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted);
164static void unblock_function_clear(rb_thread_t *th);
165
166static inline int blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
167 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted);
168static inline void blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region);
169
170#define THREAD_BLOCKING_BEGIN(th) do { \
171 struct rb_thread_sched * const sched = TH_SCHED(th); \
172 RB_VM_SAVE_MACHINE_CONTEXT(th); \
173 thread_sched_to_waiting((sched), (th));
174
175#define THREAD_BLOCKING_END(th) \
176 thread_sched_to_running((sched), (th)); \
177 rb_ractor_thread_switch(th->ractor, th, false); \
178} while(0)
179
180#ifdef __GNUC__
181#ifdef HAVE_BUILTIN___BUILTIN_CHOOSE_EXPR_CONSTANT_P
182#define only_if_constant(expr, notconst) __builtin_choose_expr(__builtin_constant_p(expr), (expr), (notconst))
183#else
184#define only_if_constant(expr, notconst) (__builtin_constant_p(expr) ? (expr) : (notconst))
185#endif
186#else
187#define only_if_constant(expr, notconst) notconst
188#endif
189#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted) do { \
190 struct rb_blocking_region_buffer __region; \
191 if (blocking_region_begin(th, &__region, (ubf), (ubfarg), fail_if_interrupted) || \
192 /* always return true unless fail_if_interrupted */ \
193 !only_if_constant(fail_if_interrupted, TRUE)) { \
194 /* Important that this is inlined into the macro, and not part of \
195 * blocking_region_begin - see bug #20493 */ \
196 RB_VM_SAVE_MACHINE_CONTEXT(th); \
197 thread_sched_to_waiting(TH_SCHED(th), th); \
198 exec; \
199 blocking_region_end(th, &__region); \
200 }; \
201} while(0)
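
/*
 * A minimal usage sketch of BLOCKING_REGION (illustration only, not code from
 * this file; `blocking_read', `my_ubf', `fd', `buf' and `len' are hypothetical):
 *
 *   rb_thread_t *th = GET_THREAD();
 *   ssize_t n;
 *   BLOCKING_REGION(th, {
 *       n = blocking_read(fd, buf, len);  // executed without the GVL
 *   }, my_ubf, th, FALSE);
 *   // With fail_if_interrupted == FALSE the body always runs; passing TRUE
 *   // skips it when an interrupt is already pending (see rb_nogvl() below).
 */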
202
203/*
204 * Returns true if this thread was spuriously interrupted (e.g. hit by
205 * Thread#run or a Ruby-level Signal.trap handler ran); false otherwise.
206 */
207#define RUBY_VM_CHECK_INTS_BLOCKING(ec) vm_check_ints_blocking(ec)
208static inline int
209vm_check_ints_blocking(rb_execution_context_t *ec)
210{
211#ifdef RUBY_ASSERT_CRITICAL_SECTION
212 VM_ASSERT(ruby_assert_critical_section_entered == 0);
213#endif
214
215 rb_thread_t *th = rb_ec_thread_ptr(ec);
216
217 if (LIKELY(rb_threadptr_pending_interrupt_empty_p(th))) {
218 if (LIKELY(!RUBY_VM_INTERRUPTED_ANY(ec))) return FALSE;
219 }
220 else {
221 th->pending_interrupt_queue_checked = 0;
222 RUBY_VM_SET_INTERRUPT(ec);
223 }
224 return rb_threadptr_execute_interrupts(th, 1);
225}
226
227int
228rb_vm_check_ints_blocking(rb_execution_context_t *ec)
229{
230 return vm_check_ints_blocking(ec);
231}
232
233/*
234 * poll() is supported by many OSes, but so far Linux is the only
235 * one we know of that supports using poll() in all places select()
236 * would work.
237 */
238#if defined(HAVE_POLL)
239# if defined(__linux__)
240# define USE_POLL
241# endif
242# if defined(__FreeBSD_version) && __FreeBSD_version >= 1100000
243# define USE_POLL
244 /* FreeBSD does not set POLLOUT when POLLHUP happens */
245# define POLLERR_SET (POLLHUP | POLLERR)
246# endif
247#endif
248
249static void
250timeout_prepare(rb_hrtime_t **to, rb_hrtime_t *rel, rb_hrtime_t *end,
251 const struct timeval *timeout)
252{
253 if (timeout) {
254 *rel = rb_timeval2hrtime(timeout);
255 *end = rb_hrtime_add(rb_hrtime_now(), *rel);
256 *to = rel;
257 }
258 else {
259 *to = 0;
260 }
261}
262
263MAYBE_UNUSED(NOINLINE(static int thread_start_func_2(rb_thread_t *th, VALUE *stack_start)));
264MAYBE_UNUSED(static bool th_has_dedicated_nt(const rb_thread_t *th));
265MAYBE_UNUSED(static int waitfd_to_waiting_flag(int wfd_event));
266
267#include THREAD_IMPL_SRC
268
269/*
270 * TODO: somebody with win32 knowledge should be able to get rid of
271 * timer-thread by busy-waiting on signals. And it should be possible
272 * to make the GVL in thread_pthread.c be platform-independent.
273 */
274#ifndef BUSY_WAIT_SIGNALS
275# define BUSY_WAIT_SIGNALS (0)
276#endif
277
278#ifndef USE_EVENTFD
279# define USE_EVENTFD (0)
280#endif
281
282#include "thread_sync.c"
283
284void
285rb_nativethread_lock_initialize(rb_nativethread_lock_t *lock)
286{
287 rb_native_mutex_initialize(lock);
288}
289
290void
291rb_nativethread_lock_destroy(rb_nativethread_lock_t *lock)
292{
293 rb_native_mutex_destroy(lock);
294}
295
296void
297rb_nativethread_lock_lock(rb_nativethread_lock_t *lock)
298{
299 rb_native_mutex_lock(lock);
300}
301
302void
303rb_nativethread_lock_unlock(rb_nativethread_lock_t *lock)
304{
305 rb_native_mutex_unlock(lock);
306}
307
308static int
309unblock_function_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg, int fail_if_interrupted)
310{
311 do {
312 if (fail_if_interrupted) {
313 if (RUBY_VM_INTERRUPTED_ANY(th->ec)) {
314 return FALSE;
315 }
316 }
317 else {
318 RUBY_VM_CHECK_INTS(th->ec);
319 }
320
321 rb_native_mutex_lock(&th->interrupt_lock);
322 } while (!th->ec->raised_flag && RUBY_VM_INTERRUPTED_ANY(th->ec) &&
323 (rb_native_mutex_unlock(&th->interrupt_lock), TRUE));
324
325 VM_ASSERT(th->unblock.func == NULL);
326
327 th->unblock.func = func;
328 th->unblock.arg = arg;
329 rb_native_mutex_unlock(&th->interrupt_lock);
330
331 return TRUE;
332}
333
334static void
335unblock_function_clear(rb_thread_t *th)
336{
337 rb_native_mutex_lock(&th->interrupt_lock);
338 th->unblock.func = 0;
339 rb_native_mutex_unlock(&th->interrupt_lock);
340}
341
342static void
343threadptr_set_interrupt_locked(rb_thread_t *th, bool trap)
344{
345 // th->interrupt_lock should be acquired here
346
347 RUBY_DEBUG_LOG("th:%u trap:%d", rb_th_serial(th), trap);
348
349 if (trap) {
350 RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
351 }
352 else {
353 RUBY_VM_SET_INTERRUPT(th->ec);
354 }
355
356 if (th->unblock.func != NULL) {
357 (th->unblock.func)(th->unblock.arg);
358 }
359 else {
360 /* none */
361 }
362}
363
364static void
365threadptr_set_interrupt(rb_thread_t *th, int trap)
366{
367 rb_native_mutex_lock(&th->interrupt_lock);
368 {
369 threadptr_set_interrupt_locked(th, trap);
370 }
371 rb_native_mutex_unlock(&th->interrupt_lock);
372}
373
374/* Set interrupt flag on another thread or current thread, and call its UBF if it has one set */
375void
376rb_threadptr_interrupt(rb_thread_t *th)
377{
378 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
379 threadptr_set_interrupt(th, false);
380}
381
382static void
383threadptr_trap_interrupt(rb_thread_t *th)
384{
385 threadptr_set_interrupt(th, true);
386}
387
388static void
389terminate_all(rb_ractor_t *r, const rb_thread_t *main_thread)
390{
391 rb_thread_t *th = 0;
392
393 ccan_list_for_each(&r->threads.set, th, lt_node) {
394 if (th != main_thread) {
395 RUBY_DEBUG_LOG("terminate start th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
396
397 rb_threadptr_pending_interrupt_enque(th, RUBY_FATAL_THREAD_TERMINATED);
398 rb_threadptr_interrupt(th);
399
400 RUBY_DEBUG_LOG("terminate done th:%u status:%s", rb_th_serial(th), thread_status_name(th, TRUE));
401 }
402 else {
403 RUBY_DEBUG_LOG("main thread th:%u", rb_th_serial(th));
404 }
405 }
406}
407
408static void
409rb_threadptr_join_list_wakeup(rb_thread_t *thread)
410{
411 while (thread->join_list) {
412 struct rb_waiting_list *join_list = thread->join_list;
413
414 // Consume the entry from the join list:
415 thread->join_list = join_list->next;
416
417 rb_thread_t *target_thread = join_list->thread;
418
419 if (target_thread->scheduler != Qnil && join_list->fiber) {
420 rb_fiber_scheduler_unblock(target_thread->scheduler, target_thread->self, rb_fiberptr_self(join_list->fiber));
421 }
422 else {
423 rb_threadptr_interrupt(target_thread);
424
425 switch (target_thread->status) {
426 case THREAD_STOPPED:
427 case THREAD_STOPPED_FOREVER:
428 target_thread->status = THREAD_RUNNABLE;
429 break;
430 default:
431 break;
432 }
433 }
434 }
435}
436
437void
438rb_threadptr_unlock_all_locking_mutexes(rb_thread_t *th)
439{
440 while (th->keeping_mutexes) {
441 rb_mutex_t *mutex = th->keeping_mutexes;
442 th->keeping_mutexes = mutex->next_mutex;
443
444 // rb_warn("mutex #<%p> was not unlocked by thread #<%p>", (void *)mutex, (void*)th);
445
446 const char *error_message = rb_mutex_unlock_th(mutex, th, mutex->fiber);
447 if (error_message) rb_bug("invalid keeping_mutexes: %s", error_message);
448 }
449}
450
451void
452rb_thread_terminate_all(rb_thread_t *th)
453{
454 rb_ractor_t *cr = th->ractor;
455 rb_execution_context_t * volatile ec = th->ec;
456 volatile int sleeping = 0;
457
458 if (cr->threads.main != th) {
459 rb_bug("rb_thread_terminate_all: called by child thread (%p, %p)",
460 (void *)cr->threads.main, (void *)th);
461 }
462
463 /* unlock all locking mutexes */
464 rb_threadptr_unlock_all_locking_mutexes(th);
465
466 EC_PUSH_TAG(ec);
467 if (EC_EXEC_TAG() == TAG_NONE) {
468 retry:
469 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
470
471 terminate_all(cr, th);
472
473 while (rb_ractor_living_thread_num(cr) > 1) {
474 rb_hrtime_t rel = RB_HRTIME_PER_SEC;
475 /*
476 * The thread-exit routine in thread_start_func_2 notifies us
477 * when the last sub-thread exits.
478 */
479 sleeping = 1;
480 native_sleep(th, &rel);
481 RUBY_VM_CHECK_INTS_BLOCKING(ec);
482 sleeping = 0;
483 }
484 }
485 else {
486 /*
487 * When caught an exception (e.g. Ctrl+C), let's broadcast
488 * kill request again to ensure killing all threads even
489 * if they are blocked on sleep, mutex, etc.
490 */
491 if (sleeping) {
492 sleeping = 0;
493 goto retry;
494 }
495 }
496 EC_POP_TAG();
497}
498
499void rb_threadptr_root_fiber_terminate(rb_thread_t *th);
500static void threadptr_interrupt_exec_cleanup(rb_thread_t *th);
501
502static void
503thread_cleanup_func_before_exec(void *th_ptr)
504{
505 rb_thread_t *th = th_ptr;
506 th->status = THREAD_KILLED;
507
508 // The thread stack doesn't exist in the forked process:
509 th->ec->machine.stack_start = th->ec->machine.stack_end = NULL;
510
511 threadptr_interrupt_exec_cleanup(th);
512 rb_threadptr_root_fiber_terminate(th);
513}
514
515static void
516thread_cleanup_func(void *th_ptr, int atfork)
517{
518 rb_thread_t *th = th_ptr;
519
520 th->locking_mutex = Qfalse;
521 thread_cleanup_func_before_exec(th_ptr);
522
523 if (atfork) {
524 native_thread_destroy_atfork(th->nt);
525 th->nt = NULL;
526 return;
527 }
528
529 rb_native_mutex_destroy(&th->interrupt_lock);
530}
531
532static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
533static VALUE rb_thread_to_s(VALUE thread);
534
535void
536ruby_thread_init_stack(rb_thread_t *th, void *local_in_parent_frame)
537{
538 native_thread_init_stack(th, local_in_parent_frame);
539}
540
541const VALUE *
542rb_vm_proc_local_ep(VALUE proc)
543{
544 const VALUE *ep = vm_proc_ep(proc);
545
546 if (ep) {
547 return rb_vm_ep_local_ep(ep);
548 }
549 else {
550 return NULL;
551 }
552}
553
554// for ractor, defined in vm.c
555VALUE rb_vm_invoke_proc_with_self(rb_execution_context_t *ec, rb_proc_t *proc, VALUE self,
556 int argc, const VALUE *argv, int kw_splat, VALUE passed_block_handler);
557
558static VALUE
559thread_do_start_proc(rb_thread_t *th)
560{
561 VALUE args = th->invoke_arg.proc.args;
562 const VALUE *args_ptr;
563 int args_len;
564 VALUE procval = th->invoke_arg.proc.proc;
565 rb_proc_t *proc;
566 GetProcPtr(procval, proc);
567
568 th->ec->errinfo = Qnil;
569 th->ec->root_lep = rb_vm_proc_local_ep(procval);
570 th->ec->root_svar = Qfalse;
571
572 vm_check_ints_blocking(th->ec);
573
574 if (th->invoke_type == thread_invoke_type_ractor_proc) {
575 VALUE self = rb_ractor_self(th->ractor);
576 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
577
578 VM_ASSERT(FIXNUM_P(args));
579 args_len = FIX2INT(args);
580 args_ptr = ALLOCA_N(VALUE, args_len);
581 rb_ractor_receive_parameters(th->ec, th->ractor, args_len, (VALUE *)args_ptr);
582 vm_check_ints_blocking(th->ec);
583
584 return rb_vm_invoke_proc_with_self(
585 th->ec, proc, self,
586 args_len, args_ptr,
587 th->invoke_arg.proc.kw_splat,
588 VM_BLOCK_HANDLER_NONE
589 );
590 }
591 else {
592 args_len = RARRAY_LENINT(args);
593 if (args_len < 8) {
594 /* free proc.args if the length is small enough */
595 args_ptr = ALLOCA_N(VALUE, args_len);
596 MEMCPY((VALUE *)args_ptr, RARRAY_CONST_PTR(args), VALUE, args_len);
597 th->invoke_arg.proc.args = Qnil;
598 }
599 else {
600 args_ptr = RARRAY_CONST_PTR(args);
601 }
602
603 vm_check_ints_blocking(th->ec);
604
605 return rb_vm_invoke_proc(
606 th->ec, proc,
607 args_len, args_ptr,
608 th->invoke_arg.proc.kw_splat,
609 VM_BLOCK_HANDLER_NONE
610 );
611 }
612}
613
614static VALUE
615thread_do_start(rb_thread_t *th)
616{
617 native_set_thread_name(th);
618 VALUE result = Qundef;
619
620 switch (th->invoke_type) {
621 case thread_invoke_type_proc:
622 result = thread_do_start_proc(th);
623 break;
624
625 case thread_invoke_type_ractor_proc:
626 result = thread_do_start_proc(th);
627 rb_ractor_atexit(th->ec, result);
628 break;
629
630 case thread_invoke_type_func:
631 result = (*th->invoke_arg.func.func)(th->invoke_arg.func.arg);
632 break;
633
634 case thread_invoke_type_none:
635 rb_bug("unreachable");
636 }
637
638 return result;
639}
640
641void rb_ec_clear_current_thread_trace_func(const rb_execution_context_t *ec);
642
643static int
644thread_start_func_2(rb_thread_t *th, VALUE *stack_start)
645{
646 RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
647 VM_ASSERT(th != th->vm->ractor.main_thread);
648
649 enum ruby_tag_type state;
650 VALUE errinfo = Qnil;
651 rb_thread_t *ractor_main_th = th->ractor->threads.main;
652
653 // setup ractor
654 if (rb_ractor_status_p(th->ractor, ractor_blocking)) {
655 RB_VM_LOCK();
656 {
657 rb_vm_ractor_blocking_cnt_dec(th->vm, th->ractor, __FILE__, __LINE__);
658 rb_ractor_t *r = th->ractor;
659 r->r_stdin = rb_io_prep_stdin();
660 r->r_stdout = rb_io_prep_stdout();
661 r->r_stderr = rb_io_prep_stderr();
662 }
663 RB_VM_UNLOCK();
664 }
665
666 // Ensure that we are not joinable.
667 VM_ASSERT(UNDEF_P(th->value));
668
669 int fiber_scheduler_closed = 0, event_thread_end_hooked = 0;
670 VALUE result = Qundef;
671
672 EC_PUSH_TAG(th->ec);
673
674 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
675 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_BEGIN, th->self, 0, 0, 0, Qundef);
676
677 result = thread_do_start(th);
678 }
679
680 if (!fiber_scheduler_closed) {
681 fiber_scheduler_closed = 1;
682 rb_fiber_scheduler_set(Qnil);
683 }
684
685 if (!event_thread_end_hooked) {
686 event_thread_end_hooked = 1;
687 EXEC_EVENT_HOOK(th->ec, RUBY_EVENT_THREAD_END, th->self, 0, 0, 0, Qundef);
688 }
689
690 if (state == TAG_NONE) {
691 // This must be set AFTER doing all user-level code. At this point, the thread is effectively finished and calls to `Thread#join` will succeed.
692 th->value = result;
693 }
694 else {
695 errinfo = th->ec->errinfo;
696
697 VALUE exc = rb_vm_make_jump_tag_but_local_jump(state, Qundef);
698 if (!NIL_P(exc)) errinfo = exc;
699
700 if (state == TAG_FATAL) {
701 if (th->invoke_type == thread_invoke_type_ractor_proc) {
702 rb_ractor_atexit(th->ec, Qnil);
703 }
704 /* fatal error within this thread, need to stop whole script */
705 }
706 else if (rb_obj_is_kind_of(errinfo, rb_eSystemExit)) {
707 if (th->invoke_type == thread_invoke_type_ractor_proc) {
708 rb_ractor_atexit_exception(th->ec);
709 }
710
711 /* exit on main_thread. */
712 }
713 else {
714 if (th->report_on_exception) {
715 VALUE mesg = rb_thread_to_s(th->self);
716 rb_str_cat_cstr(mesg, " terminated with exception (report_on_exception is true):\n");
717 rb_write_error_str(mesg);
718 rb_ec_error_print(th->ec, errinfo);
719 }
720
721 if (th->invoke_type == thread_invoke_type_ractor_proc) {
722 rb_ractor_atexit_exception(th->ec);
723 }
724
725 if (th->vm->thread_abort_on_exception ||
726 th->abort_on_exception || RTEST(ruby_debug)) {
727 /* exit on main_thread */
728 }
729 else {
730 errinfo = Qnil;
731 }
732 }
733 th->value = Qnil;
734 }
735
736 // The thread is effectively finished and can be joined.
737 VM_ASSERT(!UNDEF_P(th->value));
738
739 rb_threadptr_join_list_wakeup(th);
740 rb_threadptr_unlock_all_locking_mutexes(th);
741
742 if (th->invoke_type == thread_invoke_type_ractor_proc) {
743 rb_thread_terminate_all(th);
744 rb_ractor_teardown(th->ec);
745 }
746
747 th->status = THREAD_KILLED;
748 RUBY_DEBUG_LOG("killed th:%u", rb_th_serial(th));
749
750 if (th->vm->ractor.main_thread == th) {
751 ruby_stop(0);
752 }
753
754 if (RB_TYPE_P(errinfo, T_OBJECT)) {
755 /* treat as a normal error object */
756 rb_threadptr_raise(ractor_main_th, 1, &errinfo);
757 }
758
759 EC_POP_TAG();
760
761 rb_ec_clear_current_thread_trace_func(th->ec);
762
763 /* locking_mutex must be Qfalse */
764 if (th->locking_mutex != Qfalse) {
765 rb_bug("thread_start_func_2: locking_mutex must not be set (%p:%"PRIxVALUE")",
766 (void *)th, th->locking_mutex);
767 }
768
769 if (ractor_main_th->status == THREAD_KILLED &&
770 th->ractor->threads.cnt <= 2 /* main thread and this thread */) {
771 /* I'm the last thread; wake up the main thread from rb_thread_terminate_all */
772 rb_threadptr_interrupt(ractor_main_th);
773 }
774
775 rb_check_deadlock(th->ractor);
776
777 rb_fiber_close(th->ec->fiber_ptr);
778
779 thread_cleanup_func(th, FALSE);
780 VM_ASSERT(th->ec->vm_stack == NULL);
781
782 if (th->invoke_type == thread_invoke_type_ractor_proc) {
783 // After rb_ractor_living_threads_remove(), GC can happen at any time and
784 // this ractor can be collected (destroying the GVL).
785 // So the GVL must be released (thread_sched_to_dead) before removal.
786 thread_sched_to_dead(TH_SCHED(th), th);
787 rb_ractor_living_threads_remove(th->ractor, th);
788 }
789 else {
790 rb_ractor_living_threads_remove(th->ractor, th);
791 thread_sched_to_dead(TH_SCHED(th), th);
792 }
793
794 return 0;
795}
796
797struct thread_create_params {
798 enum thread_invoke_type type;
799
800 // for normal proc thread
801 VALUE args;
802 VALUE proc;
803
804 // for ractor
805 rb_ractor_t *g;
806
807 // for func
808 VALUE (*fn)(void *);
809};
810
811static void thread_specific_storage_alloc(rb_thread_t *th);
812
813static VALUE
814thread_create_core(VALUE thval, struct thread_create_params *params)
815{
816 rb_execution_context_t *ec = GET_EC();
817 rb_thread_t *th = rb_thread_ptr(thval), *current_th = rb_ec_thread_ptr(ec);
818 int err;
819
820 thread_specific_storage_alloc(th);
821
822 if (OBJ_FROZEN(current_th->thgroup)) {
823 rb_raise(rb_eThreadError,
824 "can't start a new thread (frozen ThreadGroup)");
825 }
826
827 rb_fiber_inherit_storage(ec, th->ec->fiber_ptr);
828
829 switch (params->type) {
830 case thread_invoke_type_proc:
831 th->invoke_type = thread_invoke_type_proc;
832 th->invoke_arg.proc.args = params->args;
833 th->invoke_arg.proc.proc = params->proc;
834 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
835 break;
836
837 case thread_invoke_type_ractor_proc:
838#if RACTOR_CHECK_MODE > 0
839 rb_ractor_setup_belonging_to(thval, rb_ractor_id(params->g));
840#endif
841 th->invoke_type = thread_invoke_type_ractor_proc;
842 th->ractor = params->g;
843 th->ractor->threads.main = th;
844 th->invoke_arg.proc.proc = rb_proc_isolate_bang(params->proc);
845 th->invoke_arg.proc.args = INT2FIX(RARRAY_LENINT(params->args));
846 th->invoke_arg.proc.kw_splat = rb_keyword_given_p();
847 rb_ractor_send_parameters(ec, params->g, params->args);
848 break;
849
850 case thread_invoke_type_func:
851 th->invoke_type = thread_invoke_type_func;
852 th->invoke_arg.func.func = params->fn;
853 th->invoke_arg.func.arg = (void *)params->args;
854 break;
855
856 default:
857 rb_bug("unreachable");
858 }
859
860 th->priority = current_th->priority;
861 th->thgroup = current_th->thgroup;
862
863 th->pending_interrupt_queue = rb_ary_hidden_new(0);
864 th->pending_interrupt_queue_checked = 0;
865 th->pending_interrupt_mask_stack = rb_ary_dup(current_th->pending_interrupt_mask_stack);
866 RBASIC_CLEAR_CLASS(th->pending_interrupt_mask_stack);
867
868 rb_native_mutex_initialize(&th->interrupt_lock);
869
870 RUBY_DEBUG_LOG("r:%u th:%u", rb_ractor_id(th->ractor), rb_th_serial(th));
871
872 rb_ractor_living_threads_insert(th->ractor, th);
873
874 /* kick thread */
875 err = native_thread_create(th);
876 if (err) {
877 th->status = THREAD_KILLED;
878 rb_ractor_living_threads_remove(th->ractor, th);
879 rb_raise(rb_eThreadError, "can't create Thread: %s", strerror(err));
880 }
881 return thval;
882}
883
884#define threadptr_initialized(th) ((th)->invoke_type != thread_invoke_type_none)
885
886/*
887 * call-seq:
888 * Thread.new { ... } -> thread
889 * Thread.new(*args, &proc) -> thread
890 * Thread.new(*args) { |args| ... } -> thread
891 *
892 * Creates a new thread executing the given block.
893 *
894 * Any +args+ given to ::new will be passed to the block:
895 *
896 * arr = []
897 * a, b, c = 1, 2, 3
898 * Thread.new(a,b,c) { |d,e,f| arr << d << e << f }.join
899 * arr #=> [1, 2, 3]
900 *
901 * A ThreadError exception is raised if ::new is called without a block.
902 *
903 * If you're going to subclass Thread, be sure to call super in your
904 * +initialize+ method, otherwise a ThreadError will be raised.
905 */
906static VALUE
907thread_s_new(int argc, VALUE *argv, VALUE klass)
908{
909 rb_thread_t *th;
910 VALUE thread = rb_thread_alloc(klass);
911
912 if (GET_RACTOR()->threads.main->status == THREAD_KILLED) {
913 rb_raise(rb_eThreadError, "can't alloc thread");
914 }
915
916 rb_obj_call_init_kw(thread, argc, argv, RB_PASS_CALLED_KEYWORDS);
917 th = rb_thread_ptr(thread);
918 if (!threadptr_initialized(th)) {
919 rb_raise(rb_eThreadError, "uninitialized thread - check '%"PRIsVALUE"#initialize'",
920 klass);
921 }
922 return thread;
923}
924
925/*
926 * call-seq:
927 * Thread.start([args]*) {|args| block } -> thread
928 * Thread.fork([args]*) {|args| block } -> thread
929 *
930 * Basically the same as ::new. However, if class Thread is subclassed, then
931 * calling +start+ in that subclass will not invoke the subclass's
932 * +initialize+ method.
933 */
934
935static VALUE
936thread_start(VALUE klass, VALUE args)
937{
938 struct thread_create_params params = {
939 .type = thread_invoke_type_proc,
940 .args = args,
941 .proc = rb_block_proc(),
942 };
943 return thread_create_core(rb_thread_alloc(klass), &params);
944}
945
946static VALUE
947threadptr_invoke_proc_location(rb_thread_t *th)
948{
949 if (th->invoke_type == thread_invoke_type_proc) {
950 return rb_proc_location(th->invoke_arg.proc.proc);
951 }
952 else {
953 return Qnil;
954 }
955}
956
957/* :nodoc: */
958static VALUE
959thread_initialize(VALUE thread, VALUE args)
960{
961 rb_thread_t *th = rb_thread_ptr(thread);
962
963 if (!rb_block_given_p()) {
964 rb_raise(rb_eThreadError, "must be called with a block");
965 }
966 else if (th->invoke_type != thread_invoke_type_none) {
967 VALUE loc = threadptr_invoke_proc_location(th);
968 if (!NIL_P(loc)) {
969 rb_raise(rb_eThreadError,
970 "already initialized thread - %"PRIsVALUE":%"PRIsVALUE,
971 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
972 }
973 else {
974 rb_raise(rb_eThreadError, "already initialized thread");
975 }
976 }
977 else {
978 struct thread_create_params params = {
979 .type = thread_invoke_type_proc,
980 .args = args,
981 .proc = rb_block_proc(),
982 };
983 return thread_create_core(thread, &params);
984 }
985}
986
987VALUE
988rb_thread_create(VALUE (*fn)(void *), void *arg)
989{
990 struct thread_create_params params = {
991 .type = thread_invoke_type_func,
992 .fn = fn,
993 .args = (VALUE)arg,
994 };
995 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
996}
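
/*
 * Usage sketch for rb_thread_create() (illustration only; `worker' and
 * `input' are hypothetical):
 *
 *   static VALUE
 *   worker(void *arg)
 *   {
 *       int n = *(int *)arg;
 *       return INT2FIX(n * 2);  // runs in the new Ruby thread
 *   }
 *
 *   static int input = 21;
 *   VALUE thread = rb_thread_create(worker, &input);
 *   // behaves like a Thread created by Thread.new; for example,
 *   // rb_funcall(thread, rb_intern("join"), 0) waits for completion.
 */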
997
998VALUE
999rb_thread_create_ractor(rb_ractor_t *r, VALUE args, VALUE proc)
1000{
1001 struct thread_create_params params = {
1002 .type = thread_invoke_type_ractor_proc,
1003 .g = r,
1004 .args = args,
1005 .proc = proc,
1006 };
1007 return thread_create_core(rb_thread_alloc(rb_cThread), &params);
1008}
1009
1011struct join_arg {
1012 struct rb_waiting_list *waiter;
1013 rb_thread_t *target;
1014 VALUE timeout;
1015 rb_hrtime_t *limit;
1016};
1017
1018static VALUE
1019remove_from_join_list(VALUE arg)
1020{
1021 struct join_arg *p = (struct join_arg *)arg;
1022 rb_thread_t *target_thread = p->target;
1023
1024 if (target_thread->status != THREAD_KILLED) {
1025 struct rb_waiting_list **join_list = &target_thread->join_list;
1026
1027 while (*join_list) {
1028 if (*join_list == p->waiter) {
1029 *join_list = (*join_list)->next;
1030 break;
1031 }
1032
1033 join_list = &(*join_list)->next;
1034 }
1035 }
1036
1037 return Qnil;
1038}
1039
1040static int
1041thread_finished(rb_thread_t *th)
1042{
1043 return th->status == THREAD_KILLED || !UNDEF_P(th->value);
1044}
1045
1046static VALUE
1047thread_join_sleep(VALUE arg)
1048{
1049 struct join_arg *p = (struct join_arg *)arg;
1050 rb_thread_t *target_th = p->target, *th = p->waiter->thread;
1051 rb_hrtime_t end = 0, *limit = p->limit;
1052
1053 if (limit) {
1054 end = rb_hrtime_add(*limit, rb_hrtime_now());
1055 }
1056
1057 while (!thread_finished(target_th)) {
1058 VALUE scheduler = rb_fiber_scheduler_current();
1059
1060 if (!limit) {
1061 if (scheduler != Qnil) {
1062 rb_fiber_scheduler_block(scheduler, target_th->self, Qnil);
1063 }
1064 else {
1065 sleep_forever(th, SLEEP_DEADLOCKABLE | SLEEP_ALLOW_SPURIOUS | SLEEP_NO_CHECKINTS);
1066 }
1067 }
1068 else {
1069 if (hrtime_update_expire(limit, end)) {
1070 RUBY_DEBUG_LOG("timeout target_th:%u", rb_th_serial(target_th));
1071 return Qfalse;
1072 }
1073
1074 if (scheduler != Qnil) {
1075 VALUE timeout = rb_float_new(hrtime2double(*limit));
1076 rb_fiber_scheduler_block(scheduler, target_th->self, timeout);
1077 }
1078 else {
1079 th->status = THREAD_STOPPED;
1080 native_sleep(th, limit);
1081 }
1082 }
1083 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1084 th->status = THREAD_RUNNABLE;
1085
1086 RUBY_DEBUG_LOG("interrupted target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1087 }
1088
1089 return Qtrue;
1090}
1091
1092static VALUE
1093thread_join(rb_thread_t *target_th, VALUE timeout, rb_hrtime_t *limit)
1094{
1095 rb_execution_context_t *ec = GET_EC();
1096 rb_thread_t *th = ec->thread_ptr;
1097 rb_fiber_t *fiber = ec->fiber_ptr;
1098
1099 if (th == target_th) {
1100 rb_raise(rb_eThreadError, "Target thread must not be current thread");
1101 }
1102
1103 if (th->ractor->threads.main == target_th) {
1104 rb_raise(rb_eThreadError, "Target thread must not be main thread");
1105 }
1106
1107 RUBY_DEBUG_LOG("target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1108
1109 if (target_th->status != THREAD_KILLED) {
1110 struct rb_waiting_list waiter;
1111 waiter.next = target_th->join_list;
1112 waiter.thread = th;
1113 waiter.fiber = rb_fiberptr_blocking(fiber) ? NULL : fiber;
1114 target_th->join_list = &waiter;
1115
1116 struct join_arg arg;
1117 arg.waiter = &waiter;
1118 arg.target = target_th;
1119 arg.timeout = timeout;
1120 arg.limit = limit;
1121
1122 if (!rb_ensure(thread_join_sleep, (VALUE)&arg, remove_from_join_list, (VALUE)&arg)) {
1123 return Qnil;
1124 }
1125 }
1126
1127 RUBY_DEBUG_LOG("success target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1128
1129 if (target_th->ec->errinfo != Qnil) {
1130 VALUE err = target_th->ec->errinfo;
1131
1132 if (FIXNUM_P(err)) {
1133 switch (err) {
1134 case INT2FIX(TAG_FATAL):
1135 RUBY_DEBUG_LOG("terminated target_th:%u status:%s", rb_th_serial(target_th), thread_status_name(target_th, TRUE));
1136
1137 /* OK. killed. */
1138 break;
1139 default:
1140 if (err == RUBY_FATAL_FIBER_KILLED) { // not integer constant so can't be a case expression
1141 // root fiber killed in non-main thread
1142 break;
1143 }
1144 rb_bug("thread_join: Fixnum (%d) should not reach here.", FIX2INT(err));
1145 }
1146 }
1147 else if (THROW_DATA_P(target_th->ec->errinfo)) {
1148 rb_bug("thread_join: THROW_DATA should not reach here.");
1149 }
1150 else {
1151 /* normal exception */
1152 rb_exc_raise(err);
1153 }
1154 }
1155 return target_th->self;
1156}
1157
1158/*
1159 * call-seq:
1160 * thr.join -> thr
1161 * thr.join(limit) -> thr
1162 *
1163 * The calling thread will suspend execution and run this +thr+.
1164 *
1165 * Does not return until +thr+ exits or until the given +limit+ seconds have
1166 * passed.
1167 *
1168 * If the time limit expires, +nil+ will be returned, otherwise +thr+ is
1169 * returned.
1170 *
1171 * Any threads not joined will be killed when the main program exits.
1172 *
1173 * If +thr+ had previously raised an exception and the ::abort_on_exception or
1174 * $DEBUG flags are not set (so the exception has not yet been processed), it
1175 * will be processed at this time.
1176 *
1177 * a = Thread.new { print "a"; sleep(10); print "b"; print "c" }
1178 * x = Thread.new { print "x"; Thread.pass; print "y"; print "z" }
1179 * x.join # Let thread x finish, thread a will be killed on exit.
1180 * #=> "axyz"
1181 *
1182 * The following example illustrates the +limit+ parameter.
1183 *
1184 * y = Thread.new { 4.times { sleep 0.1; puts 'tick... ' }}
1185 * puts "Waiting" until y.join(0.15)
1186 *
1187 * This will produce:
1188 *
1189 * tick...
1190 * Waiting
1191 * tick...
1192 * Waiting
1193 * tick...
1194 * tick...
1195 */
1196
1197static VALUE
1198thread_join_m(int argc, VALUE *argv, VALUE self)
1199{
1200 VALUE timeout = Qnil;
1201 rb_hrtime_t rel = 0, *limit = 0;
1202
1203 if (rb_check_arity(argc, 0, 1)) {
1204 timeout = argv[0];
1205 }
1206
1207 // Convert the timeout eagerly, so it's always converted and deterministic
1208 /*
1209 * This supports INFINITY and negative values, so we can't use
1210 * rb_time_interval right now...
1211 */
1212 if (NIL_P(timeout)) {
1213 /* unlimited */
1214 }
1215 else if (FIXNUM_P(timeout)) {
1216 rel = rb_sec2hrtime(NUM2TIMET(timeout));
1217 limit = &rel;
1218 }
1219 else {
1220 limit = double2hrtime(&rel, rb_num2dbl(timeout));
1221 }
1222
1223 return thread_join(rb_thread_ptr(self), timeout, limit);
1224}
1225
1226/*
1227 * call-seq:
1228 * thr.value -> obj
1229 *
1230 * Waits for +thr+ to complete, using #join, and returns its value or raises
1231 * the exception which terminated the thread.
1232 *
1233 * a = Thread.new { 2 + 2 }
1234 * a.value #=> 4
1235 *
1236 * b = Thread.new { raise 'something went wrong' }
1237 * b.value #=> RuntimeError: something went wrong
1238 */
1239
1240static VALUE
1241thread_value(VALUE self)
1242{
1243 rb_thread_t *th = rb_thread_ptr(self);
1244 thread_join(th, Qnil, 0);
1245 if (UNDEF_P(th->value)) {
1246 // If the thread is dead because we forked, th->value is still Qundef.
1247 return Qnil;
1248 }
1249 return th->value;
1250}
1251
1252/*
1253 * Thread Scheduling
1254 */
1255
1256static void
1257getclockofday(struct timespec *ts)
1258{
1259#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
1260 if (clock_gettime(CLOCK_MONOTONIC, ts) == 0)
1261 return;
1262#endif
1263 rb_timespec_now(ts);
1264}
1265
1266/*
1267 * Don't inline this, since the library call is already time-consuming
1268 * and we don't want "struct timespec" on the stack for too long for GC
1269 */
1270NOINLINE(rb_hrtime_t rb_hrtime_now(void));
1271rb_hrtime_t
1272rb_hrtime_now(void)
1273{
1274 struct timespec ts;
1275
1276 getclockofday(&ts);
1277 return rb_timespec2hrtime(&ts);
1278}
1279
1280/*
1281 * at least gcc 7.2 and 7.3 complain about "rb_hrtime_t end"
1282 * being uninitialized; maybe other versions do, too.
1283 */
1284COMPILER_WARNING_PUSH
1285#if defined(__GNUC__) && __GNUC__ == 7 && __GNUC_MINOR__ <= 3
1286COMPILER_WARNING_IGNORED(-Wmaybe-uninitialized)
1287#endif
1288#ifndef PRIu64
1289#define PRIu64 PRI_64_PREFIX "u"
1290#endif
1291/*
1292 * @end is the absolute time when @timeout is set to expire.
1293 * Returns true if @end has passed;
1294 * otherwise updates @timeout and returns false.
1295 */
1296static int
1297hrtime_update_expire(rb_hrtime_t *timeout, const rb_hrtime_t end)
1298{
1299 rb_hrtime_t now = rb_hrtime_now();
1300
1301 if (now > end) return 1;
1302
1303 RUBY_DEBUG_LOG("%"PRIu64" > %"PRIu64"", (uint64_t)end, (uint64_t)now);
1304
1305 *timeout = end - now;
1306 return 0;
1307}
1308COMPILER_WARNING_POP
1309
1310static int
1311sleep_hrtime(rb_thread_t *th, rb_hrtime_t rel, unsigned int fl)
1312{
1313 enum rb_thread_status prev_status = th->status;
1314 int woke;
1315 rb_hrtime_t end = rb_hrtime_add(rb_hrtime_now(), rel);
1316
1317 th->status = THREAD_STOPPED;
1318 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1319 while (th->status == THREAD_STOPPED) {
1320 native_sleep(th, &rel);
1321 woke = vm_check_ints_blocking(th->ec);
1322 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1323 break;
1324 if (hrtime_update_expire(&rel, end))
1325 break;
1326 woke = 1;
1327 }
1328 th->status = prev_status;
1329 return woke;
1330}
1331
1332static int
1333sleep_hrtime_until(rb_thread_t *th, rb_hrtime_t end, unsigned int fl)
1334{
1335 enum rb_thread_status prev_status = th->status;
1336 int woke;
1337 rb_hrtime_t rel = rb_hrtime_sub(end, rb_hrtime_now());
1338
1339 th->status = THREAD_STOPPED;
1340 RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1341 while (th->status == THREAD_STOPPED) {
1342 native_sleep(th, &rel);
1343 woke = vm_check_ints_blocking(th->ec);
1344 if (woke && !(fl & SLEEP_SPURIOUS_CHECK))
1345 break;
1346 if (hrtime_update_expire(&rel, end))
1347 break;
1348 woke = 1;
1349 }
1350 th->status = prev_status;
1351 return woke;
1352}
1353
1354static void
1355sleep_forever(rb_thread_t *th, unsigned int fl)
1356{
1357 enum rb_thread_status prev_status = th->status;
1358 enum rb_thread_status status;
1359 int woke;
1360
1361 status = fl & SLEEP_DEADLOCKABLE ? THREAD_STOPPED_FOREVER : THREAD_STOPPED;
1362 th->status = status;
1363
1364 if (!(fl & SLEEP_NO_CHECKINTS)) RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
1365
1366 while (th->status == status) {
1367 if (fl & SLEEP_DEADLOCKABLE) {
1368 rb_ractor_sleeper_threads_inc(th->ractor);
1369 rb_check_deadlock(th->ractor);
1370 }
1371 {
1372 native_sleep(th, 0);
1373 }
1374 if (fl & SLEEP_DEADLOCKABLE) {
1375 rb_ractor_sleeper_threads_dec(th->ractor);
1376 }
1377 if (fl & SLEEP_ALLOW_SPURIOUS) {
1378 break;
1379 }
1380
1381 woke = vm_check_ints_blocking(th->ec);
1382
1383 if (woke && !(fl & SLEEP_SPURIOUS_CHECK)) {
1384 break;
1385 }
1386 }
1387 th->status = prev_status;
1388}
1389
1390void
1391rb_thread_sleep_forever(void)
1392{
1393 RUBY_DEBUG_LOG("forever");
1394 sleep_forever(GET_THREAD(), SLEEP_SPURIOUS_CHECK);
1395}
1396
1397void
1398rb_thread_sleep_deadly(void)
1399{
1400 RUBY_DEBUG_LOG("deadly");
1401 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE|SLEEP_SPURIOUS_CHECK);
1402}
1403
1404static void
1405rb_thread_sleep_deadly_allow_spurious_wakeup(VALUE blocker, VALUE timeout, rb_hrtime_t end)
1406{
1407 VALUE scheduler = rb_fiber_scheduler_current();
1408 if (scheduler != Qnil) {
1409 rb_fiber_scheduler_block(scheduler, blocker, timeout);
1410 }
1411 else {
1412 RUBY_DEBUG_LOG("...");
1413 if (end) {
1414 sleep_hrtime_until(GET_THREAD(), end, SLEEP_SPURIOUS_CHECK);
1415 }
1416 else {
1417 sleep_forever(GET_THREAD(), SLEEP_DEADLOCKABLE);
1418 }
1419 }
1420}
1421
1422void
1423rb_thread_wait_for(struct timeval time)
1424{
1425 rb_thread_t *th = GET_THREAD();
1426
1427 sleep_hrtime(th, rb_timeval2hrtime(&time), SLEEP_SPURIOUS_CHECK);
1428}
1429
1430void
1431rb_ec_check_ints(rb_execution_context_t *ec)
1432{
1433 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1434}
1435
1436/*
1437 * CAUTION: This function causes thread switching.
1438 * rb_thread_check_ints() checks Ruby's pending interrupts;
1439 * some interrupts require thread switching, invoking handlers,
1440 * and so on.
1441 */
1442
1443void
1444rb_thread_check_ints(void)
1445{
1446 rb_ec_check_ints(GET_EC());
1447}
1448
1449/*
1450 * Hidden API for tcl/tk wrapper.
1451 * There is no guarantee that it will be maintained.
1452 */
1453int
1454rb_thread_check_trap_pending(void)
1455{
1456 return rb_signal_buff_size() != 0;
1457}
1458
1459/* This function can be called in blocking region. */
1460int
1461rb_thread_interrupted(VALUE thval)
1462{
1463 return (int)RUBY_VM_INTERRUPTED(rb_thread_ptr(thval)->ec);
1464}
1465
1466void
1467rb_thread_sleep(int sec)
1468{
1469 rb_thread_wait_for(rb_time_timeval(INT2FIX(sec)));
1470}
1471
1472static void
1473rb_thread_schedule_limits(uint32_t limits_us)
1474{
1475 if (!rb_thread_alone()) {
1476 rb_thread_t *th = GET_THREAD();
1477 RUBY_DEBUG_LOG("us:%u", (unsigned int)limits_us);
1478
1479 if (th->running_time_us >= limits_us) {
1480 RUBY_DEBUG_LOG("switch %s", "start");
1481
1482 RB_VM_SAVE_MACHINE_CONTEXT(th);
1483 thread_sched_yield(TH_SCHED(th), th);
1484 rb_ractor_thread_switch(th->ractor, th, true);
1485
1486 RUBY_DEBUG_LOG("switch %s", "done");
1487 }
1488 }
1489}
1490
1491void
1492rb_thread_schedule(void)
1493{
1494 rb_thread_schedule_limits(0);
1495 RUBY_VM_CHECK_INTS(GET_EC());
1496}
1497
1498/* blocking region */
1499
1500static inline int
1501blocking_region_begin(rb_thread_t *th, struct rb_blocking_region_buffer *region,
1502 rb_unblock_function_t *ubf, void *arg, int fail_if_interrupted)
1503{
1504#ifdef RUBY_ASSERT_CRITICAL_SECTION
1505 VM_ASSERT(ruby_assert_critical_section_entered == 0);
1506#endif
1507 VM_ASSERT(th == GET_THREAD());
1508
1509 region->prev_status = th->status;
1510 if (unblock_function_set(th, ubf, arg, fail_if_interrupted)) {
1511 th->blocking_region_buffer = region;
1512 th->status = THREAD_STOPPED;
1513 rb_ractor_blocking_threads_inc(th->ractor, __FILE__, __LINE__);
1514
1515 RUBY_DEBUG_LOG("thread_id:%p", (void *)th->nt->thread_id);
1516 return TRUE;
1517 }
1518 else {
1519 return FALSE;
1520 }
1521}
1522
1523static inline void
1524blocking_region_end(rb_thread_t *th, struct rb_blocking_region_buffer *region)
1525{
1526 /* entry to ubf_list still permitted at this point, make it impossible: */
1527 unblock_function_clear(th);
1528 /* entry to ubf_list impossible at this point, so unregister is safe: */
1529 unregister_ubf_list(th);
1530
1531 thread_sched_to_running(TH_SCHED(th), th);
1532 rb_ractor_thread_switch(th->ractor, th, false);
1533
1534 th->blocking_region_buffer = 0;
1535 rb_ractor_blocking_threads_dec(th->ractor, __FILE__, __LINE__);
1536 if (th->status == THREAD_STOPPED) {
1537 th->status = region->prev_status;
1538 }
1539
1540 RUBY_DEBUG_LOG("end");
1541
1542#ifndef _WIN32
1543 // GET_THREAD() clears WSAGetLastError()
1544 VM_ASSERT(th == GET_THREAD());
1545#endif
1546}
1547
1548/*
1549 * Resolve sentinel unblock function values to their actual function pointers
1550 * and appropriate data2 values. This centralizes the logic for handling
1551 * RUBY_UBF_IO and RUBY_UBF_PROCESS sentinel values.
1552 *
1553 * @param unblock_function Pointer to unblock function pointer (modified in place)
1554 * @param data2 Pointer to data2 pointer (modified in place)
1555 * @param thread Thread context for resolving data2 when needed
1556 * @return true if sentinel values were resolved, false otherwise
1557 */
1558bool
1559rb_thread_resolve_unblock_function(rb_unblock_function_t **unblock_function, void **data2, struct rb_thread_struct *thread)
1560{
1561 rb_unblock_function_t *ubf = *unblock_function;
1562
1563 if ((ubf == RUBY_UBF_IO) || (ubf == RUBY_UBF_PROCESS)) {
1564 *unblock_function = ubf_select;
1565 *data2 = thread;
1566 return true;
1567 }
1568 return false;
1569}
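
/*
 * Illustrative call (a sketch, not code from this file):
 *
 *   rb_unblock_function_t *ubf = RUBY_UBF_IO;  // sentinel value
 *   void *data2 = NULL;
 *   rb_thread_resolve_unblock_function(&ubf, &data2, GET_THREAD());
 *   // ubf now points at ubf_select and data2 at the current thread, so
 *   // callers such as rb_nogvl() can invoke the unblock function uniformly.
 */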
1570
1571void *
1572rb_nogvl(void *(*func)(void *), void *data1,
1573 rb_unblock_function_t *ubf, void *data2,
1574 int flags)
1575{
1576 if (flags & RB_NOGVL_OFFLOAD_SAFE) {
1577 VALUE scheduler = rb_fiber_scheduler_current();
1578 if (scheduler != Qnil) {
1579 struct rb_fiber_scheduler_blocking_operation_state state = {0};
1580
1581 VALUE result = rb_fiber_scheduler_blocking_operation_wait(scheduler, func, data1, ubf, data2, flags, &state);
1582
1583 if (!UNDEF_P(result)) {
1584 rb_errno_set(state.saved_errno);
1585 return state.result;
1586 }
1587 }
1588 }
1589
1590 void *val = 0;
1591 rb_execution_context_t *ec = GET_EC();
1592 rb_thread_t *th = rb_ec_thread_ptr(ec);
1593 rb_vm_t *vm = rb_ec_vm_ptr(ec);
1594 bool is_main_thread = vm->ractor.main_thread == th;
1595 int saved_errno = 0;
1596
1597 rb_thread_resolve_unblock_function(&ubf, &data2, th);
1598
1599 if (ubf && rb_ractor_living_thread_num(th->ractor) == 1 && is_main_thread) {
1600 if (flags & RB_NOGVL_UBF_ASYNC_SAFE) {
1601 vm->ubf_async_safe = 1;
1602 }
1603 }
1604
1605 rb_vm_t *volatile saved_vm = vm;
1606 BLOCKING_REGION(th, {
1607 val = func(data1);
1608 saved_errno = rb_errno();
1609 }, ubf, data2, flags & RB_NOGVL_INTR_FAIL);
1610 vm = saved_vm;
1611
1612 if (is_main_thread) vm->ubf_async_safe = 0;
1613
1614 if ((flags & RB_NOGVL_INTR_FAIL) == 0) {
1615 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1616 }
1617
1618 rb_errno_set(saved_errno);
1619
1620 return val;
1621}
1622
1623/*
1624 * rb_thread_call_without_gvl - permit concurrent/parallel execution.
1625 * rb_thread_call_without_gvl2 - permit concurrent/parallel execution
1626 * without interrupt process.
1627 *
1628 * rb_thread_call_without_gvl() does:
1629 * (1) Check interrupts.
1630 * (2) release GVL.
1631 * Other Ruby threads may run in parallel.
1632 * (3) call func with data1
1633 * (4) acquire GVL.
1634 * Other Ruby threads can not run in parallel any more.
1635 * (5) Check interrupts.
1636 *
1637 * rb_thread_call_without_gvl2() does:
1638 * (1) Check interrupt and return if interrupted.
1639 * (2) release GVL.
1640 * (3) call func with data1 and a pointer to the flags.
1641 * (4) acquire GVL.
1642 *
1643 * If another thread interrupts this thread (Thread#kill, signal delivery,
1644 * VM-shutdown request, and so on), `ubf()' is called (`ubf()' means
1645 * "un-blocking function"). `ubf()' should interrupt `func()' execution by
1646 * toggling a cancellation flag, canceling the invocation of a call inside
1647 * `func()' or similar. Note that `ubf()' may not be called with the GVL.
1648 *
1649 * There are built-in ubfs and you can specify these ubfs:
1650 *
1651 * * RUBY_UBF_IO: ubf for IO operation
1652 * * RUBY_UBF_PROCESS: ubf for process operation
1653 *
1654 * However, we can not guarantee that our built-in ubfs will interrupt your
1655 * `func()' correctly. Be careful when using rb_thread_call_without_gvl().
1656 * If you don't provide a proper ubf(), your program will not stop for
1657 * Control+C or other shutdown events.
1658 *
1659 * "Check interrupts" on above list means checking asynchronous
1660 * interrupt events (such as Thread#kill, signal delivery, VM-shutdown
1661 * request, and so on) and calling corresponding procedures
1662 * (such as `trap' for signals, raise an exception for Thread#raise).
1663 * If `func()' finished and received interrupts, you may skip interrupt
1664 * checking. For example, assume the following func() it reads data from file.
1665 *
1666 * read_func(...) {
1667 * // (a) before read
1668 * read(buffer); // (b) reading
1669 * // (c) after read
1670 * }
1671 *
1672 * If an interrupt occurs at (a) or (b), then `ubf()' cancels this
1673 * `read_func()' and interrupts are checked. However, if an interrupt occurs
1674 * at (c), after the *read* operation is completed, checking interrupts is
1675 * harmful because it causes an irrevocable side effect: the read data will
1676 * vanish. To avoid this problem, `read_func()' should be used with
1677 * `rb_thread_call_without_gvl2()'.
1678 *
1679 * If `rb_thread_call_without_gvl2()' detects an interrupt, it returns
1680 * immediately. This function does not indicate when the execution was
1681 * interrupted: there are 4 possible timings, (a), (b), (c), and before
1682 * calling read_func(). You need to record the progress of read_func() and
1683 * check the progress after `rb_thread_call_without_gvl2()' returns. You may
1684 * need to call `rb_thread_check_ints()' correctly, or your program will not
1685 * be able to process events such as `trap' handlers properly.
1686 *
1687 * NOTE: You can not execute most of the Ruby C API or touch Ruby
1688 *       objects in `func()' and `ubf()', including raising an
1689 *       exception, because the current thread doesn't hold the GVL
1690 *       (doing so causes synchronization problems). If you need to
1691 *       call Ruby functions, either use rb_thread_call_with_gvl()
1692 *       or read the source code of the C APIs and confirm their
1693 *       safety yourself.
1694 *
1695 * NOTE: In short, this API is difficult to use safely. I recommend you
1696 *       use other approaches if you can. We lack experience using this
1697 *       API. Please report any problems related to it.
1698 *
1699 * NOTE: Releasing and re-acquiring the GVL may be expensive operations
1700 *       for a short-running `func()'. Be sure to benchmark, and use this
1701 *       mechanism only when `func()' takes enough time.
1702 *
1703 * Safe C API:
1704 * * rb_thread_interrupted() - check interrupt flag
1705 * * ruby_xmalloc(), ruby_xrealloc(), ruby_xfree() -
1706 * they will work without GVL, and may acquire GVL when GC is needed.
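 *
 * A minimal usage sketch (not part of the original documentation; the names
 * `slow_func', `cancel_ubf' and `do_some_work' are hypothetical):
 *
 *   struct ctx { volatile int cancelled; };
 *
 *   static void *
 *   slow_func(void *p)    // runs without the GVL
 *   {
 *       struct ctx *c = p;
 *       while (!c->cancelled) do_some_work();
 *       return NULL;
 *   }
 *
 *   static void
 *   cancel_ubf(void *p)   // may be called from another thread
 *   {
 *       ((struct ctx *)p)->cancelled = 1;
 *   }
 *
 *   struct ctx c = {0};
 *   rb_thread_call_without_gvl(slow_func, &c, cancel_ubf, &c);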
1707 */
1708void *
1709rb_thread_call_without_gvl2(void *(*func)(void *), void *data1,
1710 rb_unblock_function_t *ubf, void *data2)
1711{
1712 return rb_nogvl(func, data1, ubf, data2, RB_NOGVL_INTR_FAIL);
1713}
1714
1715void *
1716rb_thread_call_without_gvl(void *(*func)(void *data), void *data1,
1717 rb_unblock_function_t *ubf, void *data2)
1718{
1719 return rb_nogvl(func, data1, ubf, data2, 0);
1720}
1721
1722static int
1723waitfd_to_waiting_flag(int wfd_event)
1724{
1725 return wfd_event << 1;
1726}
1727
1728static struct ccan_list_head *
1729rb_io_blocking_operations(struct rb_io *io)
1730{
1731 rb_serial_t fork_generation = GET_VM()->fork_gen;
1732
1733 // On fork, all existing entries in this list (which are stack allocated) become invalid.
1734 // Therefore, we re-initialize the list which clears it.
1735 if (io->fork_generation != fork_generation) {
1736 ccan_list_head_init(&io->blocking_operations);
1737 io->fork_generation = fork_generation;
1738 }
1739
1740 return &io->blocking_operations;
1741}
1742
1743/*
1744 * Registers a blocking operation for an IO object. This is used to track all threads and fibers
1745 * that are currently blocked on this IO for reading, writing or other operations.
1746 *
1747 * When the IO is closed, all blocking operations will be notified via rb_fiber_scheduler_fiber_interrupt
1748 * for fibers with a scheduler, or via rb_threadptr_interrupt for threads without a scheduler.
1749 *
1750 * @parameter io The IO object on which the operation will block
1751 * @parameter blocking_operation The operation details including the execution context that will be blocked
1752 */
1753static void
1754rb_io_blocking_operation_enter(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1755{
1756 ccan_list_add(rb_io_blocking_operations(io), &blocking_operation->list);
1757}
1758
1759static void
1760rb_io_blocking_operation_pop(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1761{
1762 ccan_list_del(&blocking_operation->list);
1763}
1764
1765struct io_blocking_operation_arguments {
1766 struct rb_io *io;
1767 struct rb_io_blocking_operation *blocking_operation;
1768};
1769
1770static VALUE
1771io_blocking_operation_exit(VALUE _arguments)
1772{
1773 struct io_blocking_operation_arguments *arguments = (void*)_arguments;
1774 struct rb_io_blocking_operation *blocking_operation = arguments->blocking_operation;
1775
1776 rb_io_blocking_operation_pop(arguments->io, blocking_operation);
1777
1778 rb_io_t *io = arguments->io;
1779 rb_thread_t *thread = io->closing_ec->thread_ptr;
1780 rb_fiber_t *fiber = io->closing_ec->fiber_ptr;
1781
1782 if (thread->scheduler != Qnil) {
1783 // This can cause spurious wakeups...
1784 rb_fiber_scheduler_unblock(thread->scheduler, io->self, rb_fiberptr_self(fiber));
1785 }
1786 else {
1787 rb_thread_wakeup(thread->self);
1788 }
1789
1790 return Qnil;
1791}
1792
1793/*
1794 * Called when a blocking operation completes or is interrupted. Removes the operation from
1795 * the IO's blocking_operations list and wakes up any waiting threads/fibers.
1796 *
1797 * If there's a wakeup_mutex (meaning an IO close is in progress), synchronizes the cleanup
1798 * through that mutex to ensure proper coordination with the closing thread.
1799 *
1800 * @parameter io The IO object the operation was performed on
1801 * @parameter blocking_operation The completed operation to clean up
1802 */
1803static void
1804rb_io_blocking_operation_exit(struct rb_io *io, struct rb_io_blocking_operation *blocking_operation)
1805{
1806 VALUE wakeup_mutex = io->wakeup_mutex;
1807
1808 // Indicate that the blocking operation is no longer active:
1809 blocking_operation->ec = NULL;
1810
1811 if (RB_TEST(wakeup_mutex)) {
1812 struct io_blocking_operation_arguments arguments = {
1813 .io = io,
1814 .blocking_operation = blocking_operation
1815 };
1816
1817 rb_mutex_synchronize(wakeup_mutex, io_blocking_operation_exit, (VALUE)&arguments);
1818 }
1819 else {
1820 // If there's no wakeup_mutex, we can safely remove the operation directly:
1821 rb_io_blocking_operation_pop(io, blocking_operation);
1822 }
1823}
1824
1825static VALUE
1826rb_thread_io_blocking_operation_ensure(VALUE _argument)
1827{
1828 struct io_blocking_operation_arguments *arguments = (void*)_argument;
1829
1830 rb_io_blocking_operation_exit(arguments->io, arguments->blocking_operation);
1831
1832 return Qnil;
1833}
1834
1835/*
1836 * Executes a function that performs a blocking IO operation, while properly tracking
1837 * the operation in the IO's blocking_operations list. This ensures proper cleanup
1838 * and interruption handling if the IO is closed while blocked.
1839 *
1840 * The operation is automatically removed from the blocking_operations list when the function
1841 * returns, whether normally or due to an exception.
1842 *
1843 * @parameter self The IO object
1844 * @parameter function The function to execute that will perform the blocking operation
1845 * @parameter argument The argument to pass to the function
1846 * @returns The result of the blocking operation function
1847 */
1848VALUE
1849rb_thread_io_blocking_operation(VALUE self, VALUE(*function)(VALUE), VALUE argument)
1850{
1851 struct rb_io *io;
1852 RB_IO_POINTER(self, io);
1853
1854 rb_execution_context_t *ec = GET_EC();
1855 struct rb_io_blocking_operation blocking_operation = {
1856 .ec = ec,
1857 };
1858 rb_io_blocking_operation_enter(io, &blocking_operation);
1859
1860 struct io_blocking_operation_arguments io_blocking_operation_arguments = {
1861 .io = io,
1862 .blocking_operation = &blocking_operation
1863 };
1864
1865 return rb_ensure(function, argument, rb_thread_io_blocking_operation_ensure, (VALUE)&io_blocking_operation_arguments);
1866}
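
/*
 * Usage sketch (illustration only; `wait_readable' and `argument' are
 * hypothetical, and `io' is a VALUE wrapping an IO object):
 *
 *   static VALUE
 *   wait_readable(VALUE argument)
 *   {
 *       // perform the actual blocking wait/read on the IO here
 *       return Qnil;
 *   }
 *
 *   VALUE result = rb_thread_io_blocking_operation(io, wait_readable, argument);
 *   // If the IO is closed while blocked, the operation is interrupted and the
 *   // tracking entry is removed from io->blocking_operations on the way out.
 */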
1867
1868static bool
1869thread_io_mn_schedulable(rb_thread_t *th, int events, const struct timeval *timeout)
1870{
1871#if defined(USE_MN_THREADS) && USE_MN_THREADS
1872 return !th_has_dedicated_nt(th) && (events || timeout) && th->blocking;
1873#else
1874 return false;
1875#endif
1876}
1877
1878// returns true if a retry is needed
1879static bool
1880thread_io_wait_events(rb_thread_t *th, int fd, int events, const struct timeval *timeout)
1881{
1882#if defined(USE_MN_THREADS) && USE_MN_THREADS
1883 if (thread_io_mn_schedulable(th, events, timeout)) {
1884 rb_hrtime_t rel, *prel;
1885
1886 if (timeout) {
1887 rel = rb_timeval2hrtime(timeout);
1888 prel = &rel;
1889 }
1890 else {
1891 prel = NULL;
1892 }
1893
1894 VM_ASSERT(prel || (events & (RB_WAITFD_IN | RB_WAITFD_OUT)));
1895
1896 if (thread_sched_wait_events(TH_SCHED(th), th, fd, waitfd_to_waiting_flag(events), prel)) {
1897 // timeout
1898 return false;
1899 }
1900 else {
1901 return true;
1902 }
1903 }
1904#endif // defined(USE_MN_THREADS) && USE_MN_THREADS
1905 return false;
1906}
1907
1908// assume read/write
1909static bool
1910blocking_call_retryable_p(int r, int eno)
1911{
1912 if (r != -1) return false;
1913
1914 switch (eno) {
1915 case EAGAIN:
1916#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
1917 case EWOULDBLOCK:
1918#endif
1919 return true;
1920 default:
1921 return false;
1922 }
1923}
1924
1925bool
1926rb_thread_mn_schedulable(VALUE thval)
1927{
1928 rb_thread_t *th = rb_thread_ptr(thval);
1929 return th->mn_schedulable;
1930}
1931
1932VALUE
1933rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
1934{
1935 rb_execution_context_t * volatile ec = GET_EC();
1936 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
1937
1938 RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
1939
1940 volatile VALUE val = Qundef; /* shouldn't be used */
1941 volatile int saved_errno = 0;
1942 enum ruby_tag_type state;
1943 volatile bool prev_mn_schedulable = th->mn_schedulable;
1944 th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
1945
1946 int fd = io->fd;
1947
1948 // `errno` is only valid when there is an actual error - but we can't
1949 // extract that from the return value of `func` alone, so we clear any
1950 // prior `errno` value here so that we can later check if it was set by
1951 // `func` or not (as opposed to some previously set value).
1952 errno = 0;
1953
1954 struct rb_io_blocking_operation blocking_operation = {
1955 .ec = ec,
1956 };
1957 rb_io_blocking_operation_enter(io, &blocking_operation);
1958
1959 {
1960 EC_PUSH_TAG(ec);
1961 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
1962 volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
1963 retry:
1964 BLOCKING_REGION(th, {
1965 val = func(data1);
1966 saved_errno = errno;
1967 }, ubf_select, th, FALSE);
1968
1969 RUBY_ASSERT(th == rb_ec_thread_ptr(ec));
1970 if (events &&
1971 blocking_call_retryable_p((int)val, saved_errno) &&
1972 thread_io_wait_events(th, fd, events, NULL)) {
1973 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1974 goto retry;
1975 }
1976
1977 RUBY_VM_CHECK_INTS_BLOCKING(ec);
1978
1979 state = saved_state;
1980 }
1981 EC_POP_TAG();
1982
1983 th = rb_ec_thread_ptr(ec);
1984 th->mn_schedulable = prev_mn_schedulable;
1985 }
1986
1987 rb_io_blocking_operation_exit(io, &blocking_operation);
1988
1989 if (state) {
1990 EC_JUMP_TAG(ec, state);
1991 }
1992
1993 // If the error was a timeout, we raise a specific exception for that:
1994 if (saved_errno == ETIMEDOUT) {
1995 rb_raise(rb_eIOTimeoutError, "Blocking operation timed out!");
1996 }
1997
1998 errno = saved_errno;
1999
2000 return val;
2001}
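/*
 * A usage sketch (editorial): the shape of a caller of
 * rb_thread_io_blocking_call. The struct and function names here are
 * hypothetical; the real callers live in io.c.
 *
 *     struct example_read_arg { int fd; void *buffer; size_t capacity; };
 *
 *     static VALUE
 *     example_read_func(void *ptr)
 *     {
 *         struct example_read_arg *arg = ptr;
 *         // Runs inside BLOCKING_REGION with the GVL released. If it
 *         // fails with EAGAIN/EWOULDBLOCK and events were requested,
 *         // rb_thread_io_blocking_call waits for readiness and retries.
 *         return (VALUE)read(arg->fd, arg->buffer, arg->capacity);
 *     }
 *
 *     // With the GVL held:
 *     //     struct example_read_arg arg = { io->fd, buffer, capacity };
 *     //     VALUE r = rb_thread_io_blocking_call(io, example_read_func, &arg, RB_WAITFD_IN);
 */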
2002
2003VALUE
2004rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
2005{
2006 return rb_thread_io_blocking_call(io, func, data1, 0);
2007}
2008
2009/*
2010 * rb_thread_call_with_gvl - re-enter the Ruby world after GVL release.
2011 *
2012 * After releasing the GVL using
2013 * rb_thread_call_without_gvl() you cannot access Ruby values or invoke
2014 * methods. If you need to access Ruby, you must use this function,
2015 * rb_thread_call_with_gvl().
2016 *
2017 * This function rb_thread_call_with_gvl() does:
2018 * (1) acquire GVL.
2019 * (2) call passed function `func'.
2020 * (3) release GVL.
2021 * (4) return a value which is returned at (2).
2022 *
2023 * NOTE: You should not return a Ruby object from (2) because such an
2024 * object will not be marked.
2025 *
2026 * NOTE: If an exception is raised in `func', this function DOES NOT
2027 * protect (catch) the exception. If you have any resources
2028 * which should be freed before the exception propagates, you need to use
2029 * rb_protect() in `func' and return a value which indicates that
2030 * an exception was raised.
2031 *
2032 * NOTE: This function should not be called by a thread which was not
2033 * created as a Ruby thread (i.e. created by Thread.new or the like). In other
2034 * words, this function *DOES NOT* associate or convert a NON-Ruby
2035 * thread to a Ruby thread.
2036 */
2037void *
2038rb_thread_call_with_gvl(void *(*func)(void *), void *data1)
2039{
2040 rb_thread_t *th = ruby_thread_from_native();
2041 struct rb_blocking_region_buffer *brb;
2042 struct rb_unblock_callback prev_unblock;
2043 void *r;
2044
2045 if (th == 0) {
2046 /* An error has occurred, but we can't use rb_bug()
2047 * because this thread is not Ruby's thread.
2048 * What should we do?
2049 */
2050 bp();
2051 fprintf(stderr, "[BUG] rb_thread_call_with_gvl() is called by non-ruby thread\n");
2052 exit(EXIT_FAILURE);
2053 }
2054
2055 brb = (struct rb_blocking_region_buffer *)th->blocking_region_buffer;
2056 prev_unblock = th->unblock;
2057
2058 if (brb == 0) {
2059 rb_bug("rb_thread_call_with_gvl: called by a thread which has GVL.");
2060 }
2061
2062 blocking_region_end(th, brb);
2063 /* enter to Ruby world: You can access Ruby values, methods and so on. */
2064 r = (*func)(data1);
2065 /* leave from Ruby world: You can not access Ruby values, etc. */
2066 int released = blocking_region_begin(th, brb, prev_unblock.func, prev_unblock.arg, FALSE);
2067 RUBY_ASSERT_ALWAYS(released);
2068 RB_VM_SAVE_MACHINE_CONTEXT(th);
2069 thread_sched_to_waiting(TH_SCHED(th), th);
2070 return r;
2071}
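/*
 * A usage sketch (editorial): the typical pairing of
 * rb_thread_call_without_gvl() and rb_thread_call_with_gvl(). The functions
 * `example_log' and `example_compute' are hypothetical.
 *
 *     static void *
 *     example_log(void *message)
 *     {
 *         // Runs with the GVL held: calling Ruby is safe here, but do not
 *         // return a Ruby object -- it will not be marked by the GC.
 *         rb_funcall(rb_stderr, rb_intern("puts"), 1,
 *                    rb_str_new_cstr((const char *)message));
 *         return NULL;
 *     }
 *
 *     static void *
 *     example_compute(void *data)
 *     {
 *         // Runs without the GVL; re-enter Ruby only through this call:
 *         rb_thread_call_with_gvl(example_log, (void *)"checkpoint reached");
 *         return data;
 *     }
 *
 *     // With the GVL held:
 *     //     rb_thread_call_without_gvl(example_compute, data, RUBY_UBF_IO, NULL);
 */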
2072
2073/*
2074 * ruby_thread_has_gvl_p - check if current native thread has GVL.
2075 */
2076
2077int
2078ruby_thread_has_gvl_p(void)
2079{
2080 rb_thread_t *th = ruby_thread_from_native();
2081
2082 if (th && th->blocking_region_buffer == 0) {
2083 return 1;
2084 }
2085 else {
2086 return 0;
2087 }
2088}
2089
2090/*
2091 * call-seq:
2092 * Thread.pass -> nil
2093 *
2094 * Give the thread scheduler a hint to pass execution to another thread.
2095 * A running thread may or may not switch; it depends on the OS and processor.
2096 */
2097
2098static VALUE
2099thread_s_pass(VALUE klass)
2100{
2101 rb_thread_schedule();
2102 return Qnil;
2103}
2104
2105/*****************************************************/
2106
2107/*
2108 * rb_threadptr_pending_interrupt_* - manage asynchronous error queue
2109 *
2110 * Async events such as an exception thrown by Thread#raise,
2111 * Thread#kill and thread termination (after main thread termination)
2112 * will be queued to th->pending_interrupt_queue.
2113 * - clear: clear the queue.
2114 * - enque: enqueue err object into queue.
2115 * - deque: dequeue err object from queue.
2116 * - active_p: return 1 if the queue should be checked.
2117 *
2118 * All rb_threadptr_pending_interrupt_* functions are called by
2119 * a thread which has acquired the GVL, of course.
2120 * Note that all "rb_" prefix APIs need GVL to call.
2121 */
2122
2123void
2124rb_threadptr_pending_interrupt_clear(rb_thread_t *th)
2125{
2126 rb_ary_clear(th->pending_interrupt_queue);
2127}
2128
2129void
2130rb_threadptr_pending_interrupt_enque(rb_thread_t *th, VALUE v)
2131{
2132 rb_ary_push(th->pending_interrupt_queue, v);
2133 th->pending_interrupt_queue_checked = 0;
2134}
2135
2136static void
2137threadptr_check_pending_interrupt_queue(rb_thread_t *th)
2138{
2139 if (!th->pending_interrupt_queue) {
2140 rb_raise(rb_eThreadError, "uninitialized thread");
2141 }
2142}
2143
2144enum handle_interrupt_timing {
2145 INTERRUPT_NONE,
2146 INTERRUPT_IMMEDIATE,
2147 INTERRUPT_ON_BLOCKING,
2148 INTERRUPT_NEVER
2149};
2150
2151static enum handle_interrupt_timing
2152rb_threadptr_pending_interrupt_from_symbol(rb_thread_t *th, VALUE sym)
2153{
2154 if (sym == sym_immediate) {
2155 return INTERRUPT_IMMEDIATE;
2156 }
2157 else if (sym == sym_on_blocking) {
2158 return INTERRUPT_ON_BLOCKING;
2159 }
2160 else if (sym == sym_never) {
2161 return INTERRUPT_NEVER;
2162 }
2163 else {
2164 rb_raise(rb_eThreadError, "unknown mask signature");
2165 }
2166}
2167
2168static enum handle_interrupt_timing
2169rb_threadptr_pending_interrupt_check_mask(rb_thread_t *th, VALUE err)
2170{
2171 VALUE mask;
2172 long mask_stack_len = RARRAY_LEN(th->pending_interrupt_mask_stack);
2173 const VALUE *mask_stack = RARRAY_CONST_PTR(th->pending_interrupt_mask_stack);
2174 VALUE mod;
2175 long i;
2176
2177 for (i=0; i<mask_stack_len; i++) {
2178 mask = mask_stack[mask_stack_len-(i+1)];
2179
2180 if (SYMBOL_P(mask)) {
2181 /* do not match RUBY_FATAL_THREAD_KILLED etc */
2182 if (err != rb_cInteger) {
2183 return rb_threadptr_pending_interrupt_from_symbol(th, mask);
2184 }
2185 else {
2186 continue;
2187 }
2188 }
2189
2190 for (mod = err; mod; mod = RCLASS_SUPER(mod)) {
2191 VALUE klass = mod;
2192 VALUE sym;
2193
2194 if (BUILTIN_TYPE(mod) == T_ICLASS) {
2195 klass = RBASIC(mod)->klass;
2196 }
2197 else if (mod != RCLASS_ORIGIN(mod)) {
2198 continue;
2199 }
2200
2201 if ((sym = rb_hash_aref(mask, klass)) != Qnil) {
2202 return rb_threadptr_pending_interrupt_from_symbol(th, sym);
2203 }
2204 }
2205 /* try to next mask */
2206 }
2207 return INTERRUPT_NONE;
2208}
2209
2210static int
2211rb_threadptr_pending_interrupt_empty_p(const rb_thread_t *th)
2212{
2213 return RARRAY_LEN(th->pending_interrupt_queue) == 0;
2214}
2215
2216static int
2217rb_threadptr_pending_interrupt_include_p(rb_thread_t *th, VALUE err)
2218{
2219 int i;
2220 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2221 VALUE e = RARRAY_AREF(th->pending_interrupt_queue, i);
2222 if (rb_obj_is_kind_of(e, err)) {
2223 return TRUE;
2224 }
2225 }
2226 return FALSE;
2227}
2228
2229static VALUE
2230rb_threadptr_pending_interrupt_deque(rb_thread_t *th, enum handle_interrupt_timing timing)
2231{
2232#if 1 /* 1 to enable Thread#handle_interrupt, 0 to ignore it */
2233 int i;
2234
2235 for (i=0; i<RARRAY_LEN(th->pending_interrupt_queue); i++) {
2236 VALUE err = RARRAY_AREF(th->pending_interrupt_queue, i);
2237
2238 enum handle_interrupt_timing mask_timing = rb_threadptr_pending_interrupt_check_mask(th, CLASS_OF(err));
2239
2240 switch (mask_timing) {
2241 case INTERRUPT_ON_BLOCKING:
2242 if (timing != INTERRUPT_ON_BLOCKING) {
2243 break;
2244 }
2245 /* fall through */
2246 case INTERRUPT_NONE: /* default: IMMEDIATE */
2247 case INTERRUPT_IMMEDIATE:
2248 rb_ary_delete_at(th->pending_interrupt_queue, i);
2249 return err;
2250 case INTERRUPT_NEVER:
2251 break;
2252 }
2253 }
2254
2255 th->pending_interrupt_queue_checked = 1;
2256 return Qundef;
2257#else
2258 VALUE err = rb_ary_shift(th->pending_interrupt_queue);
2259 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2260 th->pending_interrupt_queue_checked = 1;
2261 }
2262 return err;
2263#endif
2264}
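/*
 * Editorial example: with a mask of {IOError => :on_blocking} (hypothetical),
 * a queued IOError is dequeued only when this function is called with
 * INTERRUPT_ON_BLOCKING timing (i.e. from a blocking region); with no mask
 * entry the default is immediate delivery, and :never leaves the event queued.
 */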
2265
2266static int
2267threadptr_pending_interrupt_active_p(rb_thread_t *th)
2268{
2269 /*
2270 * For optimization, we don't check async errinfo queue
2271 * if the queue and the thread interrupt mask were not changed
2272 * since last check.
2273 */
2274 if (th->pending_interrupt_queue_checked) {
2275 return 0;
2276 }
2277
2278 if (rb_threadptr_pending_interrupt_empty_p(th)) {
2279 return 0;
2280 }
2281
2282 return 1;
2283}
2284
2285static int
2286handle_interrupt_arg_check_i(VALUE key, VALUE val, VALUE args)
2287{
2288 VALUE *maskp = (VALUE *)args;
2289
2290 if (val != sym_immediate && val != sym_on_blocking && val != sym_never) {
2291 rb_raise(rb_eArgError, "unknown mask signature");
2292 }
2293
2294 if (key == rb_eException && (UNDEF_P(*maskp) || NIL_P(*maskp))) {
2295 *maskp = val;
2296 return ST_CONTINUE;
2297 }
2298
2299 if (RTEST(*maskp)) {
2300 if (!RB_TYPE_P(*maskp, T_HASH)) {
2301 VALUE prev = *maskp;
2302 *maskp = rb_ident_hash_new();
2303 if (SYMBOL_P(prev)) {
2304 rb_hash_aset(*maskp, rb_eException, prev);
2305 }
2306 }
2307 rb_hash_aset(*maskp, key, val);
2308 }
2309 else {
2310 *maskp = Qfalse;
2311 }
2312
2313 return ST_CONTINUE;
2314}
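/*
 * Editorial note on the iterator above: it validates every TimingSymbol and
 * folds the user-supplied hash into `mask'. For example (hypothetical
 * input), {RuntimeError => :never, Exception => :immediate} coalesces into
 * an identity hash keyed by class, while a lone {Exception => :never}
 * collapses to the bare symbol :never, so the common case avoids allocating
 * a hash at all.
 */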
2315
2316/*
2317 * call-seq:
2318 * Thread.handle_interrupt(hash) { ... } -> result of the block
2319 *
2320 * Changes asynchronous interrupt timing.
2321 *
2322 * _interrupt_ means an asynchronous event and its corresponding procedure,
2323 * triggered by Thread#raise, Thread#kill, signal trap (not supported yet)
2324 * and main thread termination (if the main thread terminates, then all
2325 * other threads will be killed).
2326 *
2327 * The given +hash+ has pairs like <code>ExceptionClass =>
2328 * :TimingSymbol</code>, where the ExceptionClass is the interrupt handled by
2329 * the given block. The TimingSymbol can be one of the following symbols:
2330 *
2331 * [+:immediate+] Invoke interrupts immediately.
2332 * [+:on_blocking+] Invoke interrupts while in a _BlockingOperation_.
2333 * [+:never+] Never invoke any interrupts.
2334 *
2335 * _BlockingOperation_ means that the operation will block the calling thread,
2336 * such as read and write. On the CRuby implementation, _BlockingOperation_ is any
2337 * operation executed without the GVL.
2338 *
2339 * Masked asynchronous interrupts are delayed until they are enabled.
2340 * This method is similar to sigprocmask(3).
2341 *
2342 * === NOTE
2343 *
2344 * Asynchronous interrupts are difficult to use.
2345 *
2346 * If you need to communicate between threads, please consider using another
2347 * mechanism such as Queue, or use asynchronous interrupts only with a deep
2348 * understanding of this method.
2349 *
2350 * === Usage
2351 *
2352 * In this example, we guard against Thread#raise exceptions.
2353 *
2354 * Using the +:never+ TimingSymbol the RuntimeError exception will always be
2355 * ignored in the first block of the main thread. In the second
2356 * ::handle_interrupt block we can purposefully handle RuntimeError exceptions.
2357 *
2358 * th = Thread.new do
2359 * Thread.handle_interrupt(RuntimeError => :never) {
2360 * begin
2361 * # You can write resource allocation code safely.
2362 * Thread.handle_interrupt(RuntimeError => :immediate) {
2363 * # ...
2364 * }
2365 * ensure
2366 * # You can write resource deallocation code safely.
2367 * end
2368 * }
2369 * end
2370 * Thread.pass
2371 * # ...
2372 * th.raise "stop"
2373 *
2374 * While we are ignoring the RuntimeError exception, it's safe to write our
2375 * resource allocation code. Then, the ensure block is where we can safely
2376 * deallocate our resources.
2377 *
2378 * ==== Stack control settings
2379 *
2380 * It's possible to stack multiple levels of ::handle_interrupt blocks in order
2381 * to control more than one ExceptionClass and TimingSymbol at a time.
2382 *
2383 * Thread.handle_interrupt(FooError => :never) {
2384 * Thread.handle_interrupt(BarError => :never) {
2385 * # FooError and BarError are prohibited.
2386 * }
2387 * }
2388 *
2389 * ==== Inheritance with ExceptionClass
2390 *
2391 * All exceptions inherited from the ExceptionClass parameter will be considered.
2392 *
2393 * Thread.handle_interrupt(Exception => :never) {
2394 * # all exceptions inherited from Exception are prohibited.
2395 * }
2396 *
2397 * For handling all interrupts, use +Object+ and not +Exception+
2398 * as the ExceptionClass, as kill/terminate interrupts are not handled by +Exception+.
2399 */
2400static VALUE
2401rb_thread_s_handle_interrupt(VALUE self, VALUE mask_arg)
2402{
2403 VALUE mask = Qundef;
2404 rb_execution_context_t * volatile ec = GET_EC();
2405 rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
2406 volatile VALUE r = Qnil;
2407 enum ruby_tag_type state;
2408
2409 if (!rb_block_given_p()) {
2410 rb_raise(rb_eArgError, "block is needed.");
2411 }
2412
2413 mask_arg = rb_to_hash_type(mask_arg);
2414
2415 if (OBJ_FROZEN(mask_arg) && rb_hash_compare_by_id_p(mask_arg)) {
2416 mask = Qnil;
2417 }
2418
2419 rb_hash_foreach(mask_arg, handle_interrupt_arg_check_i, (VALUE)&mask);
2420
2421 if (UNDEF_P(mask)) {
2422 return rb_yield(Qnil);
2423 }
2424
2425 if (!RTEST(mask)) {
2426 mask = mask_arg;
2427 }
2428 else if (RB_TYPE_P(mask, T_HASH)) {
2429 OBJ_FREEZE(mask);
2430 }
2431
2432 rb_ary_push(th->pending_interrupt_mask_stack, mask);
2433 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2434 th->pending_interrupt_queue_checked = 0;
2435 RUBY_VM_SET_INTERRUPT(th->ec);
2436 }
2437
2438 EC_PUSH_TAG(th->ec);
2439 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
2440 r = rb_yield(Qnil);
2441 }
2442 EC_POP_TAG();
2443
2444 rb_ary_pop(th->pending_interrupt_mask_stack);
2445 if (!rb_threadptr_pending_interrupt_empty_p(th)) {
2446 th->pending_interrupt_queue_checked = 0;
2447 RUBY_VM_SET_INTERRUPT(th->ec);
2448 }
2449
2450 RUBY_VM_CHECK_INTS(th->ec);
2451
2452 if (state) {
2453 EC_JUMP_TAG(th->ec, state);
2454 }
2455
2456 return r;
2457}
2458
2459/*
2460 * call-seq:
2461 * target_thread.pending_interrupt?(error = nil) -> true/false
2462 *
2463 * Returns +true+ if the asynchronous event queue for the target thread is not empty.
2464 *
2465 * If +error+ is given, then check only for +error+ type deferred events.
2466 *
2467 * See ::pending_interrupt? for more information.
2468 */
2469static VALUE
2470rb_thread_pending_interrupt_p(int argc, VALUE *argv, VALUE target_thread)
2471{
2472 rb_thread_t *target_th = rb_thread_ptr(target_thread);
2473
2474 if (!target_th->pending_interrupt_queue) {
2475 return Qfalse;
2476 }
2477 if (rb_threadptr_pending_interrupt_empty_p(target_th)) {
2478 return Qfalse;
2479 }
2480 if (rb_check_arity(argc, 0, 1)) {
2481 VALUE err = argv[0];
2482 if (!rb_obj_is_kind_of(err, rb_cModule)) {
2483 rb_raise(rb_eTypeError, "class or module required for rescue clause");
2484 }
2485 return RBOOL(rb_threadptr_pending_interrupt_include_p(target_th, err));
2486 }
2487 else {
2488 return Qtrue;
2489 }
2490}
2491
2492/*
2493 * call-seq:
2494 * Thread.pending_interrupt?(error = nil) -> true/false
2495 *
2496 * Returns +true+ if the asynchronous event queue of the current thread is not empty.
2497 *
2498 * Since Thread::handle_interrupt can be used to defer asynchronous events,
2499 * this method can be used to determine if there are any deferred events.
2500 *
2501 * If this method returns true, you may want to finish the enclosing +:never+ block.
2502 *
2503 * For example, the following method processes deferred asynchronous events
2504 * immediately.
2505 *
2506 * def Thread.kick_interrupt_immediately
2507 * Thread.handle_interrupt(Object => :immediate) {
2508 * Thread.pass
2509 * }
2510 * end
2511 *
2512 * If +error+ is given, then check only for +error+ type deferred events.
2513 *
2514 * === Usage
2515 *
2516 * th = Thread.new{
2517 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2518 * while true
2519 * ...
2520 * # reach safe point to invoke interrupt
2521 * if Thread.pending_interrupt?
2522 * Thread.handle_interrupt(Object => :immediate){}
2523 * end
2524 * ...
2525 * end
2526 * }
2527 * }
2528 * ...
2529 * th.raise # stop thread
2530 *
2531 * This example can also be written as the following, which you should use to
2532 * avoid asynchronous interrupts.
2533 *
2534 * flag = true
2535 * th = Thread.new{
2536 * Thread.handle_interrupt(RuntimeError => :on_blocking){
2537 * while true
2538 * ...
2539 * # reach safe point to invoke interrupt
2540 * break if flag == false
2541 * ...
2542 * end
2543 * }
2544 * }
2545 * ...
2546 * flag = false # stop thread
2547 */
2548
2549static VALUE
2550rb_thread_s_pending_interrupt_p(int argc, VALUE *argv, VALUE self)
2551{
2552 return rb_thread_pending_interrupt_p(argc, argv, GET_THREAD()->self);
2553}
2554
2555NORETURN(static void rb_threadptr_to_kill(rb_thread_t *th));
2556
2557static void
2558rb_threadptr_to_kill(rb_thread_t *th)
2559{
2560 VM_ASSERT(GET_THREAD() == th);
2561 rb_threadptr_pending_interrupt_clear(th);
2562 th->status = THREAD_RUNNABLE;
2563 th->to_kill = 1;
2564 th->ec->errinfo = INT2FIX(TAG_FATAL);
2565 EC_JUMP_TAG(th->ec, TAG_FATAL);
2566}
2567
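/*
 * Editorial note: the CAS loop below atomically fetches the interrupt flag
 * word, keeps the masked-out bits set (so masked interrupts stay pending
 * until the mask is lifted), and returns only the deliverable bits. A rough
 * non-atomic equivalent, for exposition only:
 *
 *     pending = ec->interrupt_flag;
 *     ec->interrupt_flag = pending & ec->interrupt_mask;  // keep masked bits pending
 *     return pending & ~ec->interrupt_mask;               // deliver the rest
 */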
2568static inline rb_atomic_t
2569threadptr_get_interrupts(rb_thread_t *th)
2570{
2571 rb_execution_context_t *ec = th->ec;
2572 rb_atomic_t interrupt;
2573 rb_atomic_t old;
2574
2575 old = ATOMIC_LOAD_RELAXED(ec->interrupt_flag);
2576 do {
2577 interrupt = old;
2578 old = ATOMIC_CAS(ec->interrupt_flag, interrupt, interrupt & ec->interrupt_mask);
2579 } while (old != interrupt);
2580 return interrupt & (rb_atomic_t)~ec->interrupt_mask;
2581}
2582
2583static void threadptr_interrupt_exec_exec(rb_thread_t *th);
2584
2585// Execute interrupts on currently running thread
2586// In certain situations, calling this function will raise an exception. Some examples are:
2587// * during VM shutdown (`rb_ractor_terminate_all`)
2588// * Call to Thread#exit for current thread (`rb_thread_kill`)
2589// * Call to Thread#raise for current thread
2590int
2591rb_threadptr_execute_interrupts(rb_thread_t *th, int blocking_timing)
2592{
2593 rb_atomic_t interrupt;
2594 int postponed_job_interrupt = 0;
2595 int ret = FALSE;
2596
2597 VM_ASSERT(GET_THREAD() == th);
2598
2599 if (th->ec->raised_flag) return ret;
2600
2601 while ((interrupt = threadptr_get_interrupts(th)) != 0) {
2602 int sig;
2603 int timer_interrupt;
2604 int pending_interrupt;
2605 int trap_interrupt;
2606 int terminate_interrupt;
2607
2608 timer_interrupt = interrupt & TIMER_INTERRUPT_MASK;
2609 pending_interrupt = interrupt & PENDING_INTERRUPT_MASK;
2610 postponed_job_interrupt = interrupt & POSTPONED_JOB_INTERRUPT_MASK;
2611 trap_interrupt = interrupt & TRAP_INTERRUPT_MASK;
2612 terminate_interrupt = interrupt & TERMINATE_INTERRUPT_MASK; // request from other ractors
2613
2614 if (interrupt & VM_BARRIER_INTERRUPT_MASK) {
2615 RB_VM_LOCKING();
2616 }
2617
2618 if (postponed_job_interrupt) {
2619 rb_postponed_job_flush(th->vm);
2620 }
2621
2622 if (trap_interrupt) {
2623 /* signal handling */
2624 if (th == th->vm->ractor.main_thread) {
2625 enum rb_thread_status prev_status = th->status;
2626
2627 th->status = THREAD_RUNNABLE;
2628 {
2629 while ((sig = rb_get_next_signal()) != 0) {
2630 ret |= rb_signal_exec(th, sig);
2631 }
2632 }
2633 th->status = prev_status;
2634 }
2635
2636 if (!ccan_list_empty(&th->interrupt_exec_tasks)) {
2637 enum rb_thread_status prev_status = th->status;
2638
2639 th->status = THREAD_RUNNABLE;
2640 {
2641 threadptr_interrupt_exec_exec(th);
2642 }
2643 th->status = prev_status;
2644 }
2645 }
2646
2647 /* exception from another thread */
2648 if (pending_interrupt && threadptr_pending_interrupt_active_p(th)) {
2649 VALUE err = rb_threadptr_pending_interrupt_deque(th, blocking_timing ? INTERRUPT_ON_BLOCKING : INTERRUPT_NONE);
2650 RUBY_DEBUG_LOG("err:%"PRIdVALUE, err);
2651 ret = TRUE;
2652
2653 if (UNDEF_P(err)) {
2654 /* no error */
2655 }
2656 else if (err == RUBY_FATAL_THREAD_KILLED /* Thread#kill received */ ||
2657 err == RUBY_FATAL_THREAD_TERMINATED /* Terminate thread */ ||
2658 err == INT2FIX(TAG_FATAL) /* Thread.exit etc. */ ) {
2659 terminate_interrupt = 1;
2660 }
2661 else {
2662 if (err == th->vm->special_exceptions[ruby_error_stream_closed]) {
2663 /* the only special exception to be queued across thread */
2664 err = ruby_vm_special_exception_copy(err);
2665 }
2666 /* set runnable if th was slept. */
2667 if (th->status == THREAD_STOPPED ||
2668 th->status == THREAD_STOPPED_FOREVER)
2669 th->status = THREAD_RUNNABLE;
2670 rb_exc_raise(err);
2671 }
2672 }
2673
2674 if (terminate_interrupt) {
2675 rb_threadptr_to_kill(th);
2676 }
2677
2678 if (timer_interrupt) {
2679 uint32_t limits_us = thread_default_quantum_ms * 1000;
2680
2681 if (th->priority > 0)
2682 limits_us <<= th->priority;
2683 else
2684 limits_us >>= -th->priority;
2685
2686 if (th->status == THREAD_RUNNABLE)
2687 th->running_time_us += 10 * 1000; // 10ms = 10_000us // TODO: use macro
2688
2689 VM_ASSERT(th->ec->cfp);
2690 EXEC_EVENT_HOOK(th->ec, RUBY_INTERNAL_EVENT_SWITCH, th->ec->cfp->self,
2691 0, 0, 0, Qundef);
2692
2693 rb_thread_schedule_limits(limits_us);
2694 }
2695 }
2696 return ret;
2697}
2698
2699void
2700rb_thread_execute_interrupts(VALUE thval)
2701{
2702 rb_threadptr_execute_interrupts(rb_thread_ptr(thval), 1);
2703}
2704
2705static void
2706rb_threadptr_ready(rb_thread_t *th)
2707{
2708 rb_threadptr_interrupt(th);
2709}
2710
2711static VALUE
2712rb_threadptr_raise(rb_thread_t *target_th, int argc, VALUE *argv)
2713{
2714 if (rb_threadptr_dead(target_th)) {
2715 return Qnil;
2716 }
2717
2718 VALUE exception = rb_exception_setup(argc, argv);
2719
2720 /* making an exception object can switch threads,
2721 so we need to check thread deadness again */
2722 if (rb_threadptr_dead(target_th)) {
2723 return Qnil;
2724 }
2725
2726 rb_threadptr_pending_interrupt_enque(target_th, exception);
2727 rb_threadptr_interrupt(target_th);
2728
2729 return Qnil;
2730}
2731
2732void
2733rb_threadptr_signal_raise(rb_thread_t *th, int sig)
2734{
2735 VALUE argv[2];
2736
2737 argv[0] = rb_eSignal;
2738 argv[1] = INT2FIX(sig);
2739 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2740}
2741
2742void
2743rb_threadptr_signal_exit(rb_thread_t *th)
2744{
2745 VALUE argv[2];
2746
2747 argv[0] = rb_eSystemExit;
2748 argv[1] = rb_str_new2("exit");
2749
2750 // TODO: check signal raise delivery
2751 rb_threadptr_raise(th->vm->ractor.main_thread, 2, argv);
2752}
2753
2754int
2755rb_ec_set_raised(rb_execution_context_t *ec)
2756{
2757 if (ec->raised_flag & RAISED_EXCEPTION) {
2758 return 1;
2759 }
2760 ec->raised_flag |= RAISED_EXCEPTION;
2761 return 0;
2762}
2763
2764int
2765rb_ec_reset_raised(rb_execution_context_t *ec)
2766{
2767 if (!(ec->raised_flag & RAISED_EXCEPTION)) {
2768 return 0;
2769 }
2770 ec->raised_flag &= ~RAISED_EXCEPTION;
2771 return 1;
2772}
2773
2774/*
2775 * Thread-safe IO closing mechanism.
2776 *
2777 * When an IO is closed while other threads or fibers are blocked on it, we need to:
2778 * 1. Track and notify all blocking operations through io->blocking_operations
2779 * 2. Ensure only one thread can close at a time using io->closing_ec
2780 * 3. Synchronize cleanup using wakeup_mutex
2781 *
2782 * The close process works as follows:
2783 * - First check if any thread is already closing (io->closing_ec)
2784 * - Set up wakeup_mutex for synchronization
2785 * - Iterate through all blocking operations in io->blocking_operations
2786 * - For each blocked fiber with a scheduler:
2787 * - Notify via rb_fiber_scheduler_fiber_interrupt
2788 * - For each blocked thread without a scheduler:
2789 * - Enqueue IOError via rb_threadptr_pending_interrupt_enque
2790 * - Wake via rb_threadptr_interrupt
2791 * - Wait on wakeup_mutex until all operations are cleaned up
2792 * - Only then clear closing state and allow actual close to proceed
2793 */
2794static VALUE
2795thread_io_close_notify_all(VALUE _io)
2796{
2797 struct rb_io *io = (struct rb_io *)_io;
2798
2799 size_t count = 0;
2800 rb_vm_t *vm = io->closing_ec->thread_ptr->vm;
2801 VALUE error = vm->special_exceptions[ruby_error_stream_closed];
2802
2803 struct rb_io_blocking_operation *blocking_operation;
2804 ccan_list_for_each(rb_io_blocking_operations(io), blocking_operation, list) {
2805 rb_execution_context_t *ec = blocking_operation->ec;
2806
2807 // If the operation is in progress, we need to interrupt it:
2808 if (ec) {
2809 rb_thread_t *thread = ec->thread_ptr;
2810
2811 VALUE result = RUBY_Qundef;
2812 if (thread->scheduler != Qnil) {
2813 result = rb_fiber_scheduler_fiber_interrupt(thread->scheduler, rb_fiberptr_self(ec->fiber_ptr), error);
2814 }
2815
2816 if (result == RUBY_Qundef) {
2817 // If the fiber scheduler didn't handle the interrupt, enqueue the error on the thread:
2818 rb_threadptr_pending_interrupt_enque(thread, error);
2819 rb_threadptr_interrupt(thread);
2820 }
2821 }
2822
2823 count += 1;
2824 }
2825
2826 return (VALUE)count;
2827}
2828
2829size_t
2830rb_thread_io_close_interrupt(struct rb_io *io)
2831{
2832 // We guard this operation with `io->closing_ec`: only the first thread to close proceeds past this check.
2833 if (io->closing_ec) {
2834 return 0;
2835 }
2836
2837 // If there are no blocking operations, we are done:
2838 if (ccan_list_empty(rb_io_blocking_operations(io))) {
2839 return 0;
2840 }
2841
2842 // Otherwise, we are now closing the IO:
2843 rb_execution_context_t *ec = GET_EC();
2844 io->closing_ec = ec;
2845
2846 // This is used to ensure the correct execution context is woken up after the blocking operation is interrupted:
2847 io->wakeup_mutex = rb_mutex_new();
2848 rb_mutex_allow_trap(io->wakeup_mutex, 1);
2849
2850 // We need to use a mutex here as entering the fiber scheduler may cause a context switch:
2851 VALUE result = rb_mutex_synchronize(io->wakeup_mutex, thread_io_close_notify_all, (VALUE)io);
2852
2853 return (size_t)result;
2854}
2855
2856void
2857rb_thread_io_close_wait(struct rb_io* io)
2858{
2859 VALUE wakeup_mutex = io->wakeup_mutex;
2860
2861 if (!RB_TEST(wakeup_mutex)) {
2862 // There was nobody else using this file when we closed it, so we never bothered to allocate a mutex:
2863 return;
2864 }
2865
2866 rb_mutex_lock(wakeup_mutex);
2867 while (!ccan_list_empty(rb_io_blocking_operations(io))) {
2868 rb_mutex_sleep(wakeup_mutex, Qnil);
2869 }
2870 rb_mutex_unlock(wakeup_mutex);
2871
2872 // We are done closing:
2873 io->wakeup_mutex = Qnil;
2874 io->closing_ec = NULL;
2875}
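/*
 * A usage sketch (editorial): how the two-phase close protocol above fits
 * together. The function `example_close' is hypothetical; the real close
 * sequence lives in io.c.
 *
 *     static void
 *     example_close(struct rb_io *io)
 *     {
 *         // Phase 1: interrupt every tracked blocking operation. Only the
 *         // first closer proceeds; later callers see io->closing_ec set
 *         // and get 0 back.
 *         rb_thread_io_close_interrupt(io);
 *
 *         // Phase 2: sleep on wakeup_mutex until every interrupted
 *         // operation has removed itself from io->blocking_operations.
 *         rb_thread_io_close_wait(io);
 *
 *         // No thread or fiber is blocked on io any more; it is now safe
 *         // to close(io->fd) and release the descriptor.
 *     }
 */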
2876
2877void
2878rb_thread_fd_close(int fd)
2879{
2880 rb_warn("rb_thread_fd_close is deprecated (and is now a no-op).");
2881}
2882
2883/*
2884 * call-seq:
2885 * thr.raise
2886 * thr.raise(string)
2887 * thr.raise(exception [, string [, array]])
2888 *
2889 * Raises an exception from the given thread. The caller does not have to be
2890 * +thr+. See Kernel#raise for more information.
2891 *
2892 * Thread.abort_on_exception = true
2893 * a = Thread.new { sleep(200) }
2894 * a.raise("Gotcha")
2895 *
2896 * This will produce:
2897 *
2898 * prog.rb:3: Gotcha (RuntimeError)
2899 * from prog.rb:2:in `initialize'
2900 * from prog.rb:2:in `new'
2901 * from prog.rb:2
2902 */
2903
2904static VALUE
2905thread_raise_m(int argc, VALUE *argv, VALUE self)
2906{
2907 rb_thread_t *target_th = rb_thread_ptr(self);
2908 const rb_thread_t *current_th = GET_THREAD();
2909
2910 threadptr_check_pending_interrupt_queue(target_th);
2911 rb_threadptr_raise(target_th, argc, argv);
2912
2913 /* To perform Thread.current.raise as Kernel.raise */
2914 if (current_th == target_th) {
2915 RUBY_VM_CHECK_INTS(target_th->ec);
2916 }
2917 return Qnil;
2918}
2919
2920
2921/*
2922 * call-seq:
2923 * thr.exit -> thr
2924 * thr.kill -> thr
2925 * thr.terminate -> thr
2926 *
2927 * Terminates +thr+ and schedules another thread to be run, returning
2928 * the terminated Thread. If this is the main thread, or the last
2929 * thread, exits the process.
2930 */
2931
2932VALUE
2933rb_thread_kill(VALUE thread)
2934{
2935 rb_thread_t *target_th = rb_thread_ptr(thread);
2936
2937 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2938 return thread;
2939 }
2940 if (target_th == target_th->vm->ractor.main_thread) {
2941 rb_exit(EXIT_SUCCESS);
2942 }
2943
2944 RUBY_DEBUG_LOG("target_th:%u", rb_th_serial(target_th));
2945
2946 if (target_th == GET_THREAD()) {
2947 /* kill myself immediately */
2948 rb_threadptr_to_kill(target_th);
2949 }
2950 else {
2951 threadptr_check_pending_interrupt_queue(target_th);
2952 rb_threadptr_pending_interrupt_enque(target_th, RUBY_FATAL_THREAD_KILLED);
2953 rb_threadptr_interrupt(target_th);
2954 }
2955
2956 return thread;
2957}
2958
2959int
2960rb_thread_to_be_killed(VALUE thread)
2961{
2962 rb_thread_t *target_th = rb_thread_ptr(thread);
2963
2964 if (target_th->to_kill || target_th->status == THREAD_KILLED) {
2965 return TRUE;
2966 }
2967 return FALSE;
2968}
2969
2970/*
2971 * call-seq:
2972 * Thread.kill(thread) -> thread
2973 *
2974 * Causes the given +thread+ to exit; see also Thread::exit.
2975 *
2976 * count = 0
2977 * a = Thread.new { loop { count += 1 } }
2978 * sleep(0.1) #=> 0
2979 * Thread.kill(a) #=> #<Thread:0x401b3d30 dead>
2980 * count #=> 93947
2981 * a.alive? #=> false
2982 */
2983
2984static VALUE
2985rb_thread_s_kill(VALUE obj, VALUE th)
2986{
2987 return rb_thread_kill(th);
2988}
2989
2990
2991/*
2992 * call-seq:
2993 * Thread.exit -> thread
2994 *
2995 * Terminates the currently running thread and schedules another thread to be
2996 * run.
2997 *
2998 * If this thread is already marked to be killed, ::exit returns the Thread.
2999 *
3000 * If this is the main thread, or the last thread, exits the process.
3001 */
3002
3003static VALUE
3004rb_thread_exit(VALUE _)
3005{
3006 rb_thread_t *th = GET_THREAD();
3007 return rb_thread_kill(th->self);
3008}
3009
3010
3011/*
3012 * call-seq:
3013 * thr.wakeup -> thr
3014 *
3015 * Marks a given thread as eligible for scheduling; however, it may still
3016 * remain blocked on I/O.
3017 *
3018 * *Note:* This does not invoke the scheduler; see #run for more information.
3019 *
3020 * c = Thread.new { Thread.stop; puts "hey!" }
3021 * sleep 0.1 while c.status!='sleep'
3022 * c.wakeup
3023 * c.join
3024 * #=> "hey!"
3025 */
3026
3027VALUE
3028rb_thread_wakeup(VALUE thread)
3029{
3030 if (!RTEST(rb_thread_wakeup_alive(thread))) {
3031 rb_raise(rb_eThreadError, "killed thread");
3032 }
3033 return thread;
3034}
3035
3036VALUE
3037rb_thread_wakeup_alive(VALUE thread)
3038{
3039 rb_thread_t *target_th = rb_thread_ptr(thread);
3040 if (target_th->status == THREAD_KILLED) return Qnil;
3041
3042 rb_threadptr_ready(target_th);
3043
3044 if (target_th->status == THREAD_STOPPED ||
3045 target_th->status == THREAD_STOPPED_FOREVER) {
3046 target_th->status = THREAD_RUNNABLE;
3047 }
3048
3049 return thread;
3050}
3051
3052
3053/*
3054 * call-seq:
3055 * thr.run -> thr
3056 *
3057 * Wakes up +thr+, making it eligible for scheduling.
3058 *
3059 * a = Thread.new { puts "a"; Thread.stop; puts "c" }
3060 * sleep 0.1 while a.status!='sleep'
3061 * puts "Got here"
3062 * a.run
3063 * a.join
3064 *
3065 * This will produce:
3066 *
3067 * a
3068 * Got here
3069 * c
3070 *
3071 * See also the instance method #wakeup.
3072 */
3073
3074VALUE
3075rb_thread_run(VALUE thread)
3076{
3077 rb_thread_wakeup(thread);
3078 rb_thread_schedule();
3079 return thread;
3080}
3081
3082
3083VALUE
3084rb_thread_stop(void)
3085{
3086 if (rb_thread_alone()) {
3087 rb_raise(rb_eThreadError,
3088 "stopping only thread\n\tnote: use sleep to stop forever");
3089 }
3090 rb_thread_sleep_deadly();
3091 return Qnil;
3092}
3093
3094/*
3095 * call-seq:
3096 * Thread.stop -> nil
3097 *
3098 * Stops execution of the current thread, putting it into a ``sleep'' state,
3099 * and schedules execution of another thread.
3100 *
3101 * a = Thread.new { print "a"; Thread.stop; print "c" }
3102 * sleep 0.1 while a.status!='sleep'
3103 * print "b"
3104 * a.run
3105 * a.join
3106 * #=> "abc"
3107 */
3108
3109static VALUE
3110thread_stop(VALUE _)
3111{
3112 return rb_thread_stop();
3113}
3114
3115/********************************************************************/
3116
3117VALUE
3118rb_thread_list(void)
3119{
3120 // TODO
3121 return rb_ractor_thread_list();
3122}
3123
3124/*
3125 * call-seq:
3126 * Thread.list -> array
3127 *
3128 * Returns an array of Thread objects for all threads that are either runnable
3129 * or stopped.
3130 *
3131 * Thread.new { sleep(200) }
3132 * Thread.new { 1000000.times {|i| i*i } }
3133 * Thread.new { Thread.stop }
3134 * Thread.list.each {|t| p t}
3135 *
3136 * This will produce:
3137 *
3138 * #<Thread:0x401b3e84 sleep>
3139 * #<Thread:0x401b3f38 run>
3140 * #<Thread:0x401b3fb0 sleep>
3141 * #<Thread:0x401bdf4c run>
3142 */
3143
3144static VALUE
3145thread_list(VALUE _)
3146{
3147 return rb_thread_list();
3148}
3149
3150VALUE
3151rb_thread_current(void)
3152{
3153 return GET_THREAD()->self;
3154}
3155
3156/*
3157 * call-seq:
3158 * Thread.current -> thread
3159 *
3160 * Returns the currently executing thread.
3161 *
3162 * Thread.current #=> #<Thread:0x401bdf4c run>
3163 */
3164
3165static VALUE
3166thread_s_current(VALUE klass)
3167{
3168 return rb_thread_current();
3169}
3170
3171VALUE
3172rb_thread_main(void)
3173{
3174 return GET_RACTOR()->threads.main->self;
3175}
3176
3177/*
3178 * call-seq:
3179 * Thread.main -> thread
3180 *
3181 * Returns the main thread.
3182 */
3183
3184static VALUE
3185rb_thread_s_main(VALUE klass)
3186{
3187 return rb_thread_main();
3188}
3189
3190
3191/*
3192 * call-seq:
3193 * Thread.abort_on_exception -> true or false
3194 *
3195 * Returns the status of the global ``abort on exception'' condition.
3196 *
3197 * The default is +false+.
3198 *
3199 * When set to +true+, if any thread is aborted by an exception, the
3200 * raised exception will be re-raised in the main thread.
3201 *
3202 * Can also be specified by the global $DEBUG flag or command line option
3203 * +-d+.
3204 *
3205 * See also ::abort_on_exception=.
3206 *
3207 * There is also an instance level method to set this for a specific thread,
3208 * see #abort_on_exception.
3209 */
3210
3211static VALUE
3212rb_thread_s_abort_exc(VALUE _)
3213{
3214 return RBOOL(GET_THREAD()->vm->thread_abort_on_exception);
3215}
3216
3217
3218/*
3219 * call-seq:
3220 * Thread.abort_on_exception= boolean -> true or false
3221 *
3222 * When set to +true+, if any thread is aborted by an exception, the
3223 * raised exception will be re-raised in the main thread.
3224 * Returns the new state.
3225 *
3226 * Thread.abort_on_exception = true
3227 * t1 = Thread.new do
3228 * puts "In new thread"
3229 * raise "Exception from thread"
3230 * end
3231 * sleep(1)
3232 * puts "not reached"
3233 *
3234 * This will produce:
3235 *
3236 * In new thread
3237 * prog.rb:4: Exception from thread (RuntimeError)
3238 * from prog.rb:2:in `initialize'
3239 * from prog.rb:2:in `new'
3240 * from prog.rb:2
3241 *
3242 * See also ::abort_on_exception.
3243 *
3244 * There is also an instance level method to set this for a specific thread,
3245 * see #abort_on_exception=.
3246 */
3247
3248static VALUE
3249rb_thread_s_abort_exc_set(VALUE self, VALUE val)
3250{
3251 GET_THREAD()->vm->thread_abort_on_exception = RTEST(val);
3252 return val;
3253}
3254
3255
3256/*
3257 * call-seq:
3258 * thr.abort_on_exception -> true or false
3259 *
3260 * Returns the status of the thread-local ``abort on exception'' condition for
3261 * this +thr+.
3262 *
3263 * The default is +false+.
3264 *
3265 * See also #abort_on_exception=.
3266 *
3267 * There is also a class level method to set this for all threads, see
3268 * ::abort_on_exception.
3269 */
3270
3271static VALUE
3272rb_thread_abort_exc(VALUE thread)
3273{
3274 return RBOOL(rb_thread_ptr(thread)->abort_on_exception);
3275}
3276
3277
3278/*
3279 * call-seq:
3280 * thr.abort_on_exception= boolean -> true or false
3281 *
3282 * When set to +true+, if this +thr+ is aborted by an exception, the
3283 * raised exception will be re-raised in the main thread.
3284 *
3285 * See also #abort_on_exception.
3286 *
3287 * There is also a class level method to set this for all threads, see
3288 * ::abort_on_exception=.
3289 */
3290
3291static VALUE
3292rb_thread_abort_exc_set(VALUE thread, VALUE val)
3293{
3294 rb_thread_ptr(thread)->abort_on_exception = RTEST(val);
3295 return val;
3296}
3297
3298
3299/*
3300 * call-seq:
3301 * Thread.report_on_exception -> true or false
3302 *
3303 * Returns the status of the global ``report on exception'' condition.
3304 *
3305 * The default is +true+ since Ruby 2.5.
3306 *
3307 * All threads created when this flag is true will report
3308 * a message on $stderr if an exception kills the thread.
3309 *
3310 * Thread.new { 1.times { raise } }
3311 *
3312 * will produce this output on $stderr:
3313 *
3314 * #<Thread:...> terminated with exception (report_on_exception is true):
3315 * Traceback (most recent call last):
3316 * 2: from -e:1:in `block in <main>'
3317 * 1: from -e:1:in `times'
3318 *
3319 * This is done to catch errors in threads early.
3320 * In some cases, you might not want this output.
3321 * There are multiple ways to avoid the extra output:
3322 *
3323 * * If the exception is not intended, the best is to fix the cause of
3324 * the exception so it does not happen anymore.
3325 * * If the exception is intended, it might be better to rescue it closer to
3326 * where it is raised rather than let it kill the Thread.
3327 * * If it is guaranteed the Thread will be joined with Thread#join or
3328 * Thread#value, then it is safe to disable this report with
3329 * <code>Thread.current.report_on_exception = false</code>
3330 * when starting the Thread.
3331 * However, this might handle the exception much later, or not at all
3332 * if the Thread is never joined due to the parent thread being blocked, etc.
3333 *
3334 * See also ::report_on_exception=.
3335 *
3336 * There is also an instance level method to set this for a specific thread,
3337 * see #report_on_exception=.
3338 *
3339 */
3340
3341static VALUE
3342rb_thread_s_report_exc(VALUE _)
3343{
3344 return RBOOL(GET_THREAD()->vm->thread_report_on_exception);
3345}
3346
3347
3348/*
3349 * call-seq:
3350 * Thread.report_on_exception= boolean -> true or false
3351 *
3352 * Returns the new state.
3353 * When set to +true+, all threads created afterwards will inherit the
3354 * condition and report a message on $stderr if an exception kills a thread:
3355 *
3356 * Thread.report_on_exception = true
3357 * t1 = Thread.new do
3358 * puts "In new thread"
3359 * raise "Exception from thread"
3360 * end
3361 * sleep(1)
3362 * puts "In the main thread"
3363 *
3364 * This will produce:
3365 *
3366 * In new thread
3367 * #<Thread:...prog.rb:2> terminated with exception (report_on_exception is true):
3368 * Traceback (most recent call last):
3369 * prog.rb:4:in `block in <main>': Exception from thread (RuntimeError)
3370 * In the main thread
3371 *
3372 * See also ::report_on_exception.
3373 *
3374 * There is also an instance level method to set this for a specific thread,
3375 * see #report_on_exception=.
3376 */
3377
3378static VALUE
3379rb_thread_s_report_exc_set(VALUE self, VALUE val)
3380{
3381 GET_THREAD()->vm->thread_report_on_exception = RTEST(val);
3382 return val;
3383}
3384
3385
3386/*
3387 * call-seq:
3388 * Thread.ignore_deadlock -> true or false
3389 *
3390 * Returns the status of the global ``ignore deadlock'' condition.
3391 * The default is +false+, so that deadlock conditions are not ignored.
3392 *
3393 * See also ::ignore_deadlock=.
3394 *
3395 */
3396
3397static VALUE
3398rb_thread_s_ignore_deadlock(VALUE _)
3399{
3400 return RBOOL(GET_THREAD()->vm->thread_ignore_deadlock);
3401}
3402
3403
3404/*
3405 * call-seq:
3406 * Thread.ignore_deadlock = boolean -> true or false
3407 *
3408 * Returns the new state.
3409 * When set to +true+, the VM will not check for deadlock conditions.
3410 * It is only useful to set this if your application can break a
3411 * deadlock condition via some other means, such as a signal.
3412 *
3413 * Thread.ignore_deadlock = true
3414 * queue = Thread::Queue.new
3415 *
3416 * trap(:SIGUSR1){queue.push "Received signal"}
3417 *
3418 * # raises fatal error unless ignoring deadlock
3419 * puts queue.pop
3420 *
3421 * See also ::ignore_deadlock.
3422 */
3423
3424static VALUE
3425rb_thread_s_ignore_deadlock_set(VALUE self, VALUE val)
3426{
3427 GET_THREAD()->vm->thread_ignore_deadlock = RTEST(val);
3428 return val;
3429}
3430
3431
3432/*
3433 * call-seq:
3434 * thr.report_on_exception -> true or false
3435 *
3436 * Returns the status of the thread-local ``report on exception'' condition for
3437 * this +thr+.
3438 *
3439 * The default value when creating a Thread is the value of
3440 * the global flag Thread.report_on_exception.
3441 *
3442 * See also #report_on_exception=.
3443 *
3444 * There is also a class level method to set this for all new threads, see
3445 * ::report_on_exception=.
3446 */
3447
3448static VALUE
3449rb_thread_report_exc(VALUE thread)
3450{
3451 return RBOOL(rb_thread_ptr(thread)->report_on_exception);
3452}
3453
3454
3455/*
3456 * call-seq:
3457 * thr.report_on_exception= boolean -> true or false
3458 *
3459 * When set to +true+, a message is printed on $stderr if an exception
3460 * kills this +thr+. See ::report_on_exception for details.
3461 *
3462 * See also #report_on_exception.
3463 *
3464 * There is also a class level method to set this for all new threads, see
3465 * ::report_on_exception=.
3466 */
3467
3468static VALUE
3469rb_thread_report_exc_set(VALUE thread, VALUE val)
3470{
3471 rb_thread_ptr(thread)->report_on_exception = RTEST(val);
3472 return val;
3473}
3474
3475
3476/*
3477 * call-seq:
3478 * thr.group -> thgrp or nil
3479 *
3480 * Returns the ThreadGroup which contains the given thread.
3481 *
3482 * Thread.main.group #=> #<ThreadGroup:0x4029d914>
3483 */
3484
3485VALUE
3486rb_thread_group(VALUE thread)
3487{
3488 return rb_thread_ptr(thread)->thgroup;
3489}
3490
3491static const char *
3492thread_status_name(rb_thread_t *th, int detail)
3493{
3494 switch (th->status) {
3495 case THREAD_RUNNABLE:
3496 return th->to_kill ? "aborting" : "run";
3497 case THREAD_STOPPED_FOREVER:
3498 if (detail) return "sleep_forever";
3499 case THREAD_STOPPED:
3500 return "sleep";
3501 case THREAD_KILLED:
3502 return "dead";
3503 default:
3504 return "unknown";
3505 }
3506}
3507
3508static int
3509rb_threadptr_dead(rb_thread_t *th)
3510{
3511 return th->status == THREAD_KILLED;
3512}
3513
3514
3515/*
3516 * call-seq:
3517 * thr.status -> string, false or nil
3518 *
3519 * Returns the status of +thr+.
3520 *
3521 * [<tt>"sleep"</tt>]
3522 * Returned if this thread is sleeping or waiting on I/O
3523 * [<tt>"run"</tt>]
3524 * When this thread is executing
3525 * [<tt>"aborting"</tt>]
3526 * If this thread is aborting
3527 * [+false+]
3528 * When this thread is terminated normally
3529 * [+nil+]
3530 * If terminated with an exception.
3531 *
3532 * a = Thread.new { raise("die now") }
3533 * b = Thread.new { Thread.stop }
3534 * c = Thread.new { Thread.exit }
3535 * d = Thread.new { sleep }
3536 * d.kill #=> #<Thread:0x401b3678 aborting>
3537 * a.status #=> nil
3538 * b.status #=> "sleep"
3539 * c.status #=> false
3540 * d.status #=> "aborting"
3541 * Thread.current.status #=> "run"
3542 *
3543 * See also the instance methods #alive? and #stop?
3544 */
3545
3546static VALUE
3547rb_thread_status(VALUE thread)
3548{
3549 rb_thread_t *target_th = rb_thread_ptr(thread);
3550
3551 if (rb_threadptr_dead(target_th)) {
3552 if (!NIL_P(target_th->ec->errinfo) &&
3553 !FIXNUM_P(target_th->ec->errinfo)) {
3554 return Qnil;
3555 }
3556 else {
3557 return Qfalse;
3558 }
3559 }
3560 else {
3561 return rb_str_new2(thread_status_name(target_th, FALSE));
3562 }
3563}
3564
3565
3566/*
3567 * call-seq:
3568 * thr.alive? -> true or false
3569 *
3570 * Returns +true+ if +thr+ is running or sleeping.
3571 *
3572 * thr = Thread.new { }
3573 * thr.join #=> #<Thread:0x401b3fb0 dead>
3574 * Thread.current.alive? #=> true
3575 * thr.alive? #=> false
3576 *
3577 * See also #stop? and #status.
3578 */
3579
3580static VALUE
3581rb_thread_alive_p(VALUE thread)
3582{
3583 return RBOOL(!thread_finished(rb_thread_ptr(thread)));
3584}
3585
3586/*
3587 * call-seq:
3588 * thr.stop? -> true or false
3589 *
3590 * Returns +true+ if +thr+ is dead or sleeping.
3591 *
3592 * a = Thread.new { Thread.stop }
3593 * b = Thread.current
3594 * a.stop? #=> true
3595 * b.stop? #=> false
3596 *
3597 * See also #alive? and #status.
3598 */
3599
3600static VALUE
3601rb_thread_stop_p(VALUE thread)
3602{
3603 rb_thread_t *th = rb_thread_ptr(thread);
3604
3605 if (rb_threadptr_dead(th)) {
3606 return Qtrue;
3607 }
3608 return RBOOL(th->status == THREAD_STOPPED || th->status == THREAD_STOPPED_FOREVER);
3609}
3610
3611/*
3612 * call-seq:
3613 * thr.name -> string
3614 *
3615 * Returns the name of the thread.
3616 */
3617
3618static VALUE
3619rb_thread_getname(VALUE thread)
3620{
3621 return rb_thread_ptr(thread)->name;
3622}
3623
3624/*
3625 * call-seq:
3626 * thr.name=(name) -> string
3627 *
3628 * Sets the name of the Ruby thread to the given name.
3629 * On some platforms, the name may also be applied to the underlying pthread and/or reported to the kernel.
3630 */
3631
3632static VALUE
3633rb_thread_setname(VALUE thread, VALUE name)
3634{
3635 rb_thread_t *target_th = rb_thread_ptr(thread);
3636
3637 if (!NIL_P(name)) {
3638 rb_encoding *enc;
3639 StringValueCStr(name);
3640 enc = rb_enc_get(name);
3641 if (!rb_enc_asciicompat(enc)) {
3642 rb_raise(rb_eArgError, "ASCII incompatible encoding (%s)",
3643 rb_enc_name(enc));
3644 }
3645 name = rb_str_new_frozen(name);
3646 }
3647 target_th->name = name;
3648 if (threadptr_initialized(target_th) && target_th->has_dedicated_nt) {
3649 native_set_another_thread_name(target_th->nt->thread_id, name);
3650 }
3651 return name;
3652}
3653
3654#if USE_NATIVE_THREAD_NATIVE_THREAD_ID
3655/*
3656 * call-seq:
3657 * thr.native_thread_id -> integer
3658 *
3659 * Returns the native thread ID used by the Ruby thread.
3660 *
3661 * The ID depends on the OS. (not POSIX thread ID returned by pthread_self(3))
3662 * * On Linux it is TID returned by gettid(2).
3663 * * On macOS it is the system-wide unique integral ID of thread returned
3664 * by pthread_threadid_np(3).
3665 * * On FreeBSD it is the unique integral ID of the thread returned by
3666 * pthread_getthreadid_np(3).
3667 * * On Windows it is the thread identifier returned by GetThreadId().
3668 * * On other platforms, it raises NotImplementedError.
3669 *
3670 * NOTE:
3671 * If the thread is not yet associated with, or already disassociated from,
3672 * a native thread, it returns _nil_.
3673 * If the Ruby implementation uses an M:N thread model, the ID may change
3674 * depending on the timing.
3675 */
3676
3677static VALUE
3678rb_thread_native_thread_id(VALUE thread)
3679{
3680 rb_thread_t *target_th = rb_thread_ptr(thread);
3681 if (rb_threadptr_dead(target_th)) return Qnil;
3682 return native_thread_native_thread_id(target_th);
3683}
3684#else
3685# define rb_thread_native_thread_id rb_f_notimplement
3686#endif
3687
3688/*
3689 * call-seq:
3690 * thr.to_s -> string
3691 *
3692 * Dump the name, id, and status of _thr_ to a string.
3693 */
3694
3695static VALUE
3696rb_thread_to_s(VALUE thread)
3697{
3698 VALUE cname = rb_class_path(rb_obj_class(thread));
3699 rb_thread_t *target_th = rb_thread_ptr(thread);
3700 const char *status;
3701 VALUE str, loc;
3702
3703 status = thread_status_name(target_th, TRUE);
3704 str = rb_sprintf("#<%"PRIsVALUE":%p", cname, (void *)thread);
3705 if (!NIL_P(target_th->name)) {
3706 rb_str_catf(str, "@%"PRIsVALUE, target_th->name);
3707 }
3708 if ((loc = threadptr_invoke_proc_location(target_th)) != Qnil) {
3709 rb_str_catf(str, " %"PRIsVALUE":%"PRIsVALUE,
3710 RARRAY_AREF(loc, 0), RARRAY_AREF(loc, 1));
3711 }
3712 rb_str_catf(str, " %s>", status);
3713
3714 return str;
3715}
3716
3717/* variables for recursive traversals */
3718#define recursive_key id__recursive_key__
3719
3720static VALUE
3721threadptr_local_aref(rb_thread_t *th, ID id)
3722{
3723 if (id == recursive_key) {
3724 return th->ec->local_storage_recursive_hash;
3725 }
3726 else {
3727 VALUE val;
3728 struct rb_id_table *local_storage = th->ec->local_storage;
3729
3730 if (local_storage != NULL && rb_id_table_lookup(local_storage, id, &val)) {
3731 return val;
3732 }
3733 else {
3734 return Qnil;
3735 }
3736 }
3737}
3738
3739VALUE
3740rb_thread_local_aref(VALUE thread, ID id)
3741{
3742 return threadptr_local_aref(rb_thread_ptr(thread), id);
3743}
3744
3745/*
3746 * call-seq:
3747 * thr[sym] -> obj or nil
3748 *
3749 * Attribute Reference---Returns the value of a fiber-local variable (current thread's root fiber
3750 * if not explicitly inside a Fiber), using either a symbol or a string name.
3751 * If the specified variable does not exist, returns +nil+.
3752 *
3753 * [
3754 * Thread.new { Thread.current["name"] = "A" },
3755 * Thread.new { Thread.current[:name] = "B" },
3756 * Thread.new { Thread.current["name"] = "C" }
3757 * ].each do |th|
3758 * th.join
3759 * puts "#{th.inspect}: #{th[:name]}"
3760 * end
3761 *
3762 * This will produce:
3763 *
3764 * #<Thread:0x00000002a54220 dead>: A
3765 * #<Thread:0x00000002a541a8 dead>: B
3766 * #<Thread:0x00000002a54130 dead>: C
3767 *
3768 * Thread#[] and Thread#[]= are not thread-local but fiber-local.
3769 * This confusion did not exist in Ruby 1.8 because
3770 * fibers are only available since Ruby 1.9.
3771 * Ruby 1.9 chose to make these methods fiber-local in order to preserve
3772 * the following idiom for dynamic scope.
3773 *
3774 * def meth(newvalue)
3775 * begin
3776 * oldvalue = Thread.current[:name]
3777 * Thread.current[:name] = newvalue
3778 * yield
3779 * ensure
3780 * Thread.current[:name] = oldvalue
3781 * end
3782 * end
3783 *
3784 * The idiom may not work as dynamic scope if the methods are thread-local
3785 * and a given block switches fiber.
3786 *
3787 * f = Fiber.new {
3788 * meth(1) {
3789 * Fiber.yield
3790 * }
3791 * }
3792 * meth(2) {
3793 * f.resume
3794 * }
3795 * f.resume
3796 * p Thread.current[:name]
3797 * #=> nil if fiber-local
3798 * #=> 2 if thread-local (The value 2 is leaked to outside of meth method.)
3799 *
3800 * For thread-local variables, please see #thread_variable_get and
3801 * #thread_variable_set.
3802 *
3803 */
3804
3805static VALUE
3806rb_thread_aref(VALUE thread, VALUE key)
3807{
3808 ID id = rb_check_id(&key);
3809 if (!id) return Qnil;
3810 return rb_thread_local_aref(thread, id);
3811}
3812
3813/*
3814 * call-seq:
3815 * thr.fetch(sym) -> obj
3816 * thr.fetch(sym) { } -> obj
3817 * thr.fetch(sym, default) -> obj
3818 *
3819 * Returns a fiber-local for the given key. If the key can't be
3820 * found, there are several options: With no other arguments, it will
3821 * raise a KeyError exception; if <i>default</i> is given, then that
3822 * will be returned; if the optional code block is specified, then
3823 * that will be run and its result returned. See Thread#[] and
3824 * Hash#fetch.
3825 */
3826static VALUE
3827rb_thread_fetch(int argc, VALUE *argv, VALUE self)
3828{
3829 VALUE key, val;
3830 ID id;
3831 rb_thread_t *target_th = rb_thread_ptr(self);
3832 int block_given;
3833
3834 rb_check_arity(argc, 1, 2);
3835 key = argv[0];
3836
3837 block_given = rb_block_given_p();
3838 if (block_given && argc == 2) {
3839 rb_warn("block supersedes default value argument");
3840 }
3841
3842 id = rb_check_id(&key);
3843
3844 if (id == recursive_key) {
3845 return target_th->ec->local_storage_recursive_hash;
3846 }
3847 else if (id && target_th->ec->local_storage &&
3848 rb_id_table_lookup(target_th->ec->local_storage, id, &val)) {
3849 return val;
3850 }
3851 else if (block_given) {
3852 return rb_yield(key);
3853 }
3854 else if (argc == 1) {
3855 rb_key_err_raise(rb_sprintf("key not found: %+"PRIsVALUE, key), self, key);
3856 }
3857 else {
3858 return argv[1];
3859 }
3860}
3861
3862static VALUE
3863threadptr_local_aset(rb_thread_t *th, ID id, VALUE val)
3864{
3865 if (id == recursive_key) {
3866 th->ec->local_storage_recursive_hash = val;
3867 return val;
3868 }
3869 else {
3870 struct rb_id_table *local_storage = th->ec->local_storage;
3871
3872 if (NIL_P(val)) {
3873 if (!local_storage) return Qnil;
3874 rb_id_table_delete(local_storage, id);
3875 return Qnil;
3876 }
3877 else {
3878 if (local_storage == NULL) {
3879 th->ec->local_storage = local_storage = rb_id_table_create(0);
3880 }
3881 rb_id_table_insert(local_storage, id, val);
3882 return val;
3883 }
3884 }
3885}
3886
3887VALUE
3888rb_thread_local_aset(VALUE thread, ID id, VALUE val)
3889{
3890 if (OBJ_FROZEN(thread)) {
3891 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3892 }
3893
3894 return threadptr_local_aset(rb_thread_ptr(thread), id, val);
3895}
3896
3897/*
3898 * call-seq:
3899 * thr[sym] = obj -> obj
3900 *
3901 * Attribute Assignment---Sets or creates the value of a fiber-local variable,
3902 * using either a symbol or a string.
3903 *
3904 * See also Thread#[].
3905 *
3906 * For thread-local variables, please see #thread_variable_set and
3907 * #thread_variable_get.
3908 */
3909
3910static VALUE
3911rb_thread_aset(VALUE self, VALUE id, VALUE val)
3912{
3913 return rb_thread_local_aset(self, rb_to_id(id), val);
3914}
3915
3916/*
3917 * call-seq:
3918 * thr.thread_variable_get(key) -> obj or nil
3919 *
3920 * Returns the value of a thread local variable that has been set. Note that
3921 * these are different than fiber local values. For fiber local values,
3922 * please see Thread#[] and Thread#[]=.
3923 *
3924 * Thread local values are carried along with threads, and do not respect
3925 * fibers. For example:
3926 *
3927 * Thread.new {
3928 * Thread.current.thread_variable_set("foo", "bar") # set a thread local
3929 * Thread.current["foo"] = "bar" # set a fiber local
3930 *
3931 * Fiber.new {
3932 * Fiber.yield [
3933 * Thread.current.thread_variable_get("foo"), # get the thread local
3934 * Thread.current["foo"], # get the fiber local
3935 * ]
3936 * }.resume
3937 * }.join.value # => ['bar', nil]
3938 *
3939 * The value "bar" is returned for the thread local, where nil is returned
3940 * for the fiber local. The fiber is executed in the same thread, so the
3941 * thread local values are available.
3942 */
3943
3944static VALUE
3945rb_thread_variable_get(VALUE thread, VALUE key)
3946{
3947 VALUE locals;
3948 VALUE symbol = rb_to_symbol(key);
3949
3950 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
3951 return Qnil;
3952 }
3953 locals = rb_thread_local_storage(thread);
3954 return rb_hash_aref(locals, symbol);
3955}
3956
3957/*
3958 * call-seq:
3959 * thr.thread_variable_set(key, value)
3960 *
3961 * Sets a thread local with +key+ to +value+. Note that these are local to
3962 * threads, and not to fibers. Please see Thread#thread_variable_get and
3963 * Thread#[] for more information.
3964 */
3965
3966static VALUE
3967rb_thread_variable_set(VALUE thread, VALUE key, VALUE val)
3968{
3969 VALUE locals;
3970
3971 if (OBJ_FROZEN(thread)) {
3972 rb_frozen_error_raise(thread, "can't modify frozen thread locals");
3973 }
3974
3975 locals = rb_thread_local_storage(thread);
3976 return rb_hash_aset(locals, rb_to_symbol(key), val);
3977}
3978
3979/*
3980 * call-seq:
3981 * thr.key?(sym) -> true or false
3982 *
3983 * Returns +true+ if the given string (or symbol) exists as a fiber-local
3984 * variable.
3985 *
3986 * me = Thread.current
3987 * me[:oliver] = "a"
3988 * me.key?(:oliver) #=> true
3989 * me.key?(:stanley) #=> false
3990 */
3991
3992static VALUE
3993rb_thread_key_p(VALUE self, VALUE key)
3994{
3995 VALUE val;
3996 ID id = rb_check_id(&key);
3997 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
3998
3999 if (!id || local_storage == NULL) {
4000 return Qfalse;
4001 }
4002 return RBOOL(rb_id_table_lookup(local_storage, id, &val));
4003}
4004
4005static enum rb_id_table_iterator_result
4006thread_keys_i(ID key, VALUE value, void *ary)
4007{
4008 rb_ary_push((VALUE)ary, ID2SYM(key));
4009 return ID_TABLE_CONTINUE;
4010}
4011
4012int
4013rb_thread_alone(void)
4014{
4015 // TODO
4016 return rb_ractor_living_thread_num(GET_RACTOR()) == 1;
4017}
4018
4019/*
4020 * call-seq:
4021 * thr.keys -> array
4022 *
4023 * Returns an array of the names of the fiber-local variables (as Symbols).
4024 *
4025 * thr = Thread.new do
4026 * Thread.current[:cat] = 'meow'
4027 * Thread.current["dog"] = 'woof'
4028 * end
4029 * thr.join #=> #<Thread:0x401b3f10 dead>
4030 * thr.keys #=> [:dog, :cat]
4031 */
4032
4033static VALUE
4034rb_thread_keys(VALUE self)
4035{
4036 struct rb_id_table *local_storage = rb_thread_ptr(self)->ec->local_storage;
4037 VALUE ary = rb_ary_new();
4038
4039 if (local_storage) {
4040 rb_id_table_foreach(local_storage, thread_keys_i, (void *)ary);
4041 }
4042 return ary;
4043}
4044
4045static int
4046keys_i(VALUE key, VALUE value, VALUE ary)
4047{
4048 rb_ary_push(ary, key);
4049 return ST_CONTINUE;
4050}
4051
4052/*
4053 * call-seq:
4054 * thr.thread_variables -> array
4055 *
4056 * Returns an array of the names of the thread-local variables (as Symbols).
4057 *
4058 * thr = Thread.new do
4059 * Thread.current.thread_variable_set(:cat, 'meow')
4060 * Thread.current.thread_variable_set("dog", 'woof')
4061 * end
4062 * thr.join #=> #<Thread:0x401b3f10 dead>
4063 * thr.thread_variables #=> [:dog, :cat]
4064 *
4065 * Note that these are not fiber local variables. Please see Thread#[] and
4066 * Thread#thread_variable_get for more details.
4067 */
4068
4069static VALUE
4070rb_thread_variables(VALUE thread)
4071{
4072 VALUE locals;
4073 VALUE ary;
4074
4075 ary = rb_ary_new();
4076 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4077 return ary;
4078 }
4079 locals = rb_thread_local_storage(thread);
4080 rb_hash_foreach(locals, keys_i, ary);
4081
4082 return ary;
4083}
4084
4085/*
4086 * call-seq:
4087 * thr.thread_variable?(key) -> true or false
4088 *
4089 * Returns +true+ if the given string (or symbol) exists as a thread-local
4090 * variable.
4091 *
4092 * me = Thread.current
4093 * me.thread_variable_set(:oliver, "a")
4094 * me.thread_variable?(:oliver) #=> true
4095 * me.thread_variable?(:stanley) #=> false
4096 *
4097 * Note that these are not fiber local variables. Please see Thread#[] and
4098 * Thread#thread_variable_get for more details.
4099 */
4100
4101static VALUE
4102rb_thread_variable_p(VALUE thread, VALUE key)
4103{
4104 VALUE locals;
4105 VALUE symbol = rb_to_symbol(key);
4106
4107 if (LIKELY(!THREAD_LOCAL_STORAGE_INITIALISED_P(thread))) {
4108 return Qfalse;
4109 }
4110 locals = rb_thread_local_storage(thread);
4111
4112 return RBOOL(rb_hash_lookup(locals, symbol) != Qnil);
4113}
4114
4115/*
4116 * call-seq:
4117 * thr.priority -> integer
4118 *
4119 * Returns the priority of <i>thr</i>. The default is inherited from the
4120 * thread that created the new thread, or is zero for the
4121 * initial main thread; higher-priority threads will run more frequently
4122 * than lower-priority threads (but lower-priority threads can also run).
4123 *
4124 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4125 * some platforms.
4126 *
4127 * Thread.current.priority #=> 0
4128 */
4129
4130static VALUE
4131rb_thread_priority(VALUE thread)
4132{
4133 return INT2NUM(rb_thread_ptr(thread)->priority);
4134}
4135
4136
4137/*
4138 * call-seq:
4139 * thr.priority= integer -> thr
4140 *
4141 * Sets the priority of <i>thr</i> to <i>integer</i>. Higher-priority threads
4142 * will run more frequently than lower-priority threads (but lower-priority
4143 * threads can also run).
4144 *
4145 * This is just a hint for the Ruby thread scheduler. It may be ignored on
4146 * some platforms.
4147 *
4148 * count1 = count2 = 0
4149 * a = Thread.new do
4150 * loop { count1 += 1 }
4151 * end
4152 * a.priority = -1
4153 *
4154 * b = Thread.new do
4155 * loop { count2 += 1 }
4156 * end
4157 * b.priority = -2
4158 * sleep 1 #=> 1
4159 * count1 #=> 622504
4160 * count2 #=> 5832
4161 */
4162
4163static VALUE
4164rb_thread_priority_set(VALUE thread, VALUE prio)
4165{
4166 rb_thread_t *target_th = rb_thread_ptr(thread);
4167 int priority;
4168
4169#if USE_NATIVE_THREAD_PRIORITY
4170 target_th->priority = NUM2INT(prio);
4171    native_thread_apply_priority(target_th);
4172#else
4173 priority = NUM2INT(prio);
4174 if (priority > RUBY_THREAD_PRIORITY_MAX) {
4175 priority = RUBY_THREAD_PRIORITY_MAX;
4176 }
4177 else if (priority < RUBY_THREAD_PRIORITY_MIN) {
4178 priority = RUBY_THREAD_PRIORITY_MIN;
4179 }
4180 target_th->priority = (int8_t)priority;
4181#endif
4182 return INT2NUM(target_th->priority);
4183}
4184
4185/* for IO */
4186
4187#if defined(NFDBITS) && defined(HAVE_RB_FD_INIT)
4188
4189/*
4190 * several Unix platforms support file descriptors bigger than FD_SETSIZE
4191 * in the select(2) system call.
4192 *
4193 * - Linux 2.2.12 (?)
4194 * - NetBSD 1.2 (src/sys/kern/sys_generic.c:1.25)
4195 * select(2) documents how to allocate fd_set dynamically.
4196 * http://netbsd.gw.com/cgi-bin/man-cgi?select++NetBSD-4.0
4197 * - FreeBSD 2.2 (src/sys/kern/sys_generic.c:1.19)
4198 * - OpenBSD 2.0 (src/sys/kern/sys_generic.c:1.4)
4199 * select(2) documents how to allocate fd_set dynamically.
4200 * http://www.openbsd.org/cgi-bin/man.cgi?query=select&manpath=OpenBSD+4.4
4201 * - Solaris 8 has select_large_fdset
4202 * - Mac OS X 10.7 (Lion)
4203 * select(2) returns EINVAL if nfds is greater than FD_SETSIZE and
4204 * _DARWIN_UNLIMITED_SELECT (or _DARWIN_C_SOURCE) isn't defined.
4205 * https://developer.apple.com/library/archive/releasenotes/Darwin/SymbolVariantsRelNotes/index.html
4206 *
4207 * When fd_set is not big enough to hold big file descriptors,
4208 * it should be allocated dynamically.
4209 * Note that this assumes fd_set is structured as a bitmap.
4210 *
4211 * rb_fd_init allocates the memory.
4212 * rb_fd_term frees the memory.
4213 * rb_fd_set may re-allocate the bitmap.
4214 *
4215 * So rb_fd_set doesn't reject file descriptors bigger than FD_SETSIZE.
4216 */
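
/*
 * A minimal usage sketch (editorial note, not part of the original source):
 * C extensions can use the rb_fdset_t API instead of a raw fd_set so that
 * descriptors above FD_SETSIZE still work. `fd` is an assumed descriptor
 * being watched for readability:
 *
 *   rb_fdset_t rfds;
 *   struct timeval tv = { 1, 0 };    // one-second timeout
 *   rb_fd_init(&rfds);
 *   rb_fd_set(fd, &rfds);            // may re-allocate the bitmap
 *   int n = rb_thread_fd_select(fd + 1, &rfds, NULL, NULL, &tv);
 *   if (n > 0 && rb_fd_isset(fd, &rfds)) { ... fd is readable ... }
 *   rb_fd_term(&rfds);               // release the allocated bitmap
 */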
4217
4218void
4219rb_fd_init(rb_fdset_t *fds)
4220{
4221 fds->maxfd = 0;
4222 fds->fdset = ALLOC(fd_set);
4223 FD_ZERO(fds->fdset);
4224}
4225
4226void
4227rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4228{
4229 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4230
4231 if (size < sizeof(fd_set))
4232 size = sizeof(fd_set);
4233 dst->maxfd = src->maxfd;
4234 dst->fdset = xmalloc(size);
4235 memcpy(dst->fdset, src->fdset, size);
4236}
4237
4238void
4239rb_fd_term(rb_fdset_t *fds)
4240{
4241 xfree(fds->fdset);
4242 fds->maxfd = 0;
4243 fds->fdset = 0;
4244}
4245
4246void
4247rb_fd_zero(rb_fdset_t *fds)
4248{
4249 if (fds->fdset)
4250 MEMZERO(fds->fdset, fd_mask, howmany(fds->maxfd, NFDBITS));
4251}
4252
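/*
 * Editorial note: rb_fd_resize() below grows the bitmap in whole fd_mask
 * words. A worked example, assuming a 64-bit fd_mask: watching fd 1024
 * needs howmany(1025, 64) = 17 words = 136 bytes, so a default-sized
 * fd_set (128 bytes with FD_SETSIZE == 1024) is xrealloc'ed and the new
 * tail is zeroed before FD_SET may touch it.
 */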
4253static void
4254rb_fd_resize(int n, rb_fdset_t *fds)
4255{
4256 size_t m = howmany(n + 1, NFDBITS) * sizeof(fd_mask);
4257 size_t o = howmany(fds->maxfd, NFDBITS) * sizeof(fd_mask);
4258
4259 if (m < sizeof(fd_set)) m = sizeof(fd_set);
4260 if (o < sizeof(fd_set)) o = sizeof(fd_set);
4261
4262 if (m > o) {
4263 fds->fdset = xrealloc(fds->fdset, m);
4264 memset((char *)fds->fdset + o, 0, m - o);
4265 }
4266 if (n >= fds->maxfd) fds->maxfd = n + 1;
4267}
4268
4269void
4270rb_fd_set(int n, rb_fdset_t *fds)
4271{
4272 rb_fd_resize(n, fds);
4273 FD_SET(n, fds->fdset);
4274}
4275
4276void
4277rb_fd_clr(int n, rb_fdset_t *fds)
4278{
4279 if (n >= fds->maxfd) return;
4280 FD_CLR(n, fds->fdset);
4281}
4282
4283int
4284rb_fd_isset(int n, const rb_fdset_t *fds)
4285{
4286 if (n >= fds->maxfd) return 0;
4287 return FD_ISSET(n, fds->fdset) != 0; /* "!= 0" avoids FreeBSD PR 91421 */
4288}
4289
4290void
4291rb_fd_copy(rb_fdset_t *dst, const fd_set *src, int max)
4292{
4293 size_t size = howmany(max, NFDBITS) * sizeof(fd_mask);
4294
4295 if (size < sizeof(fd_set)) size = sizeof(fd_set);
4296 dst->maxfd = max;
4297 dst->fdset = xrealloc(dst->fdset, size);
4298 memcpy(dst->fdset, src, size);
4299}
4300
4301void
4302rb_fd_dup(rb_fdset_t *dst, const rb_fdset_t *src)
4303{
4304 size_t size = howmany(rb_fd_max(src), NFDBITS) * sizeof(fd_mask);
4305
4306 if (size < sizeof(fd_set))
4307 size = sizeof(fd_set);
4308 dst->maxfd = src->maxfd;
4309 dst->fdset = xrealloc(dst->fdset, size);
4310 memcpy(dst->fdset, src->fdset, size);
4311}
4312
4313int
4314rb_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout)
4315{
4316 fd_set *r = NULL, *w = NULL, *e = NULL;
4317 if (readfds) {
4318 rb_fd_resize(n - 1, readfds);
4319 r = rb_fd_ptr(readfds);
4320 }
4321 if (writefds) {
4322 rb_fd_resize(n - 1, writefds);
4323 w = rb_fd_ptr(writefds);
4324 }
4325 if (exceptfds) {
4326 rb_fd_resize(n - 1, exceptfds);
4327 e = rb_fd_ptr(exceptfds);
4328 }
4329 return select(n, r, w, e, timeout);
4330}
4331
4332#define rb_fd_no_init(fds) ((void)((fds)->fdset = 0), (void)((fds)->maxfd = 0))
4333
4334#undef FD_ZERO
4335#undef FD_SET
4336#undef FD_CLR
4337#undef FD_ISSET
4338
4339#define FD_ZERO(f) rb_fd_zero(f)
4340#define FD_SET(i, f) rb_fd_set((i), (f))
4341#define FD_CLR(i, f) rb_fd_clr((i), (f))
4342#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4343
4344#elif defined(_WIN32)
4345
4346void
4347rb_fd_init(rb_fdset_t *set)
4348{
4349 set->capa = FD_SETSIZE;
4350 set->fdset = ALLOC(fd_set);
4351 FD_ZERO(set->fdset);
4352}
4353
4354void
4355rb_fd_init_copy(rb_fdset_t *dst, rb_fdset_t *src)
4356{
4357 rb_fd_init(dst);
4358 rb_fd_dup(dst, src);
4359}
4360
4361void
4362rb_fd_term(rb_fdset_t *set)
4363{
4364 xfree(set->fdset);
4365 set->fdset = NULL;
4366 set->capa = 0;
4367}
4368
4369void
4370rb_fd_set(int fd, rb_fdset_t *set)
4371{
4372 unsigned int i;
4373 SOCKET s = rb_w32_get_osfhandle(fd);
4374
4375 for (i = 0; i < set->fdset->fd_count; i++) {
4376 if (set->fdset->fd_array[i] == s) {
4377 return;
4378 }
4379 }
4380 if (set->fdset->fd_count >= (unsigned)set->capa) {
4381 set->capa = (set->fdset->fd_count / FD_SETSIZE + 1) * FD_SETSIZE;
4382 set->fdset =
4383 rb_xrealloc_mul_add(
4384 set->fdset, set->capa, sizeof(SOCKET), sizeof(unsigned int));
4385 }
4386 set->fdset->fd_array[set->fdset->fd_count++] = s;
4387}
4388
4389#undef FD_ZERO
4390#undef FD_SET
4391#undef FD_CLR
4392#undef FD_ISSET
4393
4394#define FD_ZERO(f) rb_fd_zero(f)
4395#define FD_SET(i, f) rb_fd_set((i), (f))
4396#define FD_CLR(i, f) rb_fd_clr((i), (f))
4397#define FD_ISSET(i, f) rb_fd_isset((i), (f))
4398
4399#define rb_fd_no_init(fds) (void)((fds)->fdset = 0)
4400
4401#endif
4402
4403#ifndef rb_fd_no_init
4404#define rb_fd_no_init(fds) (void)(fds)
4405#endif
4406
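/*
 * Editorial note: wait_retryable() decides whether an interrupted or
 * early-returning select/ppoll should be retried. A failure with EINTR (or
 * ERESTART) is retried with the remaining timeout recomputed; a zero result
 * before the deadline is treated as a spurious wakeup and also retried. For
 * example, a 5-second select() interrupted after 2 seconds is re-issued with
 * roughly 3 seconds left, courtesy of hrtime_update_expire().
 */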
4407static int
4408wait_retryable(volatile int *result, int errnum, rb_hrtime_t *rel, rb_hrtime_t end)
4409{
4410 int r = *result;
4411 if (r < 0) {
4412 switch (errnum) {
4413 case EINTR:
4414#ifdef ERESTART
4415 case ERESTART:
4416#endif
4417 *result = 0;
4418 if (rel && hrtime_update_expire(rel, end)) {
4419 *rel = 0;
4420 }
4421 return TRUE;
4422 }
4423 return FALSE;
4424 }
4425 else if (r == 0) {
4426 /* check for spurious wakeup */
4427 if (rel) {
4428 return !hrtime_update_expire(rel, end);
4429 }
4430 return TRUE;
4431 }
4432 return FALSE;
4433}
4434
4435struct select_set {
4436 int max;
4437 rb_thread_t *th;
4438 rb_fdset_t *rset;
4439 rb_fdset_t *wset;
4440 rb_fdset_t *eset;
4441 rb_fdset_t orig_rset;
4442 rb_fdset_t orig_wset;
4443 rb_fdset_t orig_eset;
4444 struct timeval *timeout;
4445};
4446
4447static VALUE
4448select_set_free(VALUE p)
4449{
4450 struct select_set *set = (struct select_set *)p;
4451
4452 rb_fd_term(&set->orig_rset);
4453 rb_fd_term(&set->orig_wset);
4454 rb_fd_term(&set->orig_eset);
4455
4456 return Qfalse;
4457}
4458
4459static VALUE
4460do_select(VALUE p)
4461{
4462 struct select_set *set = (struct select_set *)p;
4463 volatile int result = 0;
4464 int lerrno;
4465 rb_hrtime_t *to, rel, end = 0;
4466
4467 timeout_prepare(&to, &rel, &end, set->timeout);
4468 volatile rb_hrtime_t endtime = end;
4469#define restore_fdset(dst, src) \
4470 ((dst) ? rb_fd_dup(dst, src) : (void)0)
4471#define do_select_update() \
4472 (restore_fdset(set->rset, &set->orig_rset), \
4473 restore_fdset(set->wset, &set->orig_wset), \
4474 restore_fdset(set->eset, &set->orig_eset), \
4475 TRUE)
4476
4477 do {
4478 lerrno = 0;
4479
4480 BLOCKING_REGION(set->th, {
4481 struct timeval tv;
4482
4483 if (!RUBY_VM_INTERRUPTED(set->th->ec)) {
4484 result = native_fd_select(set->max,
4485 set->rset, set->wset, set->eset,
4486 rb_hrtime2timeval(&tv, to), set->th);
4487 if (result < 0) lerrno = errno;
4488 }
4489 }, ubf_select, set->th, TRUE);
4490
4491 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
4492 } while (wait_retryable(&result, lerrno, to, endtime) && do_select_update());
4493
4494 RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec);
4495
4496 if (result < 0) {
4497 errno = lerrno;
4498 }
4499
4500 return (VALUE)result;
4501}
4502
4503int
4504rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
4505 struct timeval *timeout)
4506{
4507 struct select_set set;
4508
4509 set.th = GET_THREAD();
4510 RUBY_VM_CHECK_INTS_BLOCKING(set.th->ec);
4511 set.max = max;
4512 set.rset = read;
4513 set.wset = write;
4514 set.eset = except;
4515 set.timeout = timeout;
4516
4517 if (!set.rset && !set.wset && !set.eset) {
4518 if (!timeout) {
4519            rb_thread_sleep_forever();
4520 return 0;
4521 }
4522 rb_thread_wait_for(*timeout);
4523 return 0;
4524 }
4525
4526#define fd_init_copy(f) do { \
4527 if (set.f) { \
4528 rb_fd_resize(set.max - 1, set.f); \
4529 if (&set.orig_##f != set.f) { /* sigwait_fd */ \
4530 rb_fd_init_copy(&set.orig_##f, set.f); \
4531 } \
4532 } \
4533 else { \
4534 rb_fd_no_init(&set.orig_##f); \
4535 } \
4536 } while (0)
4537 fd_init_copy(rset);
4538 fd_init_copy(wset);
4539 fd_init_copy(eset);
4540#undef fd_init_copy
4541
4542 return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
4543}
4544
4545#ifdef USE_POLL
4546
4547/* The same as the Linux kernel. TODO: make a platform-independent definition. */
4548#define POLLIN_SET (POLLRDNORM | POLLRDBAND | POLLIN | POLLHUP | POLLERR)
4549#define POLLOUT_SET (POLLWRBAND | POLLWRNORM | POLLOUT | POLLERR)
4550#define POLLEX_SET (POLLPRI)
4551
4552#ifndef POLLERR_SET /* defined for FreeBSD for now */
4553# define POLLERR_SET (0)
4554#endif
4555
4556static int
4557wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t nfds,
4558 rb_hrtime_t *const to, volatile int *lerrno)
4559{
4560 struct timespec ts;
4561 volatile int result = 0;
4562
4563 *lerrno = 0;
4564 BLOCKING_REGION(th, {
4565 if (!RUBY_VM_INTERRUPTED(th->ec)) {
4566 result = ppoll(fds, nfds, rb_hrtime2timespec(&ts, to), 0);
4567 if (result < 0) *lerrno = errno;
4568 }
4569 }, ubf_select, th, TRUE);
4570 return result;
4571}
4572
4573/*
4574 * returns a mask of events
4575 */
4576static int
4577thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4578{
4579 struct pollfd fds[1] = {{
4580 .fd = fd,
4581 .events = (short)events,
4582 .revents = 0,
4583 }};
4584 volatile int result = 0;
4585 nfds_t nfds;
4586 struct rb_io_blocking_operation blocking_operation;
4587 enum ruby_tag_type state;
4588 volatile int lerrno;
4589
4590 rb_execution_context_t *ec = GET_EC();
4591 rb_thread_t *th = rb_ec_thread_ptr(ec);
4592
4593 if (io) {
4594 blocking_operation.ec = ec;
4595 rb_io_blocking_operation_enter(io, &blocking_operation);
4596 }
4597
4598 if (timeout == NULL && thread_io_wait_events(th, fd, events, NULL)) {
4599 // fd is readable
4600 state = 0;
4601 fds[0].revents = events;
4602 errno = 0;
4603 }
4604 else {
4605 EC_PUSH_TAG(ec);
4606 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
4607 rb_hrtime_t *to, rel, end = 0;
4608 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4609 timeout_prepare(&to, &rel, &end, timeout);
4610 do {
4611 nfds = numberof(fds);
4612 result = wait_for_single_fd_blocking_region(th, fds, nfds, to, &lerrno);
4613
4614 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4615 } while (wait_retryable(&result, lerrno, to, end));
4616
4617 RUBY_VM_CHECK_INTS_BLOCKING(ec);
4618 }
4619
4620 EC_POP_TAG();
4621 }
4622
4623 if (io) {
4624 rb_io_blocking_operation_exit(io, &blocking_operation);
4625 }
4626
4627 if (state) {
4628 EC_JUMP_TAG(ec, state);
4629 }
4630
4631 if (result < 0) {
4632 errno = lerrno;
4633 return -1;
4634 }
4635
4636 if (fds[0].revents & POLLNVAL) {
4637 errno = EBADF;
4638 return -1;
4639 }
4640
4641 /*
4642     * POLLIN and POLLOUT have different meanings from select(2)'s read/write bits.
4643     * Therefore we need to fix them up.
4644 */
4645 result = 0;
4646 if (fds[0].revents & POLLIN_SET)
4647 result |= RB_WAITFD_IN;
4648 if (fds[0].revents & POLLOUT_SET)
4649 result |= RB_WAITFD_OUT;
4650 if (fds[0].revents & POLLEX_SET)
4651 result |= RB_WAITFD_PRI;
4652
4653 /* all requested events are ready if there is an error */
4654 if (fds[0].revents & POLLERR_SET)
4655 result |= events;
4656
4657 return result;
4658}
4659#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
4660struct select_args {
4661 struct rb_io *io;
4662 struct rb_io_blocking_operation *blocking_operation;
4663
4664 union {
4665 int fd;
4666 int error;
4667 } as;
4668 rb_fdset_t *read;
4669 rb_fdset_t *write;
4670 rb_fdset_t *except;
4671 struct timeval *tv;
4672};
4673
4674static VALUE
4675select_single(VALUE ptr)
4676{
4677 struct select_args *args = (struct select_args *)ptr;
4678 int r;
4679
4680 r = rb_thread_fd_select(args->as.fd + 1,
4681 args->read, args->write, args->except, args->tv);
4682 if (r == -1)
4683 args->as.error = errno;
4684 if (r > 0) {
4685 r = 0;
4686 if (args->read && rb_fd_isset(args->as.fd, args->read))
4687 r |= RB_WAITFD_IN;
4688 if (args->write && rb_fd_isset(args->as.fd, args->write))
4689 r |= RB_WAITFD_OUT;
4690 if (args->except && rb_fd_isset(args->as.fd, args->except))
4691 r |= RB_WAITFD_PRI;
4692 }
4693 return (VALUE)r;
4694}
4695
4696static VALUE
4697select_single_cleanup(VALUE ptr)
4698{
4699 struct select_args *args = (struct select_args *)ptr;
4700
4701 if (args->blocking_operation) {
4702 rb_io_blocking_operation_exit(args->io, args->blocking_operation);
4703 }
4704
4705 if (args->read) rb_fd_term(args->read);
4706 if (args->write) rb_fd_term(args->write);
4707 if (args->except) rb_fd_term(args->except);
4708
4709 return (VALUE)-1;
4710}
4711
4712static rb_fdset_t *
4713init_set_fd(int fd, rb_fdset_t *fds)
4714{
4715 if (fd < 0) {
4716 return 0;
4717 }
4718 rb_fd_init(fds);
4719 rb_fd_set(fd, fds);
4720
4721 return fds;
4722}
4723
4724static int
4725thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
4726{
4727 rb_fdset_t rfds, wfds, efds;
4728 struct select_args args;
4729 VALUE ptr = (VALUE)&args;
4730
4731 struct rb_io_blocking_operation blocking_operation;
4732 if (io) {
4733 args.io = io;
4734 blocking_operation.ec = GET_EC();
4735 rb_io_blocking_operation_enter(io, &blocking_operation);
4736 args.blocking_operation = &blocking_operation;
4737 }
4738 else {
4739 args.io = NULL;
4740 blocking_operation.ec = NULL;
4741 args.blocking_operation = NULL;
4742 }
4743
4744 args.as.fd = fd;
4745 args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
4746 args.write = (events & RB_WAITFD_OUT) ? init_set_fd(fd, &wfds) : NULL;
4747 args.except = (events & RB_WAITFD_PRI) ? init_set_fd(fd, &efds) : NULL;
4748 args.tv = timeout;
4749
4750 int result = (int)rb_ensure(select_single, ptr, select_single_cleanup, ptr);
4751 if (result == -1)
4752 errno = args.as.error;
4753
4754 return result;
4755}
4756#endif /* ! USE_POLL */
4757
4758int
4759rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
4760{
4761 return thread_io_wait(NULL, fd, events, timeout);
4762}
4763
4764int
4765rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
4766{
4767 return thread_io_wait(io, io->fd, events, timeout);
4768}
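
/*
 * A minimal usage sketch (editorial note, not part of the original source):
 * extensions typically call rb_thread_wait_for_single_fd() to block on one
 * descriptor while the GVL is released. `fd` is an assumed connected socket:
 *
 *   struct timeval tv = { 5, 0 };
 *   int ready = rb_thread_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);
 *   if (ready < 0) rb_sys_fail("wait_for_single_fd");   // errno is set
 *   else if (ready == 0) { ... timed out ... }
 *   else if (ready & RB_WAITFD_IN) { ... fd is readable ... }
 */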
4769
4770/*
4771 * for GC
4772 */
4773
4774#ifdef USE_CONSERVATIVE_STACK_END
4775void
4776rb_gc_set_stack_end(VALUE **stack_end_p)
4777{
4778 VALUE stack_end;
4779COMPILER_WARNING_PUSH
4780#if RBIMPL_COMPILER_IS(GCC)
4781COMPILER_WARNING_IGNORED(-Wdangling-pointer);
4782#endif
4783 *stack_end_p = &stack_end;
4784COMPILER_WARNING_POP
4785}
4786#endif
4787
4788/*
4789 *
4790 */
4791
4792void
4793rb_threadptr_check_signal(rb_thread_t *mth)
4794{
4795 /* mth must be main_thread */
4796 if (rb_signal_buff_size() > 0) {
4797 /* wakeup main thread */
4798 threadptr_trap_interrupt(mth);
4799 }
4800}
4801
4802static void
4803async_bug_fd(const char *mesg, int errno_arg, int fd)
4804{
4805 char buff[64];
4806 size_t n = strlcpy(buff, mesg, sizeof(buff));
4807 if (n < sizeof(buff)-3) {
4808 ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
4809 }
4810 rb_async_bug_errno(buff, errno_arg);
4811}
4812
4813/* VM-dependent API is not available for this function */
4814static int
4815consume_communication_pipe(int fd)
4816{
4817#if USE_EVENTFD
4818 uint64_t buff[1];
4819#else
4820    /* buffer can be shared because no one else refers to it. */
4821 static char buff[1024];
4822#endif
4823 ssize_t result;
4824 int ret = FALSE; /* for rb_sigwait_sleep */
4825
4826 while (1) {
4827 result = read(fd, buff, sizeof(buff));
4828#if USE_EVENTFD
4829        RUBY_DEBUG_LOG("result:%d buff:%lu", (int)result, (unsigned long)buff[0]);
4830#else
4831 RUBY_DEBUG_LOG("result:%d", (int)result);
4832#endif
4833 if (result > 0) {
4834 ret = TRUE;
4835 if (USE_EVENTFD || result < (ssize_t)sizeof(buff)) {
4836 return ret;
4837 }
4838 }
4839 else if (result == 0) {
4840 return ret;
4841 }
4842 else if (result < 0) {
4843 int e = errno;
4844 switch (e) {
4845 case EINTR:
4846 continue; /* retry */
4847 case EAGAIN:
4848#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
4849 case EWOULDBLOCK:
4850#endif
4851 return ret;
4852 default:
4853 async_bug_fd("consume_communication_pipe: read", e, fd);
4854 }
4855 }
4856 }
4857}
4858
4859void
4860rb_thread_stop_timer_thread(void)
4861{
4862 if (TIMER_THREAD_CREATED_P() && native_stop_timer_thread()) {
4863 native_reset_timer_thread();
4864 }
4865}
4866
4867void
4868rb_thread_reset_timer_thread(void)
4869{
4870 native_reset_timer_thread();
4871}
4872
4873void
4874rb_thread_start_timer_thread(void)
4875{
4876 system_working = 1;
4877 rb_thread_create_timer_thread();
4878}
4879
4880static int
4881clear_coverage_i(st_data_t key, st_data_t val, st_data_t dummy)
4882{
4883 int i;
4884 VALUE coverage = (VALUE)val;
4885 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
4886 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
4887
4888 if (lines) {
4889 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
4890 rb_ary_clear(lines);
4891 }
4892 else {
4893 int i;
4894 for (i = 0; i < RARRAY_LEN(lines); i++) {
4895 if (RARRAY_AREF(lines, i) != Qnil)
4896 RARRAY_ASET(lines, i, INT2FIX(0));
4897 }
4898 }
4899 }
4900 if (branches) {
4901 VALUE counters = RARRAY_AREF(branches, 1);
4902 for (i = 0; i < RARRAY_LEN(counters); i++) {
4903 RARRAY_ASET(counters, i, INT2FIX(0));
4904 }
4905 }
4906
4907 return ST_CONTINUE;
4908}
4909
4910void
4911rb_clear_coverages(void)
4912{
4913 VALUE coverages = rb_get_coverages();
4914 if (RTEST(coverages)) {
4915 rb_hash_foreach(coverages, clear_coverage_i, 0);
4916 }
4917}
4918
4919#if defined(HAVE_WORKING_FORK)
4920
4921static void
4922rb_thread_atfork_internal(rb_thread_t *th, void (*atfork)(rb_thread_t *, const rb_thread_t *))
4923{
4924 rb_thread_t *i = 0;
4925 rb_vm_t *vm = th->vm;
4926 rb_ractor_t *r = th->ractor;
4927 vm->ractor.main_ractor = r;
4928 vm->ractor.main_thread = th;
4929 r->threads.main = th;
4930 r->status_ = ractor_created;
4931
4932 thread_sched_atfork(TH_SCHED(th));
4933 ubf_list_atfork();
4934 rb_signal_atfork();
4935
4936 // OK. Only this thread accesses:
4937 ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
4938 if (r != vm->ractor.main_ractor) {
4939 rb_ractor_terminate_atfork(vm, r);
4940 }
4941 ccan_list_for_each(&r->threads.set, i, lt_node) {
4942 atfork(i, th);
4943 }
4944 }
4945 rb_vm_living_threads_init(vm);
4946
4947 rb_ractor_atfork(vm, th);
4948 rb_vm_postponed_job_atfork();
4949
4950 /* may be held by any thread in parent */
4951 rb_native_mutex_initialize(&th->interrupt_lock);
4952 ccan_list_head_init(&th->interrupt_exec_tasks);
4953
4954 vm->fork_gen++;
4955 rb_ractor_sleeper_threads_clear(th->ractor);
4956 rb_clear_coverages();
4957
4958    // restart the timer thread (timer threads access `vm->waitpid_lock` and so on).
4959 rb_thread_reset_timer_thread();
4960 rb_thread_start_timer_thread();
4961
4962 VM_ASSERT(vm->ractor.blocking_cnt == 0);
4963 VM_ASSERT(vm->ractor.cnt == 1);
4964}
4965
4966static void
4967terminate_atfork_i(rb_thread_t *th, const rb_thread_t *current_th)
4968{
4969 if (th != current_th) {
4970 rb_native_mutex_initialize(&th->interrupt_lock);
4971 rb_mutex_abandon_keeping_mutexes(th);
4972 rb_mutex_abandon_locking_mutex(th);
4973 thread_cleanup_func(th, TRUE);
4974 }
4975}
4976
4977void rb_fiber_atfork(rb_thread_t *);
4978void
4979rb_thread_atfork(void)
4980{
4981 rb_thread_t *th = GET_THREAD();
4982 rb_threadptr_pending_interrupt_clear(th);
4983 rb_thread_atfork_internal(th, terminate_atfork_i);
4984 th->join_list = NULL;
4985 rb_fiber_atfork(th);
4986
4987    /* We don't want to reproduce CVE-2003-0900. */
4988    rb_reset_random_seed();
4989}
4990
4991static void
4992terminate_atfork_before_exec_i(rb_thread_t *th, const rb_thread_t *current_th)
4993{
4994 if (th != current_th) {
4995 thread_cleanup_func_before_exec(th);
4996 }
4997}
4998
4999void
5000rb_thread_atfork_before_exec(void)
5001{
5002 rb_thread_t *th = GET_THREAD();
5003 rb_thread_atfork_internal(th, terminate_atfork_before_exec_i);
5004}
5005#else
5006void
5007rb_thread_atfork(void)
5008{
5009}
5010
5011void
5012rb_thread_atfork_before_exec(void)
5013{
5014}
5015#endif
5016
5017struct thgroup {
5018 int enclosed;
5019};
5020
5021static const rb_data_type_t thgroup_data_type = {
5022 "thgroup",
5023 {
5024 0,
5025        RUBY_TYPED_DEFAULT_FREE,
5026 NULL, // No external memory to report
5027 },
5028 0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED | RUBY_TYPED_EMBEDDABLE
5029};
5030
5031/*
5032 * Document-class: ThreadGroup
5033 *
5034 * ThreadGroup provides a means of keeping track of a number of threads as a
5035 * group.
5036 *
5037 * A given Thread object can only belong to one ThreadGroup at a time; adding
5038 * a thread to a new group will remove it from any previous group.
5039 *
5040 * Newly created threads belong to the same group as the thread from which they
5041 * were created.
5042 */
5043
5044/*
5045 * Document-const: Default
5046 *
5047 * The default ThreadGroup created when Ruby starts; all Threads belong to it
5048 * by default.
5049 */
5050static VALUE
5051thgroup_s_alloc(VALUE klass)
5052{
5053 VALUE group;
5054 struct thgroup *data;
5055
5056 group = TypedData_Make_Struct(klass, struct thgroup, &thgroup_data_type, data);
5057 data->enclosed = 0;
5058
5059 return group;
5060}
5061
5062/*
5063 * call-seq:
5064 * thgrp.list -> array
5065 *
5066 * Returns an array of all existing Thread objects that belong to this group.
5067 *
5068 * ThreadGroup::Default.list #=> [#<Thread:0x401bdf4c run>]
5069 */
5070
5071static VALUE
5072thgroup_list(VALUE group)
5073{
5074 VALUE ary = rb_ary_new();
5075 rb_thread_t *th = 0;
5076 rb_ractor_t *r = GET_RACTOR();
5077
5078 ccan_list_for_each(&r->threads.set, th, lt_node) {
5079 if (th->thgroup == group) {
5080 rb_ary_push(ary, th->self);
5081 }
5082 }
5083 return ary;
5084}
5085
5086
5087/*
5088 * call-seq:
5089 * thgrp.enclose -> thgrp
5090 *
5091 * Prevents threads from being added to or removed from the receiving
5092 * ThreadGroup.
5093 *
5094 * New threads can still be started in an enclosed ThreadGroup.
5095 *
5096 * ThreadGroup::Default.enclose #=> #<ThreadGroup:0x4029d914>
5097 * thr = Thread.new { Thread.stop } #=> #<Thread:0x402a7210 sleep>
5098 * tg = ThreadGroup.new #=> #<ThreadGroup:0x402752d4>
5099 * tg.add thr
5100 * #=> ThreadError: can't move from the enclosed thread group
5101 */
5102
5103static VALUE
5104thgroup_enclose(VALUE group)
5105{
5106 struct thgroup *data;
5107
5108 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5109 data->enclosed = 1;
5110
5111 return group;
5112}
5113
5114
5115/*
5116 * call-seq:
5117 * thgrp.enclosed? -> true or false
5118 *
5119 * Returns +true+ if the +thgrp+ is enclosed. See also ThreadGroup#enclose.
5120 */
5121
5122static VALUE
5123thgroup_enclosed_p(VALUE group)
5124{
5125 struct thgroup *data;
5126
5127 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5128 return RBOOL(data->enclosed);
5129}
5130
5131
5132/*
5133 * call-seq:
5134 * thgrp.add(thread) -> thgrp
5135 *
5136 * Adds the given +thread+ to this group, removing it from any other
5137 * group to which it may have previously been a member.
5138 *
5139 * puts "Initial group is #{ThreadGroup::Default.list}"
5140 * tg = ThreadGroup.new
5141 * t1 = Thread.new { sleep }
5142 * t2 = Thread.new { sleep }
5143 * puts "t1 is #{t1}"
5144 * puts "t2 is #{t2}"
5145 * tg.add(t1)
5146 * puts "Initial group now #{ThreadGroup::Default.list}"
5147 * puts "tg group now #{tg.list}"
5148 *
5149 * This will produce:
5150 *
5151 * Initial group is #<Thread:0x401bdf4c>
5152 * t1 is #<Thread:0x401b3c90>
5153 * t2 is #<Thread:0x401b3c18>
5154 * Initial group now #<Thread:0x401b3c18>#<Thread:0x401bdf4c>
5155 * tg group now #<Thread:0x401b3c90>
5156 */
5157
5158static VALUE
5159thgroup_add(VALUE group, VALUE thread)
5160{
5161 rb_thread_t *target_th = rb_thread_ptr(thread);
5162 struct thgroup *data;
5163
5164 if (OBJ_FROZEN(group)) {
5165 rb_raise(rb_eThreadError, "can't move to the frozen thread group");
5166 }
5167 TypedData_Get_Struct(group, struct thgroup, &thgroup_data_type, data);
5168 if (data->enclosed) {
5169 rb_raise(rb_eThreadError, "can't move to the enclosed thread group");
5170 }
5171
5172 if (OBJ_FROZEN(target_th->thgroup)) {
5173 rb_raise(rb_eThreadError, "can't move from the frozen thread group");
5174 }
5175 TypedData_Get_Struct(target_th->thgroup, struct thgroup, &thgroup_data_type, data);
5176 if (data->enclosed) {
5177 rb_raise(rb_eThreadError,
5178 "can't move from the enclosed thread group");
5179 }
5180
5181 target_th->thgroup = group;
5182 return group;
5183}
5184
5185/*
5186 * Document-class: ThreadShield
5187 */
5188static void
5189thread_shield_mark(void *ptr)
5190{
5191 rb_gc_mark((VALUE)ptr);
5192}
5193
5194static const rb_data_type_t thread_shield_data_type = {
5195 "thread_shield",
5196 {thread_shield_mark, 0, 0,},
5197 0, 0, RUBY_TYPED_FREE_IMMEDIATELY
5198};
5199
5200static VALUE
5201thread_shield_alloc(VALUE klass)
5202{
5203 return TypedData_Wrap_Struct(klass, &thread_shield_data_type, (void *)mutex_alloc(0));
5204}
5205
5206#define GetThreadShieldPtr(obj) ((VALUE)rb_check_typeddata((obj), &thread_shield_data_type))
5207#define THREAD_SHIELD_WAITING_MASK (((FL_USER19-1)&~(FL_USER0-1))|FL_USER19)
5208#define THREAD_SHIELD_WAITING_SHIFT (FL_USHIFT)
5209#define THREAD_SHIELD_WAITING_MAX (THREAD_SHIELD_WAITING_MASK>>THREAD_SHIELD_WAITING_SHIFT)
5210STATIC_ASSERT(THREAD_SHIELD_WAITING_MAX, THREAD_SHIELD_WAITING_MAX <= UINT_MAX);
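/*
 * Editorial note: the waiting count lives in the shield object's flag word
 * rather than in a separate field. With the mask covering FL_USER0..FL_USER19,
 * rb_thread_shield_waiting_inc() effectively performs
 * flags = (flags & ~MASK) | ((count + 1) << FL_USHIFT); one waiter on a fresh
 * shield therefore sets the user bits to 1 << FL_USHIFT.
 */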
5211static inline unsigned int
5212rb_thread_shield_waiting(VALUE b)
5213{
5214 return ((RBASIC(b)->flags&THREAD_SHIELD_WAITING_MASK)>>THREAD_SHIELD_WAITING_SHIFT);
5215}
5216
5217static inline void
5218rb_thread_shield_waiting_inc(VALUE b)
5219{
5220 unsigned int w = rb_thread_shield_waiting(b);
5221 w++;
5222 if (w > THREAD_SHIELD_WAITING_MAX)
5223 rb_raise(rb_eRuntimeError, "waiting count overflow");
5224 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5225 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5226}
5227
5228static inline void
5229rb_thread_shield_waiting_dec(VALUE b)
5230{
5231 unsigned int w = rb_thread_shield_waiting(b);
5232 if (!w) rb_raise(rb_eRuntimeError, "waiting count underflow");
5233 w--;
5234 RBASIC(b)->flags &= ~THREAD_SHIELD_WAITING_MASK;
5235 RBASIC(b)->flags |= ((VALUE)w << THREAD_SHIELD_WAITING_SHIFT);
5236}
5237
5238VALUE
5239rb_thread_shield_new(void)
5240{
5241 VALUE thread_shield = thread_shield_alloc(rb_cThreadShield);
5242 rb_mutex_lock((VALUE)DATA_PTR(thread_shield));
5243 return thread_shield;
5244}
5245
5246bool
5247rb_thread_shield_owned(VALUE self)
5248{
5249 VALUE mutex = GetThreadShieldPtr(self);
5250 if (!mutex) return false;
5251
5252 rb_mutex_t *m = mutex_ptr(mutex);
5253
5254 return m->fiber == GET_EC()->fiber_ptr;
5255}
5256
5257/*
5258 * Wait on a thread shield.
5259 *
5260 * Returns
5261 * true: acquired the thread shield
5262 *   false: the thread shield was destroyed and no other threads are waiting
5263 * nil: the thread shield was destroyed but still in use
5264 */
5265VALUE
5266rb_thread_shield_wait(VALUE self)
5267{
5268 VALUE mutex = GetThreadShieldPtr(self);
5269 rb_mutex_t *m;
5270
5271 if (!mutex) return Qfalse;
5272 m = mutex_ptr(mutex);
5273 if (m->fiber == GET_EC()->fiber_ptr) return Qnil;
5274 rb_thread_shield_waiting_inc(self);
5275 rb_mutex_lock(mutex);
5276 rb_thread_shield_waiting_dec(self);
5277 if (DATA_PTR(self)) return Qtrue;
5278 rb_mutex_unlock(mutex);
5279 return rb_thread_shield_waiting(self) > 0 ? Qnil : Qfalse;
5280}
5281
5282static VALUE
5283thread_shield_get_mutex(VALUE self)
5284{
5285 VALUE mutex = GetThreadShieldPtr(self);
5286 if (!mutex)
5287 rb_raise(rb_eThreadError, "destroyed thread shield - %p", (void *)self);
5288 return mutex;
5289}
5290
5291/*
5292 * Release a thread shield, and return true if it has waiting threads.
5293 */
5294VALUE
5295rb_thread_shield_release(VALUE self)
5296{
5297 VALUE mutex = thread_shield_get_mutex(self);
5298 rb_mutex_unlock(mutex);
5299 return RBOOL(rb_thread_shield_waiting(self) > 0);
5300}
5301
5302/*
5303 * Release and destroy a thread shield, and return true if it has waiting threads.
5304 */
5305VALUE
5306rb_thread_shield_destroy(VALUE self)
5307{
5308 VALUE mutex = thread_shield_get_mutex(self);
5309 DATA_PTR(self) = 0;
5310 rb_mutex_unlock(mutex);
5311 return RBOOL(rb_thread_shield_waiting(self) > 0);
5312}
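
/*
 * A minimal lifecycle sketch (editorial note, not part of the original
 * source): a shield is created locked by its owner; other threads block in
 * rb_thread_shield_wait() until the owner releases or destroys it.
 *
 *   VALUE shield = rb_thread_shield_new();  // owner now holds the inner mutex
 *   // ... other threads call rb_thread_shield_wait(shield) and block ...
 *   rb_thread_shield_destroy(shield);       // waiters wake with Qnil/Qfalse
 */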
5313
5314static VALUE
5315threadptr_recursive_hash(rb_thread_t *th)
5316{
5317 return th->ec->local_storage_recursive_hash;
5318}
5319
5320static void
5321threadptr_recursive_hash_set(rb_thread_t *th, VALUE hash)
5322{
5323 th->ec->local_storage_recursive_hash = hash;
5324}
5325
5326ID rb_frame_last_func(void);
5327
5328/*
5329 * Returns the current "recursive list" used to detect recursion.
5330 * This list is a hash table, unique for the current thread and for
5331 * the current __callee__.
5332 */
5333
5334static VALUE
5335recursive_list_access(VALUE sym)
5336{
5337 rb_thread_t *th = GET_THREAD();
5338 VALUE hash = threadptr_recursive_hash(th);
5339 VALUE list;
5340 if (NIL_P(hash) || !RB_TYPE_P(hash, T_HASH)) {
5341 hash = rb_ident_hash_new();
5342 threadptr_recursive_hash_set(th, hash);
5343 list = Qnil;
5344 }
5345 else {
5346 list = rb_hash_aref(hash, sym);
5347 }
5348 if (NIL_P(list) || !RB_TYPE_P(list, T_HASH)) {
5349 list = rb_ident_hash_new();
5350 rb_hash_aset(hash, sym, list);
5351 }
5352 return list;
5353}
5354
5355/*
5356 * Returns true if and only if obj (or the pair <obj, paired_obj>) is already
5357 * in the recursion list.
5358 * Assumes the recursion list is valid.
5359 */
5360
5361static bool
5362recursive_check(VALUE list, VALUE obj, VALUE paired_obj_id)
5363{
5364#if SIZEOF_LONG == SIZEOF_VOIDP
5365 #define OBJ_ID_EQL(obj_id, other) ((obj_id) == (other))
5366#elif SIZEOF_LONG_LONG == SIZEOF_VOIDP
5367 #define OBJ_ID_EQL(obj_id, other) (RB_BIGNUM_TYPE_P((obj_id)) ? \
5368 rb_big_eql((obj_id), (other)) : ((obj_id) == (other)))
5369#endif
5370
5371 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5372 if (UNDEF_P(pair_list))
5373 return false;
5374 if (paired_obj_id) {
5375 if (!RB_TYPE_P(pair_list, T_HASH)) {
5376 if (!OBJ_ID_EQL(paired_obj_id, pair_list))
5377 return false;
5378 }
5379 else {
5380 if (NIL_P(rb_hash_lookup(pair_list, paired_obj_id)))
5381 return false;
5382 }
5383 }
5384 return true;
5385}
5386
5387/*
5388 * Pushes obj (or the pair <obj, paired_obj>) in the recursion list.
5389 * For a single obj, it sets list[obj] to Qtrue.
5390 * For a pair, it sets list[obj] to paired_obj_id if possible,
5391 * otherwise list[obj] becomes a hash like:
5392 * {paired_obj_id_1 => true, paired_obj_id_2 => true, ... }
5393 * Assumes the recursion list is valid.
5394 */
5395
5396static void
5397recursive_push(VALUE list, VALUE obj, VALUE paired_obj)
5398{
5399 VALUE pair_list;
5400
5401 if (!paired_obj) {
5402 rb_hash_aset(list, obj, Qtrue);
5403 }
5404 else if (UNDEF_P(pair_list = rb_hash_lookup2(list, obj, Qundef))) {
5405 rb_hash_aset(list, obj, paired_obj);
5406 }
5407 else {
5408 if (!RB_TYPE_P(pair_list, T_HASH)){
5409 VALUE other_paired_obj = pair_list;
5410 pair_list = rb_hash_new();
5411 rb_hash_aset(pair_list, other_paired_obj, Qtrue);
5412 rb_hash_aset(list, obj, pair_list);
5413 }
5414 rb_hash_aset(pair_list, paired_obj, Qtrue);
5415 }
5416}
5417
5418/*
5419 * Pops obj (or the pair <obj, paired_obj>) from the recursion list.
5420 * For a pair, if list[obj] is a hash, then paired_obj_id is
5421 * removed from the hash and no attempt is made to simplify
5422 * list[obj] from {only_one_paired_id => true} to only_one_paired_id
5423 * Assumes the recursion list is valid.
5424 */
5425
5426static int
5427recursive_pop(VALUE list, VALUE obj, VALUE paired_obj)
5428{
5429 if (paired_obj) {
5430 VALUE pair_list = rb_hash_lookup2(list, obj, Qundef);
5431 if (UNDEF_P(pair_list)) {
5432 return 0;
5433 }
5434 if (RB_TYPE_P(pair_list, T_HASH)) {
5435 rb_hash_delete_entry(pair_list, paired_obj);
5436 if (!RHASH_EMPTY_P(pair_list)) {
5437                return 1; /* keep the hash until it is empty */
5438 }
5439 }
5440 }
5441 rb_hash_delete_entry(list, obj);
5442 return 1;
5443}
5444
5445struct exec_recursive_params {
5446 VALUE (*func) (VALUE, VALUE, int);
5447 VALUE list;
5448 VALUE obj;
5449 VALUE pairid;
5450 VALUE arg;
5451};
5452
5453static VALUE
5454exec_recursive_i(RB_BLOCK_CALL_FUNC_ARGLIST(tag, data))
5455{
5456 struct exec_recursive_params *p = (void *)data;
5457 return (*p->func)(p->obj, p->arg, FALSE);
5458}
5459
5460/*
5461 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5462 * current method is called recursively on obj, or on the pair <obj, pairid>.
5463 * If outer is 0, then the innermost func will be called with recursive set
5464 * to true, otherwise the outermost func will be called. In the latter case,
5465 * all inner funcs are short-circuited by throw.
5466 * Implementation details: the value thrown is the recursive list, which is
5467 * specific to the current method and unlikely to be caught anywhere else.
5468 * list[recursive_key] is used as a flag for the outermost call.
5469 */
5470
5471static VALUE
5472exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE pairid, VALUE arg, int outer, ID mid)
5473{
5474 VALUE result = Qundef;
5475 const VALUE sym = mid ? ID2SYM(mid) : ID2SYM(idNULL);
5476 struct exec_recursive_params p;
5477 int outermost;
5478 p.list = recursive_list_access(sym);
5479 p.obj = obj;
5480 p.pairid = pairid;
5481 p.arg = arg;
5482 outermost = outer && !recursive_check(p.list, ID2SYM(recursive_key), 0);
5483
5484 if (recursive_check(p.list, p.obj, pairid)) {
5485 if (outer && !outermost) {
5486 rb_throw_obj(p.list, p.list);
5487 }
5488 return (*func)(obj, arg, TRUE);
5489 }
5490 else {
5491 enum ruby_tag_type state;
5492
5493 p.func = func;
5494
5495 if (outermost) {
5496 recursive_push(p.list, ID2SYM(recursive_key), 0);
5497 recursive_push(p.list, p.obj, p.pairid);
5498 result = rb_catch_protect(p.list, exec_recursive_i, (VALUE)&p, &state);
5499 if (!recursive_pop(p.list, p.obj, p.pairid)) goto invalid;
5500 if (!recursive_pop(p.list, ID2SYM(recursive_key), 0)) goto invalid;
5501 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5502 if (result == p.list) {
5503 result = (*func)(obj, arg, TRUE);
5504 }
5505 }
5506 else {
5507 volatile VALUE ret = Qundef;
5508 recursive_push(p.list, p.obj, p.pairid);
5509 EC_PUSH_TAG(GET_EC());
5510 if ((state = EC_EXEC_TAG()) == TAG_NONE) {
5511 ret = (*func)(obj, arg, FALSE);
5512 }
5513 EC_POP_TAG();
5514 if (!recursive_pop(p.list, p.obj, p.pairid)) {
5515 goto invalid;
5516 }
5517 if (state != TAG_NONE) EC_JUMP_TAG(GET_EC(), state);
5518 result = ret;
5519 }
5520 }
5521 *(volatile struct exec_recursive_params *)&p;
5522 return result;
5523
5524 invalid:
5525 rb_raise(rb_eTypeError, "invalid inspect_tbl pair_list "
5526 "for %+"PRIsVALUE" in %+"PRIsVALUE,
5527 sym, rb_thread_current());
5528    UNREACHABLE_RETURN(Qundef);
5529}
5530
5531/*
5532 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5533 * current method is called recursively on obj
5534 */
5535
5536VALUE
5537rb_exec_recursive(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5538{
5539 return exec_recursive(func, obj, 0, arg, 0, rb_frame_last_func());
5540}
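
/*
 * A minimal usage sketch (editorial note, not part of the original source):
 * a C-level inspect-like routine can use rb_exec_recursive() to break cycles;
 * the callback name and the "[...]" placeholder are illustrative only:
 *
 *   static VALUE
 *   my_inspect_rec(VALUE obj, VALUE arg, int recur)
 *   {
 *       if (recur) return rb_str_new_cstr("[...]");  // called again on obj
 *       return rb_inspect(obj);  // may re-enter here for nested elements
 *   }
 *
 *   // rb_exec_recursive(my_inspect_rec, obj, Qnil);
 */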
5541
5542/*
5543 * Calls func(obj, arg, recursive), where recursive is non-zero if the
5544 * current method is called recursively on the ordered pair <obj, paired_obj>
5545 */
5546
5547VALUE
5548rb_exec_recursive_paired(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5549{
5550 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 0, rb_frame_last_func());
5551}
5552
5553/*
5554 * If recursion is detected on the current method and obj, the outermost
5555 * func will be called with (obj, arg, true). All inner funcs will be
5556 * short-circuited using throw.
5557 */
5558
5559VALUE
5560rb_exec_recursive_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg)
5561{
5562 return exec_recursive(func, obj, 0, arg, 1, rb_frame_last_func());
5563}
5564
5565VALUE
5566rb_exec_recursive_outer_mid(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE arg, ID mid)
5567{
5568 return exec_recursive(func, obj, 0, arg, 1, mid);
5569}
5570
5571/*
5572 * If recursion is detected on the current method, obj and paired_obj,
5573 * the outermost func will be called with (obj, arg, true). All inner
5574 * funcs will be short-circuited using throw.
5575 */
5576
5577VALUE
5578rb_exec_recursive_paired_outer(VALUE (*func) (VALUE, VALUE, int), VALUE obj, VALUE paired_obj, VALUE arg)
5579{
5580 return exec_recursive(func, obj, rb_memory_id(paired_obj), arg, 1, rb_frame_last_func());
5581}
5582
5583/*
5584 * call-seq:
5585 * thread.backtrace -> array or nil
5586 *
5587 * Returns the current backtrace of the target thread.
5588 *
5589 */
5590
5591static VALUE
5592rb_thread_backtrace_m(int argc, VALUE *argv, VALUE thval)
5593{
5594 return rb_vm_thread_backtrace(argc, argv, thval);
5595}
5596
5597/* call-seq:
5598 * thread.backtrace_locations(*args) -> array or nil
5599 *
5600 * Returns the execution stack for the target thread---an array containing
5601 * backtrace location objects.
5602 *
5603 * See Thread::Backtrace::Location for more information.
5604 *
5605 * This method behaves similarly to Kernel#caller_locations except it applies
5606 * to a specific thread.
5607 */
5608static VALUE
5609rb_thread_backtrace_locations_m(int argc, VALUE *argv, VALUE thval)
5610{
5611 return rb_vm_thread_backtrace_locations(argc, argv, thval);
5612}
5613
5614void
5615Init_Thread_Mutex(void)
5616{
5617 rb_thread_t *th = GET_THREAD();
5618
5619 rb_native_mutex_initialize(&th->vm->workqueue_lock);
5620 rb_native_mutex_initialize(&th->interrupt_lock);
5621}
5622
5623/*
5624 * Document-class: ThreadError
5625 *
5626 * Raised when an invalid operation is attempted on a thread.
5627 *
5628 * For example, when no other thread has been started:
5629 *
5630 * Thread.stop
5631 *
5632 * This will raise the following exception:
5633 *
5634 * ThreadError: stopping only thread
5635 * note: use sleep to stop forever
5636 */
5637
5638void
5639Init_Thread(void)
5640{
5641 rb_thread_t *th = GET_THREAD();
5642
5643 sym_never = ID2SYM(rb_intern_const("never"));
5644 sym_immediate = ID2SYM(rb_intern_const("immediate"));
5645 sym_on_blocking = ID2SYM(rb_intern_const("on_blocking"));
5646
5647 rb_define_singleton_method(rb_cThread, "new", thread_s_new, -1);
5648 rb_define_singleton_method(rb_cThread, "start", thread_start, -2);
5649 rb_define_singleton_method(rb_cThread, "fork", thread_start, -2);
5650 rb_define_singleton_method(rb_cThread, "main", rb_thread_s_main, 0);
5651 rb_define_singleton_method(rb_cThread, "current", thread_s_current, 0);
5652 rb_define_singleton_method(rb_cThread, "stop", thread_stop, 0);
5653 rb_define_singleton_method(rb_cThread, "kill", rb_thread_s_kill, 1);
5654 rb_define_singleton_method(rb_cThread, "exit", rb_thread_exit, 0);
5655 rb_define_singleton_method(rb_cThread, "pass", thread_s_pass, 0);
5656 rb_define_singleton_method(rb_cThread, "list", thread_list, 0);
5657 rb_define_singleton_method(rb_cThread, "abort_on_exception", rb_thread_s_abort_exc, 0);
5658 rb_define_singleton_method(rb_cThread, "abort_on_exception=", rb_thread_s_abort_exc_set, 1);
5659 rb_define_singleton_method(rb_cThread, "report_on_exception", rb_thread_s_report_exc, 0);
5660 rb_define_singleton_method(rb_cThread, "report_on_exception=", rb_thread_s_report_exc_set, 1);
5661 rb_define_singleton_method(rb_cThread, "ignore_deadlock", rb_thread_s_ignore_deadlock, 0);
5662 rb_define_singleton_method(rb_cThread, "ignore_deadlock=", rb_thread_s_ignore_deadlock_set, 1);
5663 rb_define_singleton_method(rb_cThread, "handle_interrupt", rb_thread_s_handle_interrupt, 1);
5664 rb_define_singleton_method(rb_cThread, "pending_interrupt?", rb_thread_s_pending_interrupt_p, -1);
5665 rb_define_method(rb_cThread, "pending_interrupt?", rb_thread_pending_interrupt_p, -1);
5666
5667 rb_define_method(rb_cThread, "initialize", thread_initialize, -2);
5668 rb_define_method(rb_cThread, "raise", thread_raise_m, -1);
5669 rb_define_method(rb_cThread, "join", thread_join_m, -1);
5670 rb_define_method(rb_cThread, "value", thread_value, 0);
5671 rb_define_method(rb_cThread, "kill", rb_thread_kill, 0);
5672 rb_define_method(rb_cThread, "terminate", rb_thread_kill, 0);
5673 rb_define_method(rb_cThread, "exit", rb_thread_kill, 0);
5674 rb_define_method(rb_cThread, "run", rb_thread_run, 0);
5675 rb_define_method(rb_cThread, "wakeup", rb_thread_wakeup, 0);
5676 rb_define_method(rb_cThread, "[]", rb_thread_aref, 1);
5677 rb_define_method(rb_cThread, "[]=", rb_thread_aset, 2);
5678 rb_define_method(rb_cThread, "fetch", rb_thread_fetch, -1);
5679 rb_define_method(rb_cThread, "key?", rb_thread_key_p, 1);
5680 rb_define_method(rb_cThread, "keys", rb_thread_keys, 0);
5681 rb_define_method(rb_cThread, "priority", rb_thread_priority, 0);
5682 rb_define_method(rb_cThread, "priority=", rb_thread_priority_set, 1);
5683 rb_define_method(rb_cThread, "status", rb_thread_status, 0);
5684 rb_define_method(rb_cThread, "thread_variable_get", rb_thread_variable_get, 1);
5685 rb_define_method(rb_cThread, "thread_variable_set", rb_thread_variable_set, 2);
5686 rb_define_method(rb_cThread, "thread_variables", rb_thread_variables, 0);
5687 rb_define_method(rb_cThread, "thread_variable?", rb_thread_variable_p, 1);
5688 rb_define_method(rb_cThread, "alive?", rb_thread_alive_p, 0);
5689 rb_define_method(rb_cThread, "stop?", rb_thread_stop_p, 0);
5690 rb_define_method(rb_cThread, "abort_on_exception", rb_thread_abort_exc, 0);
5691 rb_define_method(rb_cThread, "abort_on_exception=", rb_thread_abort_exc_set, 1);
5692 rb_define_method(rb_cThread, "report_on_exception", rb_thread_report_exc, 0);
5693 rb_define_method(rb_cThread, "report_on_exception=", rb_thread_report_exc_set, 1);
5694 rb_define_method(rb_cThread, "group", rb_thread_group, 0);
5695 rb_define_method(rb_cThread, "backtrace", rb_thread_backtrace_m, -1);
5696 rb_define_method(rb_cThread, "backtrace_locations", rb_thread_backtrace_locations_m, -1);
5697
5698 rb_define_method(rb_cThread, "name", rb_thread_getname, 0);
5699 rb_define_method(rb_cThread, "name=", rb_thread_setname, 1);
5700 rb_define_method(rb_cThread, "native_thread_id", rb_thread_native_thread_id, 0);
5701 rb_define_method(rb_cThread, "to_s", rb_thread_to_s, 0);
5702 rb_define_alias(rb_cThread, "inspect", "to_s");
5703
5704 rb_vm_register_special_exception(ruby_error_stream_closed, rb_eIOError,
5705 "stream closed in another thread");
5706
5707 cThGroup = rb_define_class("ThreadGroup", rb_cObject);
5708 rb_define_alloc_func(cThGroup, thgroup_s_alloc);
5709 rb_define_method(cThGroup, "list", thgroup_list, 0);
5710 rb_define_method(cThGroup, "enclose", thgroup_enclose, 0);
5711 rb_define_method(cThGroup, "enclosed?", thgroup_enclosed_p, 0);
5712 rb_define_method(cThGroup, "add", thgroup_add, 1);
5713
5714 const char * ptr = getenv("RUBY_THREAD_TIMESLICE");
5715
5716 if (ptr) {
5717 long quantum = strtol(ptr, NULL, 0);
5718 if (quantum > 0 && !(SIZEOF_LONG > 4 && quantum > UINT32_MAX)) {
5719 thread_default_quantum_ms = (uint32_t)quantum;
5720 }
5721 else if (0) {
5722 fprintf(stderr, "Ignored RUBY_THREAD_TIMESLICE=%s\n", ptr);
5723 }
5724 }
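
    /*
     * Editorial note: e.g. running `RUBY_THREAD_TIMESLICE=50 ruby app.rb`
     * sets the default scheduling quantum to 50 ms, assuming the value
     * parses as a positive integer that fits in uint32_t; other values are
     * ignored.
     */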
5725
5726 {
5727 th->thgroup = th->ractor->thgroup_default = rb_obj_alloc(cThGroup);
5728 rb_define_const(cThGroup, "Default", th->thgroup);
5729 }
5730
5731    recursive_key = rb_intern_const("__recursive_key__");
5732
5733 /* init thread core */
5734 {
5735 /* main thread setting */
5736 {
5737 /* acquire global vm lock */
5738#ifdef HAVE_PTHREAD_NP_H
5739 VM_ASSERT(TH_SCHED(th)->running == th);
5740#endif
5741 // thread_sched_to_running() should not be called because
5742 // it assumes blocked by thread_sched_to_waiting().
5743 // thread_sched_to_running(sched, th);
5744
5745 th->pending_interrupt_queue = rb_ary_hidden_new(0);
5746 th->pending_interrupt_queue_checked = 0;
5747 th->pending_interrupt_mask_stack = rb_ary_hidden_new(0);
5748 }
5749 }
5750
5751 rb_thread_create_timer_thread();
5752
5753 Init_thread_sync();
5754
5755 // TODO: Suppress unused function warning for now
5756 // if (0) rb_thread_sched_destroy(NULL);
5757}
5758
5759int
5760ruby_native_thread_p(void)
5761{
5762 rb_thread_t *th = ruby_thread_from_native();
5763
5764 return th != 0;
5765}
5766
5767#ifdef NON_SCALAR_THREAD_ID
5768 #define thread_id_str(th) (NULL)
5769#else
5770 #define thread_id_str(th) ((void *)(uintptr_t)(th)->nt->thread_id)
5771#endif
5772
5773static void
5774debug_deadlock_check(rb_ractor_t *r, VALUE msg)
5775{
5776 rb_thread_t *th = 0;
5777 VALUE sep = rb_str_new_cstr("\n ");
5778
5779 rb_str_catf(msg, "\n%d threads, %d sleeps current:%p main thread:%p\n",
5780 rb_ractor_living_thread_num(r), rb_ractor_sleeper_thread_num(r),
5781 (void *)GET_THREAD(), (void *)r->threads.main);
5782
5783 ccan_list_for_each(&r->threads.set, th, lt_node) {
5784 rb_str_catf(msg, "* %+"PRIsVALUE"\n rb_thread_t:%p "
5785 "native:%p int:%u",
5786 th->self, (void *)th, th->nt ? thread_id_str(th) : "N/A", th->ec->interrupt_flag);
5787
5788 if (th->locking_mutex) {
5789 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5790 rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
5791 (void *)mutex->fiber, rb_mutex_num_waiting(mutex));
5792 }
5793
5794 {
5795 struct rb_waiting_list *list = th->join_list;
5796 while (list) {
5797                rb_str_catf(msg, "\n depended by: rb_thread_id:%p", (void *)list->thread);
5798 list = list->next;
5799 }
5800 }
5801 rb_str_catf(msg, "\n ");
5802 rb_str_concat(msg, rb_ary_join(rb_ec_backtrace_str_ary(th->ec, RUBY_BACKTRACE_START, RUBY_ALL_BACKTRACE_LINES), sep));
5803 rb_str_catf(msg, "\n");
5804 }
5805}
5806
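/*
 * Editorial note: rb_check_deadlock() below reports a deadlock only when
 * every living thread in the ractor is sleeping forever with no pending
 * interrupt and no thread can be woken through a mutex it is involved with,
 * e.g. a lone main thread calling Thread.stop. It then raises a fatal error
 * on the main thread with the diagnostics built by debug_deadlock_check().
 */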
5807static void
5808rb_check_deadlock(rb_ractor_t *r)
5809{
5810 if (GET_THREAD()->vm->thread_ignore_deadlock) return;
5811
5812#ifdef RUBY_THREAD_PTHREAD_H
5813 if (r->threads.sched.readyq_cnt > 0) return;
5814#endif
5815
5816 int sleeper_num = rb_ractor_sleeper_thread_num(r);
5817 int ltnum = rb_ractor_living_thread_num(r);
5818
5819 if (ltnum > sleeper_num) return;
5820 if (ltnum < sleeper_num) rb_bug("sleeper must not be more than vm_living_thread_num(vm)");
5821
5822 int found = 0;
5823 rb_thread_t *th = NULL;
5824
5825 ccan_list_for_each(&r->threads.set, th, lt_node) {
5826 if (th->status != THREAD_STOPPED_FOREVER || RUBY_VM_INTERRUPTED(th->ec)) {
5827 found = 1;
5828 }
5829 else if (th->locking_mutex) {
5830 rb_mutex_t *mutex = mutex_ptr(th->locking_mutex);
5831 if (mutex->fiber == th->ec->fiber_ptr || (!mutex->fiber && !ccan_list_empty(&mutex->waitq))) {
5832 found = 1;
5833 }
5834 }
5835 if (found)
5836 break;
5837 }
5838
5839 if (!found) {
5840 VALUE argv[2];
5841 argv[0] = rb_eFatal;
5842 argv[1] = rb_str_new2("No live threads left. Deadlock?");
5843 debug_deadlock_check(r, argv[1]);
5844 rb_ractor_sleeper_threads_dec(GET_RACTOR());
5845 rb_threadptr_raise(r->threads.main, 2, argv);
5846 }
5847}
5848
5849static void
5850update_line_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5851{
5852 const rb_control_frame_t *cfp = GET_EC()->cfp;
5853 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5854 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5855 VALUE lines = RARRAY_AREF(coverage, COVERAGE_INDEX_LINES);
5856 if (lines) {
5857 long line = rb_sourceline() - 1;
5858 VM_ASSERT(line >= 0);
5859 long count;
5860 VALUE num;
5861 void rb_iseq_clear_event_flags(const rb_iseq_t *iseq, size_t pos, rb_event_flag_t reset);
5862 if (GET_VM()->coverage_mode & COVERAGE_TARGET_ONESHOT_LINES) {
5863 rb_iseq_clear_event_flags(cfp->iseq, cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1, RUBY_EVENT_COVERAGE_LINE);
5864 rb_ary_push(lines, LONG2FIX(line + 1));
5865 return;
5866 }
5867 if (line >= RARRAY_LEN(lines)) { /* no longer tracked */
5868 return;
5869 }
5870 num = RARRAY_AREF(lines, line);
5871 if (!FIXNUM_P(num)) return;
5872 count = FIX2LONG(num) + 1;
5873 if (POSFIXABLE(count)) {
5874 RARRAY_ASET(lines, line, LONG2FIX(count));
5875 }
5876 }
5877 }
5878}
5879
5880static void
5881update_branch_coverage(VALUE data, const rb_trace_arg_t *trace_arg)
5882{
5883 const rb_control_frame_t *cfp = GET_EC()->cfp;
5884 VALUE coverage = rb_iseq_coverage(cfp->iseq);
5885 if (RB_TYPE_P(coverage, T_ARRAY) && !RBASIC_CLASS(coverage)) {
5886 VALUE branches = RARRAY_AREF(coverage, COVERAGE_INDEX_BRANCHES);
5887 if (branches) {
5888 long pc = cfp->pc - ISEQ_BODY(cfp->iseq)->iseq_encoded - 1;
5889 long idx = FIX2INT(RARRAY_AREF(ISEQ_PC2BRANCHINDEX(cfp->iseq), pc)), count;
5890 VALUE counters = RARRAY_AREF(branches, 1);
5891 VALUE num = RARRAY_AREF(counters, idx);
5892 count = FIX2LONG(num) + 1;
5893 if (POSFIXABLE(count)) {
5894 RARRAY_ASET(counters, idx, LONG2FIX(count));
5895 }
5896 }
5897 }
5898}
5899
5900const rb_method_entry_t *
5901rb_resolve_me_location(const rb_method_entry_t *me, VALUE resolved_location[5])
5902{
5903 VALUE path, beg_pos_lineno, beg_pos_column, end_pos_lineno, end_pos_column;
5904
5905 if (!me->def) return NULL; // negative cme
5906
5907 retry:
5908 switch (me->def->type) {
5909 case VM_METHOD_TYPE_ISEQ: {
5910 const rb_iseq_t *iseq = me->def->body.iseq.iseqptr;
5911 rb_iseq_location_t *loc = &ISEQ_BODY(iseq)->location;
5912 path = rb_iseq_path(iseq);
5913 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5914 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5915 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5916 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5917 break;
5918 }
5919 case VM_METHOD_TYPE_BMETHOD: {
5920 const rb_iseq_t *iseq = rb_proc_get_iseq(me->def->body.bmethod.proc, 0);
5921 if (iseq) {
5922 rb_iseq_location_t *loc;
5923 rb_iseq_check(iseq);
5924 path = rb_iseq_path(iseq);
5925 loc = &ISEQ_BODY(iseq)->location;
5926 beg_pos_lineno = INT2FIX(loc->code_location.beg_pos.lineno);
5927 beg_pos_column = INT2FIX(loc->code_location.beg_pos.column);
5928 end_pos_lineno = INT2FIX(loc->code_location.end_pos.lineno);
5929 end_pos_column = INT2FIX(loc->code_location.end_pos.column);
5930 break;
5931 }
5932 return NULL;
5933 }
5934 case VM_METHOD_TYPE_ALIAS:
5935 me = me->def->body.alias.original_me;
5936 goto retry;
5937 case VM_METHOD_TYPE_REFINED:
5938 me = me->def->body.refined.orig_me;
5939 if (!me) return NULL;
5940 goto retry;
5941 default:
5942 return NULL;
5943 }
5944
5945 /* found */
5946 if (RB_TYPE_P(path, T_ARRAY)) {
5947 path = rb_ary_entry(path, 1);
5948 if (!RB_TYPE_P(path, T_STRING)) return NULL; /* just for the case... */
5949 }
5950 if (resolved_location) {
5951 resolved_location[0] = path;
5952 resolved_location[1] = beg_pos_lineno;
5953 resolved_location[2] = beg_pos_column;
5954 resolved_location[3] = end_pos_lineno;
5955 resolved_location[4] = end_pos_column;
5956 }
5957 return me;
5958}
5959
5960static void
5961update_method_coverage(VALUE me2counter, rb_trace_arg_t *trace_arg)
5962{
5963 const rb_control_frame_t *cfp = GET_EC()->cfp;
5964 const rb_callable_method_entry_t *cme = rb_vm_frame_method_entry(cfp);
5965 const rb_method_entry_t *me = (const rb_method_entry_t *)cme;
5966 VALUE rcount;
5967 long count;
5968
5969 me = rb_resolve_me_location(me, 0);
5970 if (!me) return;
5971
5972 rcount = rb_hash_aref(me2counter, (VALUE) me);
5973 count = FIXNUM_P(rcount) ? FIX2LONG(rcount) + 1 : 1;
5974 if (POSFIXABLE(count)) {
5975 rb_hash_aset(me2counter, (VALUE) me, LONG2FIX(count));
5976 }
5977}
5978
5979VALUE
5980rb_get_coverages(void)
5981{
5982 return GET_VM()->coverages;
5983}
5984
5985int
5986rb_get_coverage_mode(void)
5987{
5988 return GET_VM()->coverage_mode;
5989}
5990
5991void
5992rb_set_coverages(VALUE coverages, int mode, VALUE me2counter)
5993{
5994 GET_VM()->coverages = coverages;
5995 GET_VM()->me2counter = me2counter;
5996 GET_VM()->coverage_mode = mode;
5997}
5998
5999void
6000rb_resume_coverages(void)
6001{
6002 int mode = GET_VM()->coverage_mode;
6003 VALUE me2counter = GET_VM()->me2counter;
6004 rb_add_event_hook2((rb_event_hook_func_t) update_line_coverage, RUBY_EVENT_COVERAGE_LINE, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6005 if (mode & COVERAGE_TARGET_BRANCHES) {
6006 rb_add_event_hook2((rb_event_hook_func_t) update_branch_coverage, RUBY_EVENT_COVERAGE_BRANCH, Qnil, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6007 }
6008 if (mode & COVERAGE_TARGET_METHODS) {
6009 rb_add_event_hook2((rb_event_hook_func_t) update_method_coverage, RUBY_EVENT_CALL, me2counter, RUBY_EVENT_HOOK_FLAG_SAFE | RUBY_EVENT_HOOK_FLAG_RAW_ARG);
6010 }
6011}
6012
6013void
6014rb_suspend_coverages(void)
6015{
6016 rb_remove_event_hook((rb_event_hook_func_t) update_line_coverage);
6017 if (GET_VM()->coverage_mode & COVERAGE_TARGET_BRANCHES) {
6018 rb_remove_event_hook((rb_event_hook_func_t) update_branch_coverage);
6019 }
6020 if (GET_VM()->coverage_mode & COVERAGE_TARGET_METHODS) {
6021 rb_remove_event_hook((rb_event_hook_func_t) update_method_coverage);
6022 }
6023}
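/* Illustrative sketch (not from this file) of how the coverage entry points
 * above pair up, in the style of the coverage extension; the mode and
 * container values here are hypothetical.
 *
 *     VALUE coverages  = rb_hash_new();
 *     VALUE me2counter = rb_ident_hash_new();
 *     int mode = COVERAGE_TARGET_LINES | COVERAGE_TARGET_METHODS;
 *
 *     rb_set_coverages(coverages, mode, me2counter);
 *     rb_resume_coverages();     // installs the line/branch/method hooks
 *     // ... run Ruby code under measurement ...
 *     rb_suspend_coverages();    // removes the hooks; collected data remains
 *     rb_reset_coverages();      // drops all per-file coverage arrays
 */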
6024
6025/* Make coverage arrays empty so old covered files are no longer tracked. */
6026void
6027rb_reset_coverages(void)
6028{
6029 rb_clear_coverages();
6030 rb_iseq_remove_coverage_all();
6031 GET_VM()->coverages = Qfalse;
6032}
6033
6034VALUE
6035rb_default_coverage(int n)
6036{
6037 VALUE coverage = rb_ary_hidden_new_fill(3);
6038 VALUE lines = Qfalse, branches = Qfalse;
6039 int mode = GET_VM()->coverage_mode;
6040
6041 if (mode & COVERAGE_TARGET_LINES) {
6042 lines = n > 0 ? rb_ary_hidden_new_fill(n) : rb_ary_hidden_new(0);
6043 }
6044 RARRAY_ASET(coverage, COVERAGE_INDEX_LINES, lines);
6045
6046 if (mode & COVERAGE_TARGET_BRANCHES) {
6047 branches = rb_ary_hidden_new_fill(2);
6048 /* internal data structures for branch coverage:
6049 *
6050 * { branch base node =>
6051 * [base_type, base_first_lineno, base_first_column, base_last_lineno, base_last_column, {
6052 * branch target id =>
6053 * [target_type, target_first_lineno, target_first_column, target_last_lineno, target_last_column, target_counter_index],
6054 * ...
6055 * }],
6056 * ...
6057 * }
6058 *
6059 * Example:
6060 * { NODE_CASE =>
6061 * [:case, 1, 0, 4, 3, {
6062 * NODE_WHEN => [:when, 2, 8, 2, 9, 0],
6063 * NODE_WHEN => [:when, 3, 8, 3, 9, 1],
6064 * ...
6065 * }],
6066 * ...
6067 * }
6068 */
6069 VALUE structure = rb_hash_new();
6070 rb_obj_hide(structure);
6071 RARRAY_ASET(branches, 0, structure);
6072 /* branch execution counters */
6073 RARRAY_ASET(branches, 1, rb_ary_hidden_new(0));
6074 }
6075 RARRAY_ASET(coverage, COVERAGE_INDEX_BRANCHES, branches);
6076
6077 return coverage;
6078}
6079
6080static VALUE
6081uninterruptible_exit(VALUE v)
6082{
6083 rb_thread_t *cur_th = GET_THREAD();
6084 rb_ary_pop(cur_th->pending_interrupt_mask_stack);
6085
6086 cur_th->pending_interrupt_queue_checked = 0;
6087 if (!rb_threadptr_pending_interrupt_empty_p(cur_th)) {
6088 RUBY_VM_SET_INTERRUPT(cur_th->ec);
6089 }
6090 return Qnil;
6091}
6092
6093VALUE
6094rb_uninterruptible(VALUE (*b_proc)(VALUE), VALUE data)
6095{
6096 VALUE interrupt_mask = rb_ident_hash_new();
6097 rb_thread_t *cur_th = GET_THREAD();
6098
6099 rb_hash_aset(interrupt_mask, rb_cObject, sym_never);
6100 OBJ_FREEZE(interrupt_mask);
6101 rb_ary_push(cur_th->pending_interrupt_mask_stack, interrupt_mask);
6102
6103 VALUE ret = rb_ensure(b_proc, data, uninterruptible_exit, Qnil);
6104
6105 RUBY_VM_CHECK_INTS(cur_th->ec);
6106 return ret;
6107}
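/* Illustrative sketch (not from this file) of rb_uninterruptible(): the
 * callback and its argument are hypothetical. The callback runs with the
 * { Object => :never } mask pushed, so asynchronous interrupts such as
 * Thread#raise and Thread#kill are deferred until the RUBY_VM_CHECK_INTS()
 * call above, after the block returns.
 *
 *     static VALUE
 *     critical_body(VALUE arg)
 *     {
 *         // no async interrupt can fire inside this function
 *         return rb_funcall(arg, rb_intern("commit"), 0);  // "commit" is hypothetical
 *     }
 *
 *     VALUE result = rb_uninterruptible(critical_body, obj);
 */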
6108
6109static void
6110thread_specific_storage_alloc(rb_thread_t *th)
6111{
6112 VM_ASSERT(th->specific_storage == NULL);
6113
6114 if (UNLIKELY(specific_key_count > 0)) {
6115 th->specific_storage = ZALLOC_N(void *, RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6116 }
6117}
6118
6119rb_internal_thread_specific_key_t
6120rb_internal_thread_specific_key_create(void)
6121{
6122 rb_vm_t *vm = GET_VM();
6123
6124 if (specific_key_count == 0 && vm->ractor.cnt > 1) {
6125 rb_raise(rb_eThreadError, "The first rb_internal_thread_specific_key_create() is called with multiple ractors");
6126 }
6127 else if (specific_key_count >= RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX) { // valid keys are 0..MAX-1
6128 rb_raise(rb_eThreadError, "rb_internal_thread_specific_key_create() is called more than %d times", RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6129 }
6130 else {
6131 rb_internal_thread_specific_key_t key = specific_key_count++;
6132
6133 if (key == 0) {
6134 // allocate
6135 rb_ractor_t *cr = GET_RACTOR();
6136 rb_thread_t *th;
6137
6138 ccan_list_for_each(&cr->threads.set, th, lt_node) {
6139 thread_specific_storage_alloc(th);
6140 }
6141 }
6142 return key;
6143 }
6144}
6145
6146// async and native thread safe.
6147void *
6148rb_internal_thread_specific_get(VALUE thread_val, rb_internal_thread_specific_key_t key)
6149{
6150 rb_thread_t *th = DATA_PTR(thread_val);
6151
6152 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6153 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6154 VM_ASSERT(th->specific_storage);
6155
6156 return th->specific_storage[key];
6157}
6158
6159// async and native thread safe.
6160void
6161rb_internal_thread_specific_set(VALUE thread_val, rb_internal_thread_specific_key_t key, void *data)
6162{
6163 rb_thread_t *th = DATA_PTR(thread_val);
6164
6165 VM_ASSERT(rb_thread_ptr(thread_val) == th);
6166 VM_ASSERT(key < RB_INTERNAL_THREAD_SPECIFIC_KEY_MAX);
6167 VM_ASSERT(th->specific_storage);
6168
6169 th->specific_storage[key] = data;
6170}
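/* Illustrative sketch (not from this file) of the thread-specific storage
 * API above, as a tool such as a profiler might use it; the key variable
 * and state struct are hypothetical. The key must be created while only
 * one ractor exists, per the check in rb_internal_thread_specific_key_create().
 *
 *     static rb_internal_thread_specific_key_t tool_key;
 *     struct tool_state { long samples; };
 *
 *     void
 *     tool_init(void)
 *     {
 *         tool_key = rb_internal_thread_specific_key_create();
 *     }
 *
 *     // later, e.g. from an internal thread event hook:
 *     struct tool_state *st = rb_internal_thread_specific_get(thread_val, tool_key);
 *     if (st == NULL) {
 *         st = calloc(1, sizeof(*st));
 *         rb_internal_thread_specific_set(thread_val, tool_key, st);
 *     }
 */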
6171
6172// interrupt_exec
6173
6174struct rb_interrupt_exec_task {
6175 struct ccan_list_node node;
6176
6177 rb_interrupt_exec_func_t *func;
6178 void *data;
6179 enum rb_interrupt_exec_flag flags;
6180};
6181
6182void
6183rb_threadptr_interrupt_exec_task_mark(rb_thread_t *th)
6184{
6185 struct rb_interrupt_exec_task *task;
6186
6187 ccan_list_for_each(&th->interrupt_exec_tasks, task, node) {
6188 if (task->flags & rb_interrupt_exec_flag_value_data) {
6189 rb_gc_mark((VALUE)task->data);
6190 }
6191 }
6192}
6193
6194// native thread safe
6195// th must still be valid; the caller is responsible for keeping it alive
6196void
6197rb_threadptr_interrupt_exec(rb_thread_t *th, rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6198{
6199 // should not use ALLOC here: this path is documented native thread safe, but ALLOC can raise on allocation failure
6200 struct rb_interrupt_exec_task *task = ALLOC(struct rb_interrupt_exec_task);
6201 *task = (struct rb_interrupt_exec_task) {
6202 .flags = flags,
6203 .func = func,
6204 .data = data,
6205 };
6206
6207 rb_native_mutex_lock(&th->interrupt_lock);
6208 {
6209 ccan_list_add_tail(&th->interrupt_exec_tasks, &task->node);
6210 threadptr_set_interrupt_locked(th, true);
6211 }
6212 rb_native_mutex_unlock(&th->interrupt_lock);
6213}
6214
6215static void
6216threadptr_interrupt_exec_exec(rb_thread_t *th)
6217{
6218 while (1) {
6219 struct rb_interrupt_exec_task *task;
6220
6221 rb_native_mutex_lock(&th->interrupt_lock);
6222 {
6223 task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node);
6224 }
6225 rb_native_mutex_unlock(&th->interrupt_lock);
6226
6227 RUBY_DEBUG_LOG("task:%p", task);
6228
6229 if (task) {
6230 if (task->flags & rb_interrupt_exec_flag_new_thread) {
6231 rb_thread_create(task->func, task->data);
6232 }
6233 else {
6234 (*task->func)(task->data);
6235 }
6236 ruby_xfree(task);
6237 }
6238 else {
6239 break;
6240 }
6241 }
6242}
6243
6244static void
6245threadptr_interrupt_exec_cleanup(rb_thread_t *th)
6246{
6247 rb_native_mutex_lock(&th->interrupt_lock);
6248 {
6249 struct rb_interrupt_exec_task *task;
6250
6251 while ((task = ccan_list_pop(&th->interrupt_exec_tasks, struct rb_interrupt_exec_task, node)) != NULL) {
6252 ruby_xfree(task);
6253 }
6254 }
6255 rb_native_mutex_unlock(&th->interrupt_lock);
6256}
6257
6258// native thread safe
6259// func/data should be native thread safe
6260void
6261rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
6262 rb_interrupt_exec_func_t *func, void *data, enum rb_interrupt_exec_flag flags)
6263{
6264 RUBY_DEBUG_LOG("flags:%d", (int)flags);
6265
6266 rb_thread_t *main_th = target_r->threads.main;
6267 rb_threadptr_interrupt_exec(main_th, func, data, flags | rb_interrupt_exec_flag_new_thread);
6268}
6269
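/* Illustrative sketch (not from this file) of queueing work onto another
 * thread via the interrupt mechanism above; the target thread pointer and
 * callback are hypothetical, and flags of 0 request neither VALUE marking
 * nor execution in a new thread.
 *
 *     static void
 *     notify_func(void *data)
 *     {
 *         // runs on target_th, inside threadptr_interrupt_exec_exec(),
 *         // at the target's next interrupt check
 *     }
 *
 *     rb_threadptr_interrupt_exec(target_th, notify_func, NULL, 0);
 */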